problem_id (stringlengths 18-21) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-54) | prompt (stringlengths 1.28k-64.2k) | golden_diff (stringlengths 166-811) | verification_info (stringlengths 604-118k)
---|---|---|---|---|---|---|
gh_patches_debug_1400 | rasdani/github-patches | git_diff | numpy__numpy-12439 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: functions marked with NPY_NO_EXPORT still are exported
Steps to reproduce (on linux):
- git checkout
- `python setup.py build_ext`
- choose a random function marked with the `NPY_NO_EXPORT` macro, for instance [`datetime_metadata_divides`](https://github.com/numpy/numpy/blob/v1.15.4/numpy/core/src/multiarray/_datetime.h#L108) and ~call
`nm build/lib*/numpy/core/_multiarray_umath*.so |grep datetime_metadata_divides`~ check that the function is not exported:
```
import ctypes, numpy as np
dll = ctypes.CDLL(np.core._multiarray_umath.__file__)
print(getattr(dll, 'datetime_metadata_divides', None))
```
Note that the function appears in the result. It should not. I think the problem is in the `visibility_define` [function](https://github.com/numpy/numpy/blob/v1.15.4/numpy/core/setup.py#L379) which only hides the functions for gcc 4.
Edit: use ctypes to check for export
--- END ISSUE ---
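For reference, a minimal sketch of the ctypes check described in the issue, assuming numpy has just been rebuilt in place; `datetime_metadata_divides` is the symbol named in the report, and any other function marked `NPY_NO_EXPORT` could be substituted:

```python
import ctypes

import numpy as np

# Load the freshly built extension module as an ordinary shared library.
dll = ctypes.CDLL(np.core._multiarray_umath.__file__)

# A symbol marked NPY_NO_EXPORT should not be resolvable here once the build
# hides it; getattr() falling back to None means the symbol is not exported.
symbol = getattr(dll, "datetime_metadata_divides", None)
print("exported" if symbol is not None else "hidden")
```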
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpy/core/setup.py`
Content:
```
1 from __future__ import division, print_function
2
3 import os
4 import sys
5 import pickle
6 import copy
7 import warnings
8 import platform
9 from os.path import join
10 from numpy.distutils import log
11 from distutils.dep_util import newer
12 from distutils.sysconfig import get_config_var
13 from numpy._build_utils.apple_accelerate import (
14 uses_accelerate_framework, get_sgemv_fix
15 )
16 from numpy.compat import npy_load_module
17 from setup_common import *
18
19 # Set to True to enable relaxed strides checking. This (mostly) means
20 # that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
21 NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
22
23 # Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a
24 # bogus value for affected strides in order to help smoke out bad stride usage
25 # when relaxed stride checking is enabled.
26 NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")
27 NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING
28
29 # XXX: ugly, we use a class to avoid calling twice some expensive functions in
30 # config.h/numpyconfig.h. I don't see a better way because distutils force
31 # config.h generation inside an Extension class, and as such sharing
32 # configuration information between extensions is not easy.
33 # Using a pickled-based memoize does not work because config_cmd is an instance
34 # method, which cPickle does not like.
35 #
36 # Use pickle in all cases, as cPickle is gone in python3 and the difference
37 # in time is only in build. -- Charles Harris, 2013-03-30
38
39 class CallOnceOnly(object):
40 def __init__(self):
41 self._check_types = None
42 self._check_ieee_macros = None
43 self._check_complex = None
44
45 def check_types(self, *a, **kw):
46 if self._check_types is None:
47 out = check_types(*a, **kw)
48 self._check_types = pickle.dumps(out)
49 else:
50 out = copy.deepcopy(pickle.loads(self._check_types))
51 return out
52
53 def check_ieee_macros(self, *a, **kw):
54 if self._check_ieee_macros is None:
55 out = check_ieee_macros(*a, **kw)
56 self._check_ieee_macros = pickle.dumps(out)
57 else:
58 out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
59 return out
60
61 def check_complex(self, *a, **kw):
62 if self._check_complex is None:
63 out = check_complex(*a, **kw)
64 self._check_complex = pickle.dumps(out)
65 else:
66 out = copy.deepcopy(pickle.loads(self._check_complex))
67 return out
68
69 def pythonlib_dir():
70 """return path where libpython* is."""
71 if sys.platform == 'win32':
72 return os.path.join(sys.prefix, "libs")
73 else:
74 return get_config_var('LIBDIR')
75
76 def is_npy_no_signal():
77 """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
78 header."""
79 return sys.platform == 'win32'
80
81 def is_npy_no_smp():
82 """Return True if the NPY_NO_SMP symbol must be defined in public
83 header (when SMP support cannot be reliably enabled)."""
84 # Perhaps a fancier check is in order here.
85 # so that threads are only enabled if there
86 # are actually multiple CPUS? -- but
87 # threaded code can be nice even on a single
88 # CPU so that long-calculating code doesn't
89 # block.
90 return 'NPY_NOSMP' in os.environ
91
92 def win32_checks(deflist):
93 from numpy.distutils.misc_util import get_build_architecture
94 a = get_build_architecture()
95
96 # Distutils hack on AMD64 on windows
97 print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
98 (a, os.name, sys.platform))
99 if a == 'AMD64':
100 deflist.append('DISTUTILS_USE_SDK')
101
102 # On win32, force long double format string to be 'g', not
103 # 'Lg', since the MS runtime does not support long double whose
104 # size is > sizeof(double)
105 if a == "Intel" or a == "AMD64":
106 deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
107
108 def check_math_capabilities(config, moredefs, mathlibs):
109 def check_func(func_name):
110 return config.check_func(func_name, libraries=mathlibs,
111 decl=True, call=True)
112
113 def check_funcs_once(funcs_name):
114 decl = dict([(f, True) for f in funcs_name])
115 st = config.check_funcs_once(funcs_name, libraries=mathlibs,
116 decl=decl, call=decl)
117 if st:
118 moredefs.extend([(fname2def(f), 1) for f in funcs_name])
119 return st
120
121 def check_funcs(funcs_name):
122 # Use check_funcs_once first, and if it does not work, test func per
123 # func. Return success only if all the functions are available
124 if not check_funcs_once(funcs_name):
125 # Global check failed, check func per func
126 for f in funcs_name:
127 if check_func(f):
128 moredefs.append((fname2def(f), 1))
129 return 0
130 else:
131 return 1
132
133 #use_msvc = config.check_decl("_MSC_VER")
134
135 if not check_funcs_once(MANDATORY_FUNCS):
136 raise SystemError("One of the required function to build numpy is not"
137 " available (the list is %s)." % str(MANDATORY_FUNCS))
138
139 # Standard functions which may not be available and for which we have a
140 # replacement implementation. Note that some of these are C99 functions.
141
142 # XXX: hack to circumvent cpp pollution from python: python put its
143 # config.h in the public namespace, so we have a clash for the common
144 # functions we test. We remove every function tested by python's
145 # autoconf, hoping their own test are correct
146 for f in OPTIONAL_STDFUNCS_MAYBE:
147 if config.check_decl(fname2def(f),
148 headers=["Python.h", "math.h"]):
149 OPTIONAL_STDFUNCS.remove(f)
150
151 check_funcs(OPTIONAL_STDFUNCS)
152
153 for h in OPTIONAL_HEADERS:
154 if config.check_func("", decl=False, call=False, headers=[h]):
155 h = h.replace(".", "_").replace(os.path.sep, "_")
156 moredefs.append((fname2def(h), 1))
157
158 for tup in OPTIONAL_INTRINSICS:
159 headers = None
160 if len(tup) == 2:
161 f, args, m = tup[0], tup[1], fname2def(tup[0])
162 elif len(tup) == 3:
163 f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])
164 else:
165 f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])
166 if config.check_func(f, decl=False, call=True, call_args=args,
167 headers=headers):
168 moredefs.append((m, 1))
169
170 for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
171 if config.check_gcc_function_attribute(dec, fn):
172 moredefs.append((fname2def(fn), 1))
173
174 for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
175 if config.check_gcc_variable_attribute(fn):
176 m = fn.replace("(", "_").replace(")", "_")
177 moredefs.append((fname2def(m), 1))
178
179 # C99 functions: float and long double versions
180 check_funcs(C99_FUNCS_SINGLE)
181 check_funcs(C99_FUNCS_EXTENDED)
182
183 def check_complex(config, mathlibs):
184 priv = []
185 pub = []
186
187 try:
188 if os.uname()[0] == "Interix":
189 warnings.warn("Disabling broken complex support. See #1365", stacklevel=2)
190 return priv, pub
191 except Exception:
192 # os.uname not available on all platforms. blanket except ugly but safe
193 pass
194
195 # Check for complex support
196 st = config.check_header('complex.h')
197 if st:
198 priv.append(('HAVE_COMPLEX_H', 1))
199 pub.append(('NPY_USE_C99_COMPLEX', 1))
200
201 for t in C99_COMPLEX_TYPES:
202 st = config.check_type(t, headers=["complex.h"])
203 if st:
204 pub.append(('NPY_HAVE_%s' % type2def(t), 1))
205
206 def check_prec(prec):
207 flist = [f + prec for f in C99_COMPLEX_FUNCS]
208 decl = dict([(f, True) for f in flist])
209 if not config.check_funcs_once(flist, call=decl, decl=decl,
210 libraries=mathlibs):
211 for f in flist:
212 if config.check_func(f, call=True, decl=True,
213 libraries=mathlibs):
214 priv.append((fname2def(f), 1))
215 else:
216 priv.extend([(fname2def(f), 1) for f in flist])
217
218 check_prec('')
219 check_prec('f')
220 check_prec('l')
221
222 return priv, pub
223
224 def check_ieee_macros(config):
225 priv = []
226 pub = []
227
228 macros = []
229
230 def _add_decl(f):
231 priv.append(fname2def("decl_%s" % f))
232 pub.append('NPY_%s' % fname2def("decl_%s" % f))
233
234 # XXX: hack to circumvent cpp pollution from python: python put its
235 # config.h in the public namespace, so we have a clash for the common
236 # functions we test. We remove every function tested by python's
237 # autoconf, hoping their own test are correct
238 _macros = ["isnan", "isinf", "signbit", "isfinite"]
239 for f in _macros:
240 py_symbol = fname2def("decl_%s" % f)
241 already_declared = config.check_decl(py_symbol,
242 headers=["Python.h", "math.h"])
243 if already_declared:
244 if config.check_macro_true(py_symbol,
245 headers=["Python.h", "math.h"]):
246 pub.append('NPY_%s' % fname2def("decl_%s" % f))
247 else:
248 macros.append(f)
249 # Normally, isnan and isinf are macro (C99), but some platforms only have
250 # func, or both func and macro version. Check for macro only, and define
251 # replacement ones if not found.
252 # Note: including Python.h is necessary because it modifies some math.h
253 # definitions
254 for f in macros:
255 st = config.check_decl(f, headers=["Python.h", "math.h"])
256 if st:
257 _add_decl(f)
258
259 return priv, pub
260
261 def check_types(config_cmd, ext, build_dir):
262 private_defines = []
263 public_defines = []
264
265 # Expected size (in number of bytes) for each type. This is an
266 # optimization: those are only hints, and an exhaustive search for the size
267 # is done if the hints are wrong.
268 expected = {'short': [2], 'int': [4], 'long': [8, 4],
269 'float': [4], 'double': [8], 'long double': [16, 12, 8],
270 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],
271 'off_t': [8, 4]}
272
273 # Check we have the python header (-dev* packages on Linux)
274 result = config_cmd.check_header('Python.h')
275 if not result:
276 python = 'python'
277 if '__pypy__' in sys.builtin_module_names:
278 python = 'pypy'
279 raise SystemError(
280 "Cannot compile 'Python.h'. Perhaps you need to "
281 "install {0}-dev|{0}-devel.".format(python))
282 res = config_cmd.check_header("endian.h")
283 if res:
284 private_defines.append(('HAVE_ENDIAN_H', 1))
285 public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
286 res = config_cmd.check_header("sys/endian.h")
287 if res:
288 private_defines.append(('HAVE_SYS_ENDIAN_H', 1))
289 public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))
290
291 # Check basic types sizes
292 for type in ('short', 'int', 'long'):
293 res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
294 if res:
295 public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
296 else:
297 res = config_cmd.check_type_size(type, expected=expected[type])
298 if res >= 0:
299 public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
300 else:
301 raise SystemError("Checking sizeof (%s) failed !" % type)
302
303 for type in ('float', 'double', 'long double'):
304 already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
305 headers=["Python.h"])
306 res = config_cmd.check_type_size(type, expected=expected[type])
307 if res >= 0:
308 public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
309 if not already_declared and not type == 'long double':
310 private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
311 else:
312 raise SystemError("Checking sizeof (%s) failed !" % type)
313
314 # Compute size of corresponding complex type: used to check that our
315 # definition is binary compatible with C99 complex type (check done at
316 # build time in npy_common.h)
317 complex_def = "struct {%s __x; %s __y;}" % (type, type)
318 res = config_cmd.check_type_size(complex_def,
319 expected=[2 * x for x in expected[type]])
320 if res >= 0:
321 public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
322 else:
323 raise SystemError("Checking sizeof (%s) failed !" % complex_def)
324
325 for type in ('Py_intptr_t', 'off_t'):
326 res = config_cmd.check_type_size(type, headers=["Python.h"],
327 library_dirs=[pythonlib_dir()],
328 expected=expected[type])
329
330 if res >= 0:
331 private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
332 public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
333 else:
334 raise SystemError("Checking sizeof (%s) failed !" % type)
335
336 # We check declaration AND type because that's how distutils does it.
337 if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
338 res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
339 library_dirs=[pythonlib_dir()],
340 expected=expected['PY_LONG_LONG'])
341 if res >= 0:
342 private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
343 public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
344 else:
345 raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
346
347 res = config_cmd.check_type_size('long long',
348 expected=expected['long long'])
349 if res >= 0:
350 #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
351 public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
352 else:
353 raise SystemError("Checking sizeof (%s) failed !" % 'long long')
354
355 if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
356 raise RuntimeError(
357 "Config wo CHAR_BIT is not supported"
358 ", please contact the maintainers")
359
360 return private_defines, public_defines
361
362 def check_mathlib(config_cmd):
363 # Testing the C math library
364 mathlibs = []
365 mathlibs_choices = [[], ['m'], ['cpml']]
366 mathlib = os.environ.get('MATHLIB')
367 if mathlib:
368 mathlibs_choices.insert(0, mathlib.split(','))
369 for libs in mathlibs_choices:
370 if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
371 mathlibs = libs
372 break
373 else:
374 raise EnvironmentError("math library missing; rerun "
375 "setup.py after setting the "
376 "MATHLIB env variable")
377 return mathlibs
378
379 def visibility_define(config):
380 """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
381 string)."""
382 if config.check_compiler_gcc4():
383 return '__attribute__((visibility("hidden")))'
384 else:
385 return ''
386
387 def configuration(parent_package='',top_path=None):
388 from numpy.distutils.misc_util import Configuration, dot_join
389 from numpy.distutils.system_info import get_info
390
391 config = Configuration('core', parent_package, top_path)
392 local_dir = config.local_path
393 codegen_dir = join(local_dir, 'code_generators')
394
395 if is_released(config):
396 warnings.simplefilter('error', MismatchCAPIWarning)
397
398 # Check whether we have a mismatch between the set C API VERSION and the
399 # actual C API VERSION
400 check_api_version(C_API_VERSION, codegen_dir)
401
402 generate_umath_py = join(codegen_dir, 'generate_umath.py')
403 n = dot_join(config.name, 'generate_umath')
404 generate_umath = npy_load_module('_'.join(n.split('.')),
405 generate_umath_py, ('.py', 'U', 1))
406
407 header_dir = 'include/numpy' # this is relative to config.path_in_package
408
409 cocache = CallOnceOnly()
410
411 def generate_config_h(ext, build_dir):
412 target = join(build_dir, header_dir, 'config.h')
413 d = os.path.dirname(target)
414 if not os.path.exists(d):
415 os.makedirs(d)
416
417 if newer(__file__, target):
418 config_cmd = config.get_config_cmd()
419 log.info('Generating %s', target)
420
421 # Check sizeof
422 moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
423
424 # Check math library and C99 math funcs availability
425 mathlibs = check_mathlib(config_cmd)
426 moredefs.append(('MATHLIB', ','.join(mathlibs)))
427
428 check_math_capabilities(config_cmd, moredefs, mathlibs)
429 moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
430 moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
431
432 # Signal check
433 if is_npy_no_signal():
434 moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
435
436 # Windows checks
437 if sys.platform == 'win32' or os.name == 'nt':
438 win32_checks(moredefs)
439
440 # C99 restrict keyword
441 moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))
442
443 # Inline check
444 inline = config_cmd.check_inline()
445
446 # Use relaxed stride checking
447 if NPY_RELAXED_STRIDES_CHECKING:
448 moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
449
450 # Use bogus stride debug aid when relaxed strides are enabled
451 if NPY_RELAXED_STRIDES_DEBUG:
452 moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
453
454 # Get long double representation
455 rep = check_long_double_representation(config_cmd)
456 moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
457
458 # Py3K check
459 if sys.version_info[0] == 3:
460 moredefs.append(('NPY_PY3K', 1))
461
462 # Generate the config.h file from moredefs
463 target_f = open(target, 'w')
464 for d in moredefs:
465 if isinstance(d, str):
466 target_f.write('#define %s\n' % (d))
467 else:
468 target_f.write('#define %s %s\n' % (d[0], d[1]))
469
470 # define inline to our keyword, or nothing
471 target_f.write('#ifndef __cplusplus\n')
472 if inline == 'inline':
473 target_f.write('/* #undef inline */\n')
474 else:
475 target_f.write('#define inline %s\n' % inline)
476 target_f.write('#endif\n')
477
478 # add the guard to make sure config.h is never included directly,
479 # but always through npy_config.h
480 target_f.write("""
481 #ifndef _NPY_NPY_CONFIG_H_
482 #error config.h should never be included directly, include npy_config.h instead
483 #endif
484 """)
485
486 target_f.close()
487 print('File:', target)
488 target_f = open(target)
489 print(target_f.read())
490 target_f.close()
491 print('EOF')
492 else:
493 mathlibs = []
494 target_f = open(target)
495 for line in target_f:
496 s = '#define MATHLIB'
497 if line.startswith(s):
498 value = line[len(s):].strip()
499 if value:
500 mathlibs.extend(value.split(','))
501 target_f.close()
502
503 # Ugly: this can be called within a library and not an extension,
504 # in which case there is no libraries attributes (and none is
505 # needed).
506 if hasattr(ext, 'libraries'):
507 ext.libraries.extend(mathlibs)
508
509 incl_dir = os.path.dirname(target)
510 if incl_dir not in config.numpy_include_dirs:
511 config.numpy_include_dirs.append(incl_dir)
512
513 return target
514
515 def generate_numpyconfig_h(ext, build_dir):
516 """Depends on config.h: generate_config_h has to be called before !"""
517 # put common include directory in build_dir on search path
518 # allows using code generation in headers headers
519 config.add_include_dirs(join(build_dir, "src", "common"))
520 config.add_include_dirs(join(build_dir, "src", "npymath"))
521
522 target = join(build_dir, header_dir, '_numpyconfig.h')
523 d = os.path.dirname(target)
524 if not os.path.exists(d):
525 os.makedirs(d)
526 if newer(__file__, target):
527 config_cmd = config.get_config_cmd()
528 log.info('Generating %s', target)
529
530 # Check sizeof
531 ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
532
533 if is_npy_no_signal():
534 moredefs.append(('NPY_NO_SIGNAL', 1))
535
536 if is_npy_no_smp():
537 moredefs.append(('NPY_NO_SMP', 1))
538 else:
539 moredefs.append(('NPY_NO_SMP', 0))
540
541 mathlibs = check_mathlib(config_cmd)
542 moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
543 moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
544
545 if NPY_RELAXED_STRIDES_CHECKING:
546 moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
547
548 if NPY_RELAXED_STRIDES_DEBUG:
549 moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
550
551 # Check whether we can use inttypes (C99) formats
552 if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
553 moredefs.append(('NPY_USE_C99_FORMATS', 1))
554
555 # visibility check
556 hidden_visibility = visibility_define(config_cmd)
557 moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
558
559 # Add the C API/ABI versions
560 moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
561 moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
562
563 # Add moredefs to header
564 target_f = open(target, 'w')
565 for d in moredefs:
566 if isinstance(d, str):
567 target_f.write('#define %s\n' % (d))
568 else:
569 target_f.write('#define %s %s\n' % (d[0], d[1]))
570
571 # Define __STDC_FORMAT_MACROS
572 target_f.write("""
573 #ifndef __STDC_FORMAT_MACROS
574 #define __STDC_FORMAT_MACROS 1
575 #endif
576 """)
577 target_f.close()
578
579 # Dump the numpyconfig.h header to stdout
580 print('File: %s' % target)
581 target_f = open(target)
582 print(target_f.read())
583 target_f.close()
584 print('EOF')
585 config.add_data_files((header_dir, target))
586 return target
587
588 def generate_api_func(module_name):
589 def generate_api(ext, build_dir):
590 script = join(codegen_dir, module_name + '.py')
591 sys.path.insert(0, codegen_dir)
592 try:
593 m = __import__(module_name)
594 log.info('executing %s', script)
595 h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
596 finally:
597 del sys.path[0]
598 config.add_data_files((header_dir, h_file),
599 (header_dir, doc_file))
600 return (h_file,)
601 return generate_api
602
603 generate_numpy_api = generate_api_func('generate_numpy_api')
604 generate_ufunc_api = generate_api_func('generate_ufunc_api')
605
606 config.add_include_dirs(join(local_dir, "src", "common"))
607 config.add_include_dirs(join(local_dir, "src"))
608 config.add_include_dirs(join(local_dir))
609
610 config.add_data_files('include/numpy/*.h')
611 config.add_include_dirs(join('src', 'npymath'))
612 config.add_include_dirs(join('src', 'multiarray'))
613 config.add_include_dirs(join('src', 'umath'))
614 config.add_include_dirs(join('src', 'npysort'))
615
616 config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this macro indicates that Numpy build is in process
617 config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
618 if sys.platform[:3] == "aix":
619 config.add_define_macros([("_LARGE_FILES", None)])
620 else:
621 config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
622 config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
623 config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
624
625 config.numpy_include_dirs.extend(config.paths('include'))
626
627 deps = [join('src', 'npymath', '_signbit.c'),
628 join('include', 'numpy', '*object.h'),
629 join(codegen_dir, 'genapi.py'),
630 ]
631
632 #######################################################################
633 # dummy module #
634 #######################################################################
635
636 # npymath needs the config.h and numpyconfig.h files to be generated, but
637 # build_clib cannot handle generate_config_h and generate_numpyconfig_h
638 # (don't ask). Because clib are generated before extensions, we have to
639 # explicitly add an extension which has generate_config_h and
640 # generate_numpyconfig_h as sources *before* adding npymath.
641
642 config.add_extension('_dummy',
643 sources=[join('src', 'dummymodule.c'),
644 generate_config_h,
645 generate_numpyconfig_h,
646 generate_numpy_api]
647 )
648
649 #######################################################################
650 # npymath library #
651 #######################################################################
652
653 subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
654
655 def get_mathlib_info(*args):
656 # Another ugly hack: the mathlib info is known once build_src is run,
657 # but we cannot use add_installed_pkg_config here either, so we only
658 # update the substitution dictionary during npymath build
659 config_cmd = config.get_config_cmd()
660
661 # Check that the toolchain works, to fail early if it doesn't
662 # (avoid late errors with MATHLIB which are confusing if the
663 # compiler does not work).
664 st = config_cmd.try_link('int main(void) { return 0;}')
665 if not st:
666 raise RuntimeError("Broken toolchain: cannot link a simple C program")
667 mlibs = check_mathlib(config_cmd)
668
669 posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
670 msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
671 subst_dict["posix_mathlib"] = posix_mlib
672 subst_dict["msvc_mathlib"] = msvc_mlib
673
674 npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),
675 join('src', 'npymath', 'npy_math.c'),
676 join('src', 'npymath', 'ieee754.c.src'),
677 join('src', 'npymath', 'npy_math_complex.c.src'),
678 join('src', 'npymath', 'halffloat.c')
679 ]
680
681 # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.
682 is_msvc = platform.system() == 'Windows'
683 config.add_installed_library('npymath',
684 sources=npymath_sources + [get_mathlib_info],
685 install_dir='lib',
686 build_info={
687 'include_dirs' : [], # empty list required for creating npy_math_internal.h
688 'extra_compiler_args' : (['/GL-'] if is_msvc else []),
689 })
690 config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
691 subst_dict)
692 config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
693 subst_dict)
694
695 #######################################################################
696 # npysort library #
697 #######################################################################
698
699 # This library is created for the build but it is not installed
700 npysort_sources = [join('src', 'common', 'npy_sort.h.src'),
701 join('src', 'npysort', 'quicksort.c.src'),
702 join('src', 'npysort', 'mergesort.c.src'),
703 join('src', 'npysort', 'heapsort.c.src'),
704 join('src', 'common', 'npy_partition.h.src'),
705 join('src', 'npysort', 'selection.c.src'),
706 join('src', 'common', 'npy_binsearch.h.src'),
707 join('src', 'npysort', 'binsearch.c.src'),
708 ]
709 config.add_library('npysort',
710 sources=npysort_sources,
711 include_dirs=[])
712
713 #######################################################################
714 # multiarray_tests module #
715 #######################################################################
716
717 config.add_extension('_multiarray_tests',
718 sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),
719 join('src', 'common', 'mem_overlap.c')],
720 depends=[join('src', 'common', 'mem_overlap.h'),
721 join('src', 'common', 'npy_extint128.h')],
722 libraries=['npymath'])
723
724 #######################################################################
725 # _multiarray_umath module - common part #
726 #######################################################################
727
728 common_deps = [
729 join('src', 'common', 'array_assign.h'),
730 join('src', 'common', 'binop_override.h'),
731 join('src', 'common', 'cblasfuncs.h'),
732 join('src', 'common', 'lowlevel_strided_loops.h'),
733 join('src', 'common', 'mem_overlap.h'),
734 join('src', 'common', 'npy_config.h'),
735 join('src', 'common', 'npy_ctypes.h'),
736 join('src', 'common', 'npy_extint128.h'),
737 join('src', 'common', 'npy_import.h'),
738 join('src', 'common', 'npy_longdouble.h'),
739 join('src', 'common', 'templ_common.h.src'),
740 join('src', 'common', 'ucsnarrow.h'),
741 join('src', 'common', 'ufunc_override.h'),
742 join('src', 'common', 'umathmodule.h'),
743 join('src', 'common', 'numpyos.h'),
744 ]
745
746 common_src = [
747 join('src', 'common', 'array_assign.c'),
748 join('src', 'common', 'mem_overlap.c'),
749 join('src', 'common', 'npy_longdouble.c'),
750 join('src', 'common', 'templ_common.h.src'),
751 join('src', 'common', 'ucsnarrow.c'),
752 join('src', 'common', 'ufunc_override.c'),
753 join('src', 'common', 'numpyos.c'),
754 ]
755
756 blas_info = get_info('blas_opt', 0)
757 if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
758 extra_info = blas_info
759 # These files are also in MANIFEST.in so that they are always in
760 # the source distribution independently of HAVE_CBLAS.
761 common_src.extend([join('src', 'common', 'cblasfuncs.c'),
762 join('src', 'common', 'python_xerbla.c'),
763 ])
764 if uses_accelerate_framework(blas_info):
765 common_src.extend(get_sgemv_fix())
766 else:
767 extra_info = {}
768
769 #######################################################################
770 # _multiarray_umath module - multiarray part #
771 #######################################################################
772
773 multiarray_deps = [
774 join('src', 'multiarray', 'arrayobject.h'),
775 join('src', 'multiarray', 'arraytypes.h'),
776 join('src', 'multiarray', 'buffer.h'),
777 join('src', 'multiarray', 'calculation.h'),
778 join('src', 'multiarray', 'common.h'),
779 join('src', 'multiarray', 'convert_datatype.h'),
780 join('src', 'multiarray', 'convert.h'),
781 join('src', 'multiarray', 'conversion_utils.h'),
782 join('src', 'multiarray', 'ctors.h'),
783 join('src', 'multiarray', 'descriptor.h'),
784 join('src', 'multiarray', 'dragon4.h'),
785 join('src', 'multiarray', 'getset.h'),
786 join('src', 'multiarray', 'hashdescr.h'),
787 join('src', 'multiarray', 'iterators.h'),
788 join('src', 'multiarray', 'mapping.h'),
789 join('src', 'multiarray', 'methods.h'),
790 join('src', 'multiarray', 'multiarraymodule.h'),
791 join('src', 'multiarray', 'nditer_impl.h'),
792 join('src', 'multiarray', 'number.h'),
793 join('src', 'multiarray', 'refcount.h'),
794 join('src', 'multiarray', 'scalartypes.h'),
795 join('src', 'multiarray', 'sequence.h'),
796 join('src', 'multiarray', 'shape.h'),
797 join('src', 'multiarray', 'strfuncs.h'),
798 join('src', 'multiarray', 'typeinfo.h'),
799 join('src', 'multiarray', 'usertypes.h'),
800 join('src', 'multiarray', 'vdot.h'),
801 join('include', 'numpy', 'arrayobject.h'),
802 join('include', 'numpy', '_neighborhood_iterator_imp.h'),
803 join('include', 'numpy', 'npy_endian.h'),
804 join('include', 'numpy', 'arrayscalars.h'),
805 join('include', 'numpy', 'noprefix.h'),
806 join('include', 'numpy', 'npy_interrupt.h'),
807 join('include', 'numpy', 'npy_3kcompat.h'),
808 join('include', 'numpy', 'npy_math.h'),
809 join('include', 'numpy', 'halffloat.h'),
810 join('include', 'numpy', 'npy_common.h'),
811 join('include', 'numpy', 'npy_os.h'),
812 join('include', 'numpy', 'utils.h'),
813 join('include', 'numpy', 'ndarrayobject.h'),
814 join('include', 'numpy', 'npy_cpu.h'),
815 join('include', 'numpy', 'numpyconfig.h'),
816 join('include', 'numpy', 'ndarraytypes.h'),
817 join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
818 # add library sources as distuils does not consider libraries
819 # dependencies
820 ] + npysort_sources + npymath_sources
821
822 multiarray_src = [
823 join('src', 'multiarray', 'alloc.c'),
824 join('src', 'multiarray', 'arrayobject.c'),
825 join('src', 'multiarray', 'arraytypes.c.src'),
826 join('src', 'multiarray', 'array_assign_scalar.c'),
827 join('src', 'multiarray', 'array_assign_array.c'),
828 join('src', 'multiarray', 'buffer.c'),
829 join('src', 'multiarray', 'calculation.c'),
830 join('src', 'multiarray', 'compiled_base.c'),
831 join('src', 'multiarray', 'common.c'),
832 join('src', 'multiarray', 'convert.c'),
833 join('src', 'multiarray', 'convert_datatype.c'),
834 join('src', 'multiarray', 'conversion_utils.c'),
835 join('src', 'multiarray', 'ctors.c'),
836 join('src', 'multiarray', 'datetime.c'),
837 join('src', 'multiarray', 'datetime_strings.c'),
838 join('src', 'multiarray', 'datetime_busday.c'),
839 join('src', 'multiarray', 'datetime_busdaycal.c'),
840 join('src', 'multiarray', 'descriptor.c'),
841 join('src', 'multiarray', 'dragon4.c'),
842 join('src', 'multiarray', 'dtype_transfer.c'),
843 join('src', 'multiarray', 'einsum.c.src'),
844 join('src', 'multiarray', 'flagsobject.c'),
845 join('src', 'multiarray', 'getset.c'),
846 join('src', 'multiarray', 'hashdescr.c'),
847 join('src', 'multiarray', 'item_selection.c'),
848 join('src', 'multiarray', 'iterators.c'),
849 join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
850 join('src', 'multiarray', 'mapping.c'),
851 join('src', 'multiarray', 'methods.c'),
852 join('src', 'multiarray', 'multiarraymodule.c'),
853 join('src', 'multiarray', 'nditer_templ.c.src'),
854 join('src', 'multiarray', 'nditer_api.c'),
855 join('src', 'multiarray', 'nditer_constr.c'),
856 join('src', 'multiarray', 'nditer_pywrap.c'),
857 join('src', 'multiarray', 'number.c'),
858 join('src', 'multiarray', 'refcount.c'),
859 join('src', 'multiarray', 'sequence.c'),
860 join('src', 'multiarray', 'shape.c'),
861 join('src', 'multiarray', 'scalarapi.c'),
862 join('src', 'multiarray', 'scalartypes.c.src'),
863 join('src', 'multiarray', 'strfuncs.c'),
864 join('src', 'multiarray', 'temp_elide.c'),
865 join('src', 'multiarray', 'typeinfo.c'),
866 join('src', 'multiarray', 'usertypes.c'),
867 join('src', 'multiarray', 'vdot.c'),
868 ]
869
870 #######################################################################
871 # _multiarray_umath module - umath part #
872 #######################################################################
873
874 def generate_umath_c(ext, build_dir):
875 target = join(build_dir, header_dir, '__umath_generated.c')
876 dir = os.path.dirname(target)
877 if not os.path.exists(dir):
878 os.makedirs(dir)
879 script = generate_umath_py
880 if newer(script, target):
881 f = open(target, 'w')
882 f.write(generate_umath.make_code(generate_umath.defdict,
883 generate_umath.__file__))
884 f.close()
885 return []
886
887 umath_src = [
888 join('src', 'umath', 'umathmodule.c'),
889 join('src', 'umath', 'reduction.c'),
890 join('src', 'umath', 'funcs.inc.src'),
891 join('src', 'umath', 'simd.inc.src'),
892 join('src', 'umath', 'loops.h.src'),
893 join('src', 'umath', 'loops.c.src'),
894 join('src', 'umath', 'ufunc_object.c'),
895 join('src', 'umath', 'extobj.c'),
896 join('src', 'umath', 'cpuid.c'),
897 join('src', 'umath', 'scalarmath.c.src'),
898 join('src', 'umath', 'ufunc_type_resolution.c'),
899 join('src', 'umath', 'override.c'),
900 ]
901
902 umath_deps = [
903 generate_umath_py,
904 join('include', 'numpy', 'npy_math.h'),
905 join('include', 'numpy', 'halffloat.h'),
906 join('src', 'multiarray', 'common.h'),
907 join('src', 'multiarray', 'number.h'),
908 join('src', 'common', 'templ_common.h.src'),
909 join('src', 'umath', 'simd.inc.src'),
910 join('src', 'umath', 'override.h'),
911 join(codegen_dir, 'generate_ufunc_api.py'),
912 ]
913
914 config.add_extension('_multiarray_umath',
915 sources=multiarray_src + umath_src +
916 npymath_sources + common_src +
917 [generate_config_h,
918 generate_numpyconfig_h,
919 generate_numpy_api,
920 join(codegen_dir, 'generate_numpy_api.py'),
921 join('*.py'),
922 generate_umath_c,
923 generate_ufunc_api,
924 ],
925 depends=deps + multiarray_deps + umath_deps +
926 common_deps,
927 libraries=['npymath', 'npysort'],
928 extra_info=extra_info)
929
930 #######################################################################
931 # umath_tests module #
932 #######################################################################
933
934 config.add_extension('_umath_tests',
935 sources=[join('src', 'umath', '_umath_tests.c.src')])
936
937 #######################################################################
938 # custom rational dtype module #
939 #######################################################################
940
941 config.add_extension('_rational_tests',
942 sources=[join('src', 'umath', '_rational_tests.c.src')])
943
944 #######################################################################
945 # struct_ufunc_test module #
946 #######################################################################
947
948 config.add_extension('_struct_ufunc_tests',
949 sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')])
950
951
952 #######################################################################
953 # operand_flag_tests module #
954 #######################################################################
955
956 config.add_extension('_operand_flag_tests',
957 sources=[join('src', 'umath', '_operand_flag_tests.c.src')])
958
959 config.add_data_dir('tests')
960 config.add_data_dir('tests/data')
961
962 config.make_svn_version_py()
963
964 return config
965
966 if __name__ == '__main__':
967 from numpy.distutils.core import setup
968 setup(configuration=configuration)
969
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numpy/core/setup.py b/numpy/core/setup.py
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -379,8 +379,9 @@
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
- if config.check_compiler_gcc4():
- return '__attribute__((visibility("hidden")))'
+ hide = '__attribute__((visibility("hidden")))'
+ if config.check_gcc_function_attribute(hide, 'hideme'):
+ return hide
else:
return ''
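
For clarity, here is the patched helper written out in full, with explanatory comments added. The probe it relies on, `config.check_gcc_function_attribute`, is the same numpy.distutils check already applied to `OPTIONAL_FUNCTION_ATTRIBUTES` in `check_math_capabilities` above; as I understand it, it tries to compile a small dummy function carrying the attribute, so the hidden-visibility define is emitted for any compiler that accepts GCC-style visibility attributes rather than only gcc 4:

```python
def visibility_define(config):
    """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
    string)."""
    hide = '__attribute__((visibility("hidden")))'
    # Probe the compiler directly: if it accepts the attribute on a throwaway
    # function ('hideme' is just an arbitrary test name), use it for
    # NPY_VISIBILITY_HIDDEN; otherwise fall back to an empty define.
    if config.check_gcc_function_attribute(hide, 'hideme'):
        return hide
    else:
        return ''
```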
| {"golden_diff": "diff --git a/numpy/core/setup.py b/numpy/core/setup.py\n--- a/numpy/core/setup.py\n+++ b/numpy/core/setup.py\n@@ -379,8 +379,9 @@\n def visibility_define(config):\n \"\"\"Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty\n string).\"\"\"\n- if config.check_compiler_gcc4():\n- return '__attribute__((visibility(\"hidden\")))'\n+ hide = '__attribute__((visibility(\"hidden\")))'\n+ if config.check_gcc_function_attribute(hide, 'hideme'):\n+ return hide\n else:\n return ''\n", "issue": "BUG: functions marked with NPY_NO_EXPORT still are exported\nSteps to reproduce (on linux):\r\n- git checkout\r\n- `python setup.py build_ext`\r\n- choose a random function marked with the `NPY_NO_EXPORT` macro, for instance [`datetime_metadata_divides`](https://github.com/numpy/numpy/blob/v1.15.4/numpy/core/src/multiarray/_datetime.h#L108) and ~call \r\n `nm build/lib*/numpy/core/_multiarray_umath*.so |grep datetime_metadata_divides`~ check that the function is not exported:\r\n ```\r\n import ctypes, numpy as np\r\n dll = ctypes.CDLL(np.core._multiarray_umath.__file__)\r\n print(getattr(dll, `datetime_metadata_divides`, None)\r\n ```\r\nNote that the function appears in the result. It should not. I think the problem is in the `visibility_define` [function](https://github.com/numpy/numpy/blob/v1.15.4/numpy/core/setup.py#L379) which only hides the functions for gcc 4.\r\n\r\nEdit: use ctypes to check for export\n", "before_files": [{"content": "from __future__ import division, print_function\n\nimport os\nimport sys\nimport pickle\nimport copy\nimport warnings\nimport platform\nfrom os.path import join\nfrom numpy.distutils import log\nfrom distutils.dep_util import newer\nfrom distutils.sysconfig import get_config_var\nfrom numpy._build_utils.apple_accelerate import (\n uses_accelerate_framework, get_sgemv_fix\n )\nfrom numpy.compat import npy_load_module\nfrom setup_common import *\n\n# Set to True to enable relaxed strides checking. This (mostly) means\n# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.\nNPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"1\") != \"0\")\n\n# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a\n# bogus value for affected strides in order to help smoke out bad stride usage\n# when relaxed stride checking is enabled.\nNPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', \"0\") != \"0\")\nNPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING\n\n# XXX: ugly, we use a class to avoid calling twice some expensive functions in\n# config.h/numpyconfig.h. I don't see a better way because distutils force\n# config.h generation inside an Extension class, and as such sharing\n# configuration information between extensions is not easy.\n# Using a pickled-based memoize does not work because config_cmd is an instance\n# method, which cPickle does not like.\n#\n# Use pickle in all cases, as cPickle is gone in python3 and the difference\n# in time is only in build. 
-- Charles Harris, 2013-03-30\n\nclass CallOnceOnly(object):\n def __init__(self):\n self._check_types = None\n self._check_ieee_macros = None\n self._check_complex = None\n\n def check_types(self, *a, **kw):\n if self._check_types is None:\n out = check_types(*a, **kw)\n self._check_types = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_types))\n return out\n\n def check_ieee_macros(self, *a, **kw):\n if self._check_ieee_macros is None:\n out = check_ieee_macros(*a, **kw)\n self._check_ieee_macros = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_ieee_macros))\n return out\n\n def check_complex(self, *a, **kw):\n if self._check_complex is None:\n out = check_complex(*a, **kw)\n self._check_complex = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_complex))\n return out\n\ndef pythonlib_dir():\n \"\"\"return path where libpython* is.\"\"\"\n if sys.platform == 'win32':\n return os.path.join(sys.prefix, \"libs\")\n else:\n return get_config_var('LIBDIR')\n\ndef is_npy_no_signal():\n \"\"\"Return True if the NPY_NO_SIGNAL symbol must be defined in configuration\n header.\"\"\"\n return sys.platform == 'win32'\n\ndef is_npy_no_smp():\n \"\"\"Return True if the NPY_NO_SMP symbol must be defined in public\n header (when SMP support cannot be reliably enabled).\"\"\"\n # Perhaps a fancier check is in order here.\n # so that threads are only enabled if there\n # are actually multiple CPUS? -- but\n # threaded code can be nice even on a single\n # CPU so that long-calculating code doesn't\n # block.\n return 'NPY_NOSMP' in os.environ\n\ndef win32_checks(deflist):\n from numpy.distutils.misc_util import get_build_architecture\n a = get_build_architecture()\n\n # Distutils hack on AMD64 on windows\n print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %\n (a, os.name, sys.platform))\n if a == 'AMD64':\n deflist.append('DISTUTILS_USE_SDK')\n\n # On win32, force long double format string to be 'g', not\n # 'Lg', since the MS runtime does not support long double whose\n # size is > sizeof(double)\n if a == \"Intel\" or a == \"AMD64\":\n deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')\n\ndef check_math_capabilities(config, moredefs, mathlibs):\n def check_func(func_name):\n return config.check_func(func_name, libraries=mathlibs,\n decl=True, call=True)\n\n def check_funcs_once(funcs_name):\n decl = dict([(f, True) for f in funcs_name])\n st = config.check_funcs_once(funcs_name, libraries=mathlibs,\n decl=decl, call=decl)\n if st:\n moredefs.extend([(fname2def(f), 1) for f in funcs_name])\n return st\n\n def check_funcs(funcs_name):\n # Use check_funcs_once first, and if it does not work, test func per\n # func. Return success only if all the functions are available\n if not check_funcs_once(funcs_name):\n # Global check failed, check func per func\n for f in funcs_name:\n if check_func(f):\n moredefs.append((fname2def(f), 1))\n return 0\n else:\n return 1\n\n #use_msvc = config.check_decl(\"_MSC_VER\")\n\n if not check_funcs_once(MANDATORY_FUNCS):\n raise SystemError(\"One of the required function to build numpy is not\"\n \" available (the list is %s).\" % str(MANDATORY_FUNCS))\n\n # Standard functions which may not be available and for which we have a\n # replacement implementation. Note that some of these are C99 functions.\n\n # XXX: hack to circumvent cpp pollution from python: python put its\n # config.h in the public namespace, so we have a clash for the common\n # functions we test. 
We remove every function tested by python's\n # autoconf, hoping their own test are correct\n for f in OPTIONAL_STDFUNCS_MAYBE:\n if config.check_decl(fname2def(f),\n headers=[\"Python.h\", \"math.h\"]):\n OPTIONAL_STDFUNCS.remove(f)\n\n check_funcs(OPTIONAL_STDFUNCS)\n\n for h in OPTIONAL_HEADERS:\n if config.check_func(\"\", decl=False, call=False, headers=[h]):\n h = h.replace(\".\", \"_\").replace(os.path.sep, \"_\")\n moredefs.append((fname2def(h), 1))\n\n for tup in OPTIONAL_INTRINSICS:\n headers = None\n if len(tup) == 2:\n f, args, m = tup[0], tup[1], fname2def(tup[0])\n elif len(tup) == 3:\n f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])\n else:\n f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])\n if config.check_func(f, decl=False, call=True, call_args=args,\n headers=headers):\n moredefs.append((m, 1))\n\n for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:\n if config.check_gcc_function_attribute(dec, fn):\n moredefs.append((fname2def(fn), 1))\n\n for fn in OPTIONAL_VARIABLE_ATTRIBUTES:\n if config.check_gcc_variable_attribute(fn):\n m = fn.replace(\"(\", \"_\").replace(\")\", \"_\")\n moredefs.append((fname2def(m), 1))\n\n # C99 functions: float and long double versions\n check_funcs(C99_FUNCS_SINGLE)\n check_funcs(C99_FUNCS_EXTENDED)\n\ndef check_complex(config, mathlibs):\n priv = []\n pub = []\n\n try:\n if os.uname()[0] == \"Interix\":\n warnings.warn(\"Disabling broken complex support. See #1365\", stacklevel=2)\n return priv, pub\n except Exception:\n # os.uname not available on all platforms. blanket except ugly but safe\n pass\n\n # Check for complex support\n st = config.check_header('complex.h')\n if st:\n priv.append(('HAVE_COMPLEX_H', 1))\n pub.append(('NPY_USE_C99_COMPLEX', 1))\n\n for t in C99_COMPLEX_TYPES:\n st = config.check_type(t, headers=[\"complex.h\"])\n if st:\n pub.append(('NPY_HAVE_%s' % type2def(t), 1))\n\n def check_prec(prec):\n flist = [f + prec for f in C99_COMPLEX_FUNCS]\n decl = dict([(f, True) for f in flist])\n if not config.check_funcs_once(flist, call=decl, decl=decl,\n libraries=mathlibs):\n for f in flist:\n if config.check_func(f, call=True, decl=True,\n libraries=mathlibs):\n priv.append((fname2def(f), 1))\n else:\n priv.extend([(fname2def(f), 1) for f in flist])\n\n check_prec('')\n check_prec('f')\n check_prec('l')\n\n return priv, pub\n\ndef check_ieee_macros(config):\n priv = []\n pub = []\n\n macros = []\n\n def _add_decl(f):\n priv.append(fname2def(\"decl_%s\" % f))\n pub.append('NPY_%s' % fname2def(\"decl_%s\" % f))\n\n # XXX: hack to circumvent cpp pollution from python: python put its\n # config.h in the public namespace, so we have a clash for the common\n # functions we test. We remove every function tested by python's\n # autoconf, hoping their own test are correct\n _macros = [\"isnan\", \"isinf\", \"signbit\", \"isfinite\"]\n for f in _macros:\n py_symbol = fname2def(\"decl_%s\" % f)\n already_declared = config.check_decl(py_symbol,\n headers=[\"Python.h\", \"math.h\"])\n if already_declared:\n if config.check_macro_true(py_symbol,\n headers=[\"Python.h\", \"math.h\"]):\n pub.append('NPY_%s' % fname2def(\"decl_%s\" % f))\n else:\n macros.append(f)\n # Normally, isnan and isinf are macro (C99), but some platforms only have\n # func, or both func and macro version. 
Check for macro only, and define\n # replacement ones if not found.\n # Note: including Python.h is necessary because it modifies some math.h\n # definitions\n for f in macros:\n st = config.check_decl(f, headers=[\"Python.h\", \"math.h\"])\n if st:\n _add_decl(f)\n\n return priv, pub\n\ndef check_types(config_cmd, ext, build_dir):\n private_defines = []\n public_defines = []\n\n # Expected size (in number of bytes) for each type. This is an\n # optimization: those are only hints, and an exhaustive search for the size\n # is done if the hints are wrong.\n expected = {'short': [2], 'int': [4], 'long': [8, 4],\n 'float': [4], 'double': [8], 'long double': [16, 12, 8],\n 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],\n 'off_t': [8, 4]}\n\n # Check we have the python header (-dev* packages on Linux)\n result = config_cmd.check_header('Python.h')\n if not result:\n python = 'python'\n if '__pypy__' in sys.builtin_module_names:\n python = 'pypy'\n raise SystemError(\n \"Cannot compile 'Python.h'. Perhaps you need to \"\n \"install {0}-dev|{0}-devel.\".format(python))\n res = config_cmd.check_header(\"endian.h\")\n if res:\n private_defines.append(('HAVE_ENDIAN_H', 1))\n public_defines.append(('NPY_HAVE_ENDIAN_H', 1))\n res = config_cmd.check_header(\"sys/endian.h\")\n if res:\n private_defines.append(('HAVE_SYS_ENDIAN_H', 1))\n public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))\n\n # Check basic types sizes\n for type in ('short', 'int', 'long'):\n res = config_cmd.check_decl(\"SIZEOF_%s\" % sym2def(type), headers=[\"Python.h\"])\n if res:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), \"SIZEOF_%s\" % sym2def(type)))\n else:\n res = config_cmd.check_type_size(type, expected=expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n for type in ('float', 'double', 'long double'):\n already_declared = config_cmd.check_decl(\"SIZEOF_%s\" % sym2def(type),\n headers=[\"Python.h\"])\n res = config_cmd.check_type_size(type, expected=expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n if not already_declared and not type == 'long double':\n private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n # Compute size of corresponding complex type: used to check that our\n # definition is binary compatible with C99 complex type (check done at\n # build time in npy_common.h)\n complex_def = \"struct {%s __x; %s __y;}\" % (type, type)\n res = config_cmd.check_type_size(complex_def,\n expected=[2 * x for x in expected[type]])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % complex_def)\n\n for type in ('Py_intptr_t', 'off_t'):\n res = config_cmd.check_type_size(type, headers=[\"Python.h\"],\n library_dirs=[pythonlib_dir()],\n expected=expected[type])\n\n if res >= 0:\n private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n # We check declaration AND type because that's how distutils does it.\n if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):\n res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],\n library_dirs=[pythonlib_dir()],\n 
expected=expected['PY_LONG_LONG'])\n if res >= 0:\n private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % 'PY_LONG_LONG')\n\n res = config_cmd.check_type_size('long long',\n expected=expected['long long'])\n if res >= 0:\n #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % 'long long')\n\n if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):\n raise RuntimeError(\n \"Config wo CHAR_BIT is not supported\"\n \", please contact the maintainers\")\n\n return private_defines, public_defines\n\ndef check_mathlib(config_cmd):\n # Testing the C math library\n mathlibs = []\n mathlibs_choices = [[], ['m'], ['cpml']]\n mathlib = os.environ.get('MATHLIB')\n if mathlib:\n mathlibs_choices.insert(0, mathlib.split(','))\n for libs in mathlibs_choices:\n if config_cmd.check_func(\"exp\", libraries=libs, decl=True, call=True):\n mathlibs = libs\n break\n else:\n raise EnvironmentError(\"math library missing; rerun \"\n \"setup.py after setting the \"\n \"MATHLIB env variable\")\n return mathlibs\n\ndef visibility_define(config):\n \"\"\"Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty\n string).\"\"\"\n if config.check_compiler_gcc4():\n return '__attribute__((visibility(\"hidden\")))'\n else:\n return ''\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration, dot_join\n from numpy.distutils.system_info import get_info\n\n config = Configuration('core', parent_package, top_path)\n local_dir = config.local_path\n codegen_dir = join(local_dir, 'code_generators')\n\n if is_released(config):\n warnings.simplefilter('error', MismatchCAPIWarning)\n\n # Check whether we have a mismatch between the set C API VERSION and the\n # actual C API VERSION\n check_api_version(C_API_VERSION, codegen_dir)\n\n generate_umath_py = join(codegen_dir, 'generate_umath.py')\n n = dot_join(config.name, 'generate_umath')\n generate_umath = npy_load_module('_'.join(n.split('.')),\n generate_umath_py, ('.py', 'U', 1))\n\n header_dir = 'include/numpy' # this is relative to config.path_in_package\n\n cocache = CallOnceOnly()\n\n def generate_config_h(ext, build_dir):\n target = join(build_dir, header_dir, 'config.h')\n d = os.path.dirname(target)\n if not os.path.exists(d):\n os.makedirs(d)\n\n if newer(__file__, target):\n config_cmd = config.get_config_cmd()\n log.info('Generating %s', target)\n\n # Check sizeof\n moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)\n\n # Check math library and C99 math funcs availability\n mathlibs = check_mathlib(config_cmd)\n moredefs.append(('MATHLIB', ','.join(mathlibs)))\n\n check_math_capabilities(config_cmd, moredefs, mathlibs)\n moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])\n moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])\n\n # Signal check\n if is_npy_no_signal():\n moredefs.append('__NPY_PRIVATE_NO_SIGNAL')\n\n # Windows checks\n if sys.platform == 'win32' or os.name == 'nt':\n win32_checks(moredefs)\n\n # C99 restrict keyword\n moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))\n\n # Inline check\n inline = config_cmd.check_inline()\n\n # Use relaxed stride checking\n if NPY_RELAXED_STRIDES_CHECKING:\n 
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))\n\n # Use bogus stride debug aid when relaxed strides are enabled\n if NPY_RELAXED_STRIDES_DEBUG:\n moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))\n\n # Get long double representation\n rep = check_long_double_representation(config_cmd)\n moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))\n\n # Py3K check\n if sys.version_info[0] == 3:\n moredefs.append(('NPY_PY3K', 1))\n\n # Generate the config.h file from moredefs\n target_f = open(target, 'w')\n for d in moredefs:\n if isinstance(d, str):\n target_f.write('#define %s\\n' % (d))\n else:\n target_f.write('#define %s %s\\n' % (d[0], d[1]))\n\n # define inline to our keyword, or nothing\n target_f.write('#ifndef __cplusplus\\n')\n if inline == 'inline':\n target_f.write('/* #undef inline */\\n')\n else:\n target_f.write('#define inline %s\\n' % inline)\n target_f.write('#endif\\n')\n\n # add the guard to make sure config.h is never included directly,\n # but always through npy_config.h\n target_f.write(\"\"\"\n#ifndef _NPY_NPY_CONFIG_H_\n#error config.h should never be included directly, include npy_config.h instead\n#endif\n\"\"\")\n\n target_f.close()\n print('File:', target)\n target_f = open(target)\n print(target_f.read())\n target_f.close()\n print('EOF')\n else:\n mathlibs = []\n target_f = open(target)\n for line in target_f:\n s = '#define MATHLIB'\n if line.startswith(s):\n value = line[len(s):].strip()\n if value:\n mathlibs.extend(value.split(','))\n target_f.close()\n\n # Ugly: this can be called within a library and not an extension,\n # in which case there is no libraries attributes (and none is\n # needed).\n if hasattr(ext, 'libraries'):\n ext.libraries.extend(mathlibs)\n\n incl_dir = os.path.dirname(target)\n if incl_dir not in config.numpy_include_dirs:\n config.numpy_include_dirs.append(incl_dir)\n\n return target\n\n def generate_numpyconfig_h(ext, build_dir):\n \"\"\"Depends on config.h: generate_config_h has to be called before !\"\"\"\n # put common include directory in build_dir on search path\n # allows using code generation in headers headers\n config.add_include_dirs(join(build_dir, \"src\", \"common\"))\n config.add_include_dirs(join(build_dir, \"src\", \"npymath\"))\n\n target = join(build_dir, header_dir, '_numpyconfig.h')\n d = os.path.dirname(target)\n if not os.path.exists(d):\n os.makedirs(d)\n if newer(__file__, target):\n config_cmd = config.get_config_cmd()\n log.info('Generating %s', target)\n\n # Check sizeof\n ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)\n\n if is_npy_no_signal():\n moredefs.append(('NPY_NO_SIGNAL', 1))\n\n if is_npy_no_smp():\n moredefs.append(('NPY_NO_SMP', 1))\n else:\n moredefs.append(('NPY_NO_SMP', 0))\n\n mathlibs = check_mathlib(config_cmd)\n moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])\n moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])\n\n if NPY_RELAXED_STRIDES_CHECKING:\n moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))\n\n if NPY_RELAXED_STRIDES_DEBUG:\n moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))\n\n # Check whether we can use inttypes (C99) formats\n if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):\n moredefs.append(('NPY_USE_C99_FORMATS', 1))\n\n # visibility check\n hidden_visibility = visibility_define(config_cmd)\n moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))\n\n # Add the C API/ABI versions\n moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))\n moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))\n\n # Add 
moredefs to header\n target_f = open(target, 'w')\n for d in moredefs:\n if isinstance(d, str):\n target_f.write('#define %s\\n' % (d))\n else:\n target_f.write('#define %s %s\\n' % (d[0], d[1]))\n\n # Define __STDC_FORMAT_MACROS\n target_f.write(\"\"\"\n#ifndef __STDC_FORMAT_MACROS\n#define __STDC_FORMAT_MACROS 1\n#endif\n\"\"\")\n target_f.close()\n\n # Dump the numpyconfig.h header to stdout\n print('File: %s' % target)\n target_f = open(target)\n print(target_f.read())\n target_f.close()\n print('EOF')\n config.add_data_files((header_dir, target))\n return target\n\n def generate_api_func(module_name):\n def generate_api(ext, build_dir):\n script = join(codegen_dir, module_name + '.py')\n sys.path.insert(0, codegen_dir)\n try:\n m = __import__(module_name)\n log.info('executing %s', script)\n h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))\n finally:\n del sys.path[0]\n config.add_data_files((header_dir, h_file),\n (header_dir, doc_file))\n return (h_file,)\n return generate_api\n\n generate_numpy_api = generate_api_func('generate_numpy_api')\n generate_ufunc_api = generate_api_func('generate_ufunc_api')\n\n config.add_include_dirs(join(local_dir, \"src\", \"common\"))\n config.add_include_dirs(join(local_dir, \"src\"))\n config.add_include_dirs(join(local_dir))\n\n config.add_data_files('include/numpy/*.h')\n config.add_include_dirs(join('src', 'npymath'))\n config.add_include_dirs(join('src', 'multiarray'))\n config.add_include_dirs(join('src', 'umath'))\n config.add_include_dirs(join('src', 'npysort'))\n\n config.add_define_macros([(\"NPY_INTERNAL_BUILD\", \"1\")]) # this macro indicates that Numpy build is in process\n config.add_define_macros([(\"HAVE_NPY_CONFIG_H\", \"1\")])\n if sys.platform[:3] == \"aix\":\n config.add_define_macros([(\"_LARGE_FILES\", None)])\n else:\n config.add_define_macros([(\"_FILE_OFFSET_BITS\", \"64\")])\n config.add_define_macros([('_LARGEFILE_SOURCE', '1')])\n config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])\n\n config.numpy_include_dirs.extend(config.paths('include'))\n\n deps = [join('src', 'npymath', '_signbit.c'),\n join('include', 'numpy', '*object.h'),\n join(codegen_dir, 'genapi.py'),\n ]\n\n #######################################################################\n # dummy module #\n #######################################################################\n\n # npymath needs the config.h and numpyconfig.h files to be generated, but\n # build_clib cannot handle generate_config_h and generate_numpyconfig_h\n # (don't ask). 
Because clib are generated before extensions, we have to\n # explicitly add an extension which has generate_config_h and\n # generate_numpyconfig_h as sources *before* adding npymath.\n\n config.add_extension('_dummy',\n sources=[join('src', 'dummymodule.c'),\n generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api]\n )\n\n #######################################################################\n # npymath library #\n #######################################################################\n\n subst_dict = dict([(\"sep\", os.path.sep), (\"pkgname\", \"numpy.core\")])\n\n def get_mathlib_info(*args):\n # Another ugly hack: the mathlib info is known once build_src is run,\n # but we cannot use add_installed_pkg_config here either, so we only\n # update the substitution dictionary during npymath build\n config_cmd = config.get_config_cmd()\n\n # Check that the toolchain works, to fail early if it doesn't\n # (avoid late errors with MATHLIB which are confusing if the\n # compiler does not work).\n st = config_cmd.try_link('int main(void) { return 0;}')\n if not st:\n raise RuntimeError(\"Broken toolchain: cannot link a simple C program\")\n mlibs = check_mathlib(config_cmd)\n\n posix_mlib = ' '.join(['-l%s' % l for l in mlibs])\n msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])\n subst_dict[\"posix_mathlib\"] = posix_mlib\n subst_dict[\"msvc_mathlib\"] = msvc_mlib\n\n npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),\n join('src', 'npymath', 'npy_math.c'),\n join('src', 'npymath', 'ieee754.c.src'),\n join('src', 'npymath', 'npy_math_complex.c.src'),\n join('src', 'npymath', 'halffloat.c')\n ]\n\n # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.\n is_msvc = platform.system() == 'Windows'\n config.add_installed_library('npymath',\n sources=npymath_sources + [get_mathlib_info],\n install_dir='lib',\n build_info={\n 'include_dirs' : [], # empty list required for creating npy_math_internal.h\n 'extra_compiler_args' : (['/GL-'] if is_msvc else []),\n })\n config.add_npy_pkg_config(\"npymath.ini.in\", \"lib/npy-pkg-config\",\n subst_dict)\n config.add_npy_pkg_config(\"mlib.ini.in\", \"lib/npy-pkg-config\",\n subst_dict)\n\n #######################################################################\n # npysort library #\n #######################################################################\n\n # This library is created for the build but it is not installed\n npysort_sources = [join('src', 'common', 'npy_sort.h.src'),\n join('src', 'npysort', 'quicksort.c.src'),\n join('src', 'npysort', 'mergesort.c.src'),\n join('src', 'npysort', 'heapsort.c.src'),\n join('src', 'common', 'npy_partition.h.src'),\n join('src', 'npysort', 'selection.c.src'),\n join('src', 'common', 'npy_binsearch.h.src'),\n join('src', 'npysort', 'binsearch.c.src'),\n ]\n config.add_library('npysort',\n sources=npysort_sources,\n include_dirs=[])\n\n #######################################################################\n # multiarray_tests module #\n #######################################################################\n\n config.add_extension('_multiarray_tests',\n sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),\n join('src', 'common', 'mem_overlap.c')],\n depends=[join('src', 'common', 'mem_overlap.h'),\n join('src', 'common', 'npy_extint128.h')],\n libraries=['npymath'])\n\n #######################################################################\n # _multiarray_umath module - common part #\n 
#######################################################################\n\n common_deps = [\n join('src', 'common', 'array_assign.h'),\n join('src', 'common', 'binop_override.h'),\n join('src', 'common', 'cblasfuncs.h'),\n join('src', 'common', 'lowlevel_strided_loops.h'),\n join('src', 'common', 'mem_overlap.h'),\n join('src', 'common', 'npy_config.h'),\n join('src', 'common', 'npy_ctypes.h'),\n join('src', 'common', 'npy_extint128.h'),\n join('src', 'common', 'npy_import.h'),\n join('src', 'common', 'npy_longdouble.h'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'common', 'ucsnarrow.h'),\n join('src', 'common', 'ufunc_override.h'),\n join('src', 'common', 'umathmodule.h'),\n join('src', 'common', 'numpyos.h'),\n ]\n\n common_src = [\n join('src', 'common', 'array_assign.c'),\n join('src', 'common', 'mem_overlap.c'),\n join('src', 'common', 'npy_longdouble.c'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'common', 'ucsnarrow.c'),\n join('src', 'common', 'ufunc_override.c'),\n join('src', 'common', 'numpyos.c'),\n ]\n\n blas_info = get_info('blas_opt', 0)\n if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):\n extra_info = blas_info\n # These files are also in MANIFEST.in so that they are always in\n # the source distribution independently of HAVE_CBLAS.\n common_src.extend([join('src', 'common', 'cblasfuncs.c'),\n join('src', 'common', 'python_xerbla.c'),\n ])\n if uses_accelerate_framework(blas_info):\n common_src.extend(get_sgemv_fix())\n else:\n extra_info = {}\n\n #######################################################################\n # _multiarray_umath module - multiarray part #\n #######################################################################\n\n multiarray_deps = [\n join('src', 'multiarray', 'arrayobject.h'),\n join('src', 'multiarray', 'arraytypes.h'),\n join('src', 'multiarray', 'buffer.h'),\n join('src', 'multiarray', 'calculation.h'),\n join('src', 'multiarray', 'common.h'),\n join('src', 'multiarray', 'convert_datatype.h'),\n join('src', 'multiarray', 'convert.h'),\n join('src', 'multiarray', 'conversion_utils.h'),\n join('src', 'multiarray', 'ctors.h'),\n join('src', 'multiarray', 'descriptor.h'),\n join('src', 'multiarray', 'dragon4.h'),\n join('src', 'multiarray', 'getset.h'),\n join('src', 'multiarray', 'hashdescr.h'),\n join('src', 'multiarray', 'iterators.h'),\n join('src', 'multiarray', 'mapping.h'),\n join('src', 'multiarray', 'methods.h'),\n join('src', 'multiarray', 'multiarraymodule.h'),\n join('src', 'multiarray', 'nditer_impl.h'),\n join('src', 'multiarray', 'number.h'),\n join('src', 'multiarray', 'refcount.h'),\n join('src', 'multiarray', 'scalartypes.h'),\n join('src', 'multiarray', 'sequence.h'),\n join('src', 'multiarray', 'shape.h'),\n join('src', 'multiarray', 'strfuncs.h'),\n join('src', 'multiarray', 'typeinfo.h'),\n join('src', 'multiarray', 'usertypes.h'),\n join('src', 'multiarray', 'vdot.h'),\n join('include', 'numpy', 'arrayobject.h'),\n join('include', 'numpy', '_neighborhood_iterator_imp.h'),\n join('include', 'numpy', 'npy_endian.h'),\n join('include', 'numpy', 'arrayscalars.h'),\n join('include', 'numpy', 'noprefix.h'),\n join('include', 'numpy', 'npy_interrupt.h'),\n join('include', 'numpy', 'npy_3kcompat.h'),\n join('include', 'numpy', 'npy_math.h'),\n join('include', 'numpy', 'halffloat.h'),\n join('include', 'numpy', 'npy_common.h'),\n join('include', 'numpy', 'npy_os.h'),\n join('include', 'numpy', 'utils.h'),\n join('include', 'numpy', 'ndarrayobject.h'),\n 
join('include', 'numpy', 'npy_cpu.h'),\n join('include', 'numpy', 'numpyconfig.h'),\n join('include', 'numpy', 'ndarraytypes.h'),\n join('include', 'numpy', 'npy_1_7_deprecated_api.h'),\n # add library sources as distuils does not consider libraries\n # dependencies\n ] + npysort_sources + npymath_sources\n\n multiarray_src = [\n join('src', 'multiarray', 'alloc.c'),\n join('src', 'multiarray', 'arrayobject.c'),\n join('src', 'multiarray', 'arraytypes.c.src'),\n join('src', 'multiarray', 'array_assign_scalar.c'),\n join('src', 'multiarray', 'array_assign_array.c'),\n join('src', 'multiarray', 'buffer.c'),\n join('src', 'multiarray', 'calculation.c'),\n join('src', 'multiarray', 'compiled_base.c'),\n join('src', 'multiarray', 'common.c'),\n join('src', 'multiarray', 'convert.c'),\n join('src', 'multiarray', 'convert_datatype.c'),\n join('src', 'multiarray', 'conversion_utils.c'),\n join('src', 'multiarray', 'ctors.c'),\n join('src', 'multiarray', 'datetime.c'),\n join('src', 'multiarray', 'datetime_strings.c'),\n join('src', 'multiarray', 'datetime_busday.c'),\n join('src', 'multiarray', 'datetime_busdaycal.c'),\n join('src', 'multiarray', 'descriptor.c'),\n join('src', 'multiarray', 'dragon4.c'),\n join('src', 'multiarray', 'dtype_transfer.c'),\n join('src', 'multiarray', 'einsum.c.src'),\n join('src', 'multiarray', 'flagsobject.c'),\n join('src', 'multiarray', 'getset.c'),\n join('src', 'multiarray', 'hashdescr.c'),\n join('src', 'multiarray', 'item_selection.c'),\n join('src', 'multiarray', 'iterators.c'),\n join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),\n join('src', 'multiarray', 'mapping.c'),\n join('src', 'multiarray', 'methods.c'),\n join('src', 'multiarray', 'multiarraymodule.c'),\n join('src', 'multiarray', 'nditer_templ.c.src'),\n join('src', 'multiarray', 'nditer_api.c'),\n join('src', 'multiarray', 'nditer_constr.c'),\n join('src', 'multiarray', 'nditer_pywrap.c'),\n join('src', 'multiarray', 'number.c'),\n join('src', 'multiarray', 'refcount.c'),\n join('src', 'multiarray', 'sequence.c'),\n join('src', 'multiarray', 'shape.c'),\n join('src', 'multiarray', 'scalarapi.c'),\n join('src', 'multiarray', 'scalartypes.c.src'),\n join('src', 'multiarray', 'strfuncs.c'),\n join('src', 'multiarray', 'temp_elide.c'),\n join('src', 'multiarray', 'typeinfo.c'),\n join('src', 'multiarray', 'usertypes.c'),\n join('src', 'multiarray', 'vdot.c'),\n ]\n\n #######################################################################\n # _multiarray_umath module - umath part #\n #######################################################################\n\n def generate_umath_c(ext, build_dir):\n target = join(build_dir, header_dir, '__umath_generated.c')\n dir = os.path.dirname(target)\n if not os.path.exists(dir):\n os.makedirs(dir)\n script = generate_umath_py\n if newer(script, target):\n f = open(target, 'w')\n f.write(generate_umath.make_code(generate_umath.defdict,\n generate_umath.__file__))\n f.close()\n return []\n\n umath_src = [\n join('src', 'umath', 'umathmodule.c'),\n join('src', 'umath', 'reduction.c'),\n join('src', 'umath', 'funcs.inc.src'),\n join('src', 'umath', 'simd.inc.src'),\n join('src', 'umath', 'loops.h.src'),\n join('src', 'umath', 'loops.c.src'),\n join('src', 'umath', 'ufunc_object.c'),\n join('src', 'umath', 'extobj.c'),\n join('src', 'umath', 'cpuid.c'),\n join('src', 'umath', 'scalarmath.c.src'),\n join('src', 'umath', 'ufunc_type_resolution.c'),\n join('src', 'umath', 'override.c'),\n ]\n\n umath_deps = [\n generate_umath_py,\n join('include', 'numpy', 
'npy_math.h'),\n join('include', 'numpy', 'halffloat.h'),\n join('src', 'multiarray', 'common.h'),\n join('src', 'multiarray', 'number.h'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'umath', 'simd.inc.src'),\n join('src', 'umath', 'override.h'),\n join(codegen_dir, 'generate_ufunc_api.py'),\n ]\n\n config.add_extension('_multiarray_umath',\n sources=multiarray_src + umath_src +\n npymath_sources + common_src +\n [generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api,\n join(codegen_dir, 'generate_numpy_api.py'),\n join('*.py'),\n generate_umath_c,\n generate_ufunc_api,\n ],\n depends=deps + multiarray_deps + umath_deps +\n common_deps,\n libraries=['npymath', 'npysort'],\n extra_info=extra_info)\n\n #######################################################################\n # umath_tests module #\n #######################################################################\n\n config.add_extension('_umath_tests',\n sources=[join('src', 'umath', '_umath_tests.c.src')])\n\n #######################################################################\n # custom rational dtype module #\n #######################################################################\n\n config.add_extension('_rational_tests',\n sources=[join('src', 'umath', '_rational_tests.c.src')])\n\n #######################################################################\n # struct_ufunc_test module #\n #######################################################################\n\n config.add_extension('_struct_ufunc_tests',\n sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')])\n\n\n #######################################################################\n # operand_flag_tests module #\n #######################################################################\n\n config.add_extension('_operand_flag_tests',\n sources=[join('src', 'umath', '_operand_flag_tests.c.src')])\n\n config.add_data_dir('tests')\n config.add_data_dir('tests/data')\n\n config.make_svn_version_py()\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(configuration=configuration)\n", "path": "numpy/core/setup.py"}], "after_files": [{"content": "from __future__ import division, print_function\n\nimport os\nimport sys\nimport pickle\nimport copy\nimport warnings\nimport platform\nfrom os.path import join\nfrom numpy.distutils import log\nfrom distutils.dep_util import newer\nfrom distutils.sysconfig import get_config_var\nfrom numpy._build_utils.apple_accelerate import (\n uses_accelerate_framework, get_sgemv_fix\n )\nfrom numpy.compat import npy_load_module\nfrom setup_common import *\n\n# Set to True to enable relaxed strides checking. This (mostly) means\n# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.\nNPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"1\") != \"0\")\n\n# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a\n# bogus value for affected strides in order to help smoke out bad stride usage\n# when relaxed stride checking is enabled.\nNPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', \"0\") != \"0\")\nNPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING\n\n# XXX: ugly, we use a class to avoid calling twice some expensive functions in\n# config.h/numpyconfig.h. 
I don't see a better way because distutils force\n# config.h generation inside an Extension class, and as such sharing\n# configuration information between extensions is not easy.\n# Using a pickled-based memoize does not work because config_cmd is an instance\n# method, which cPickle does not like.\n#\n# Use pickle in all cases, as cPickle is gone in python3 and the difference\n# in time is only in build. -- Charles Harris, 2013-03-30\n\nclass CallOnceOnly(object):\n def __init__(self):\n self._check_types = None\n self._check_ieee_macros = None\n self._check_complex = None\n\n def check_types(self, *a, **kw):\n if self._check_types is None:\n out = check_types(*a, **kw)\n self._check_types = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_types))\n return out\n\n def check_ieee_macros(self, *a, **kw):\n if self._check_ieee_macros is None:\n out = check_ieee_macros(*a, **kw)\n self._check_ieee_macros = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_ieee_macros))\n return out\n\n def check_complex(self, *a, **kw):\n if self._check_complex is None:\n out = check_complex(*a, **kw)\n self._check_complex = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_complex))\n return out\n\ndef pythonlib_dir():\n \"\"\"return path where libpython* is.\"\"\"\n if sys.platform == 'win32':\n return os.path.join(sys.prefix, \"libs\")\n else:\n return get_config_var('LIBDIR')\n\ndef is_npy_no_signal():\n \"\"\"Return True if the NPY_NO_SIGNAL symbol must be defined in configuration\n header.\"\"\"\n return sys.platform == 'win32'\n\ndef is_npy_no_smp():\n \"\"\"Return True if the NPY_NO_SMP symbol must be defined in public\n header (when SMP support cannot be reliably enabled).\"\"\"\n # Perhaps a fancier check is in order here.\n # so that threads are only enabled if there\n # are actually multiple CPUS? -- but\n # threaded code can be nice even on a single\n # CPU so that long-calculating code doesn't\n # block.\n return 'NPY_NOSMP' in os.environ\n\ndef win32_checks(deflist):\n from numpy.distutils.misc_util import get_build_architecture\n a = get_build_architecture()\n\n # Distutils hack on AMD64 on windows\n print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %\n (a, os.name, sys.platform))\n if a == 'AMD64':\n deflist.append('DISTUTILS_USE_SDK')\n\n # On win32, force long double format string to be 'g', not\n # 'Lg', since the MS runtime does not support long double whose\n # size is > sizeof(double)\n if a == \"Intel\" or a == \"AMD64\":\n deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')\n\ndef check_math_capabilities(config, moredefs, mathlibs):\n def check_func(func_name):\n return config.check_func(func_name, libraries=mathlibs,\n decl=True, call=True)\n\n def check_funcs_once(funcs_name):\n decl = dict([(f, True) for f in funcs_name])\n st = config.check_funcs_once(funcs_name, libraries=mathlibs,\n decl=decl, call=decl)\n if st:\n moredefs.extend([(fname2def(f), 1) for f in funcs_name])\n return st\n\n def check_funcs(funcs_name):\n # Use check_funcs_once first, and if it does not work, test func per\n # func. 
Return success only if all the functions are available\n if not check_funcs_once(funcs_name):\n # Global check failed, check func per func\n for f in funcs_name:\n if check_func(f):\n moredefs.append((fname2def(f), 1))\n return 0\n else:\n return 1\n\n #use_msvc = config.check_decl(\"_MSC_VER\")\n\n if not check_funcs_once(MANDATORY_FUNCS):\n raise SystemError(\"One of the required function to build numpy is not\"\n \" available (the list is %s).\" % str(MANDATORY_FUNCS))\n\n # Standard functions which may not be available and for which we have a\n # replacement implementation. Note that some of these are C99 functions.\n\n # XXX: hack to circumvent cpp pollution from python: python put its\n # config.h in the public namespace, so we have a clash for the common\n # functions we test. We remove every function tested by python's\n # autoconf, hoping their own test are correct\n for f in OPTIONAL_STDFUNCS_MAYBE:\n if config.check_decl(fname2def(f),\n headers=[\"Python.h\", \"math.h\"]):\n OPTIONAL_STDFUNCS.remove(f)\n\n check_funcs(OPTIONAL_STDFUNCS)\n\n for h in OPTIONAL_HEADERS:\n if config.check_func(\"\", decl=False, call=False, headers=[h]):\n h = h.replace(\".\", \"_\").replace(os.path.sep, \"_\")\n moredefs.append((fname2def(h), 1))\n\n for tup in OPTIONAL_INTRINSICS:\n headers = None\n if len(tup) == 2:\n f, args, m = tup[0], tup[1], fname2def(tup[0])\n elif len(tup) == 3:\n f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])\n else:\n f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])\n if config.check_func(f, decl=False, call=True, call_args=args,\n headers=headers):\n moredefs.append((m, 1))\n\n for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:\n if config.check_gcc_function_attribute(dec, fn):\n moredefs.append((fname2def(fn), 1))\n\n for fn in OPTIONAL_VARIABLE_ATTRIBUTES:\n if config.check_gcc_variable_attribute(fn):\n m = fn.replace(\"(\", \"_\").replace(\")\", \"_\")\n moredefs.append((fname2def(m), 1))\n\n # C99 functions: float and long double versions\n check_funcs(C99_FUNCS_SINGLE)\n check_funcs(C99_FUNCS_EXTENDED)\n\ndef check_complex(config, mathlibs):\n priv = []\n pub = []\n\n try:\n if os.uname()[0] == \"Interix\":\n warnings.warn(\"Disabling broken complex support. See #1365\", stacklevel=2)\n return priv, pub\n except Exception:\n # os.uname not available on all platforms. blanket except ugly but safe\n pass\n\n # Check for complex support\n st = config.check_header('complex.h')\n if st:\n priv.append(('HAVE_COMPLEX_H', 1))\n pub.append(('NPY_USE_C99_COMPLEX', 1))\n\n for t in C99_COMPLEX_TYPES:\n st = config.check_type(t, headers=[\"complex.h\"])\n if st:\n pub.append(('NPY_HAVE_%s' % type2def(t), 1))\n\n def check_prec(prec):\n flist = [f + prec for f in C99_COMPLEX_FUNCS]\n decl = dict([(f, True) for f in flist])\n if not config.check_funcs_once(flist, call=decl, decl=decl,\n libraries=mathlibs):\n for f in flist:\n if config.check_func(f, call=True, decl=True,\n libraries=mathlibs):\n priv.append((fname2def(f), 1))\n else:\n priv.extend([(fname2def(f), 1) for f in flist])\n\n check_prec('')\n check_prec('f')\n check_prec('l')\n\n return priv, pub\n\ndef check_ieee_macros(config):\n priv = []\n pub = []\n\n macros = []\n\n def _add_decl(f):\n priv.append(fname2def(\"decl_%s\" % f))\n pub.append('NPY_%s' % fname2def(\"decl_%s\" % f))\n\n # XXX: hack to circumvent cpp pollution from python: python put its\n # config.h in the public namespace, so we have a clash for the common\n # functions we test. 
We remove every function tested by python's\n # autoconf, hoping their own test are correct\n _macros = [\"isnan\", \"isinf\", \"signbit\", \"isfinite\"]\n for f in _macros:\n py_symbol = fname2def(\"decl_%s\" % f)\n already_declared = config.check_decl(py_symbol,\n headers=[\"Python.h\", \"math.h\"])\n if already_declared:\n if config.check_macro_true(py_symbol,\n headers=[\"Python.h\", \"math.h\"]):\n pub.append('NPY_%s' % fname2def(\"decl_%s\" % f))\n else:\n macros.append(f)\n # Normally, isnan and isinf are macro (C99), but some platforms only have\n # func, or both func and macro version. Check for macro only, and define\n # replacement ones if not found.\n # Note: including Python.h is necessary because it modifies some math.h\n # definitions\n for f in macros:\n st = config.check_decl(f, headers=[\"Python.h\", \"math.h\"])\n if st:\n _add_decl(f)\n\n return priv, pub\n\ndef check_types(config_cmd, ext, build_dir):\n private_defines = []\n public_defines = []\n\n # Expected size (in number of bytes) for each type. This is an\n # optimization: those are only hints, and an exhaustive search for the size\n # is done if the hints are wrong.\n expected = {'short': [2], 'int': [4], 'long': [8, 4],\n 'float': [4], 'double': [8], 'long double': [16, 12, 8],\n 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],\n 'off_t': [8, 4]}\n\n # Check we have the python header (-dev* packages on Linux)\n result = config_cmd.check_header('Python.h')\n if not result:\n python = 'python'\n if '__pypy__' in sys.builtin_module_names:\n python = 'pypy'\n raise SystemError(\n \"Cannot compile 'Python.h'. Perhaps you need to \"\n \"install {0}-dev|{0}-devel.\".format(python))\n res = config_cmd.check_header(\"endian.h\")\n if res:\n private_defines.append(('HAVE_ENDIAN_H', 1))\n public_defines.append(('NPY_HAVE_ENDIAN_H', 1))\n res = config_cmd.check_header(\"sys/endian.h\")\n if res:\n private_defines.append(('HAVE_SYS_ENDIAN_H', 1))\n public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))\n\n # Check basic types sizes\n for type in ('short', 'int', 'long'):\n res = config_cmd.check_decl(\"SIZEOF_%s\" % sym2def(type), headers=[\"Python.h\"])\n if res:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), \"SIZEOF_%s\" % sym2def(type)))\n else:\n res = config_cmd.check_type_size(type, expected=expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n for type in ('float', 'double', 'long double'):\n already_declared = config_cmd.check_decl(\"SIZEOF_%s\" % sym2def(type),\n headers=[\"Python.h\"])\n res = config_cmd.check_type_size(type, expected=expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n if not already_declared and not type == 'long double':\n private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n # Compute size of corresponding complex type: used to check that our\n # definition is binary compatible with C99 complex type (check done at\n # build time in npy_common.h)\n complex_def = \"struct {%s __x; %s __y;}\" % (type, type)\n res = config_cmd.check_type_size(complex_def,\n expected=[2 * x for x in expected[type]])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % complex_def)\n\n for type in ('Py_intptr_t', 'off_t'):\n res = 
config_cmd.check_type_size(type, headers=[\"Python.h\"],\n library_dirs=[pythonlib_dir()],\n expected=expected[type])\n\n if res >= 0:\n private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n # We check declaration AND type because that's how distutils does it.\n if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):\n res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],\n library_dirs=[pythonlib_dir()],\n expected=expected['PY_LONG_LONG'])\n if res >= 0:\n private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % 'PY_LONG_LONG')\n\n res = config_cmd.check_type_size('long long',\n expected=expected['long long'])\n if res >= 0:\n #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % 'long long')\n\n if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):\n raise RuntimeError(\n \"Config wo CHAR_BIT is not supported\"\n \", please contact the maintainers\")\n\n return private_defines, public_defines\n\ndef check_mathlib(config_cmd):\n # Testing the C math library\n mathlibs = []\n mathlibs_choices = [[], ['m'], ['cpml']]\n mathlib = os.environ.get('MATHLIB')\n if mathlib:\n mathlibs_choices.insert(0, mathlib.split(','))\n for libs in mathlibs_choices:\n if config_cmd.check_func(\"exp\", libraries=libs, decl=True, call=True):\n mathlibs = libs\n break\n else:\n raise EnvironmentError(\"math library missing; rerun \"\n \"setup.py after setting the \"\n \"MATHLIB env variable\")\n return mathlibs\n\ndef visibility_define(config):\n \"\"\"Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty\n string).\"\"\"\n hide = '__attribute__((visibility(\"hidden\")))'\n if config.check_gcc_function_attribute(hide, 'hideme'):\n return hide\n else:\n return ''\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration, dot_join\n from numpy.distutils.system_info import get_info\n\n config = Configuration('core', parent_package, top_path)\n local_dir = config.local_path\n codegen_dir = join(local_dir, 'code_generators')\n\n if is_released(config):\n warnings.simplefilter('error', MismatchCAPIWarning)\n\n # Check whether we have a mismatch between the set C API VERSION and the\n # actual C API VERSION\n check_api_version(C_API_VERSION, codegen_dir)\n\n generate_umath_py = join(codegen_dir, 'generate_umath.py')\n n = dot_join(config.name, 'generate_umath')\n generate_umath = npy_load_module('_'.join(n.split('.')),\n generate_umath_py, ('.py', 'U', 1))\n\n header_dir = 'include/numpy' # this is relative to config.path_in_package\n\n cocache = CallOnceOnly()\n\n def generate_config_h(ext, build_dir):\n target = join(build_dir, header_dir, 'config.h')\n d = os.path.dirname(target)\n if not os.path.exists(d):\n os.makedirs(d)\n\n if newer(__file__, target):\n config_cmd = config.get_config_cmd()\n log.info('Generating %s', target)\n\n # Check sizeof\n moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)\n\n # Check math library and C99 math funcs availability\n mathlibs = check_mathlib(config_cmd)\n moredefs.append(('MATHLIB', 
','.join(mathlibs)))\n\n check_math_capabilities(config_cmd, moredefs, mathlibs)\n moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])\n moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])\n\n # Signal check\n if is_npy_no_signal():\n moredefs.append('__NPY_PRIVATE_NO_SIGNAL')\n\n # Windows checks\n if sys.platform == 'win32' or os.name == 'nt':\n win32_checks(moredefs)\n\n # C99 restrict keyword\n moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))\n\n # Inline check\n inline = config_cmd.check_inline()\n\n # Use relaxed stride checking\n if NPY_RELAXED_STRIDES_CHECKING:\n moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))\n\n # Use bogus stride debug aid when relaxed strides are enabled\n if NPY_RELAXED_STRIDES_DEBUG:\n moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))\n\n # Get long double representation\n rep = check_long_double_representation(config_cmd)\n moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))\n\n # Py3K check\n if sys.version_info[0] == 3:\n moredefs.append(('NPY_PY3K', 1))\n\n # Generate the config.h file from moredefs\n target_f = open(target, 'w')\n for d in moredefs:\n if isinstance(d, str):\n target_f.write('#define %s\\n' % (d))\n else:\n target_f.write('#define %s %s\\n' % (d[0], d[1]))\n\n # define inline to our keyword, or nothing\n target_f.write('#ifndef __cplusplus\\n')\n if inline == 'inline':\n target_f.write('/* #undef inline */\\n')\n else:\n target_f.write('#define inline %s\\n' % inline)\n target_f.write('#endif\\n')\n\n # add the guard to make sure config.h is never included directly,\n # but always through npy_config.h\n target_f.write(\"\"\"\n#ifndef _NPY_NPY_CONFIG_H_\n#error config.h should never be included directly, include npy_config.h instead\n#endif\n\"\"\")\n\n target_f.close()\n print('File:', target)\n target_f = open(target)\n print(target_f.read())\n target_f.close()\n print('EOF')\n else:\n mathlibs = []\n target_f = open(target)\n for line in target_f:\n s = '#define MATHLIB'\n if line.startswith(s):\n value = line[len(s):].strip()\n if value:\n mathlibs.extend(value.split(','))\n target_f.close()\n\n # Ugly: this can be called within a library and not an extension,\n # in which case there is no libraries attributes (and none is\n # needed).\n if hasattr(ext, 'libraries'):\n ext.libraries.extend(mathlibs)\n\n incl_dir = os.path.dirname(target)\n if incl_dir not in config.numpy_include_dirs:\n config.numpy_include_dirs.append(incl_dir)\n\n return target\n\n def generate_numpyconfig_h(ext, build_dir):\n \"\"\"Depends on config.h: generate_config_h has to be called before !\"\"\"\n # put common include directory in build_dir on search path\n # allows using code generation in headers headers\n config.add_include_dirs(join(build_dir, \"src\", \"common\"))\n config.add_include_dirs(join(build_dir, \"src\", \"npymath\"))\n\n target = join(build_dir, header_dir, '_numpyconfig.h')\n d = os.path.dirname(target)\n if not os.path.exists(d):\n os.makedirs(d)\n if newer(__file__, target):\n config_cmd = config.get_config_cmd()\n log.info('Generating %s', target)\n\n # Check sizeof\n ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)\n\n if is_npy_no_signal():\n moredefs.append(('NPY_NO_SIGNAL', 1))\n\n if is_npy_no_smp():\n moredefs.append(('NPY_NO_SMP', 1))\n else:\n moredefs.append(('NPY_NO_SMP', 0))\n\n mathlibs = check_mathlib(config_cmd)\n moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])\n moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])\n\n if NPY_RELAXED_STRIDES_CHECKING:\n 
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))\n\n if NPY_RELAXED_STRIDES_DEBUG:\n moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))\n\n # Check whether we can use inttypes (C99) formats\n if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):\n moredefs.append(('NPY_USE_C99_FORMATS', 1))\n\n # visibility check\n hidden_visibility = visibility_define(config_cmd)\n moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))\n\n # Add the C API/ABI versions\n moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))\n moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))\n\n # Add moredefs to header\n target_f = open(target, 'w')\n for d in moredefs:\n if isinstance(d, str):\n target_f.write('#define %s\\n' % (d))\n else:\n target_f.write('#define %s %s\\n' % (d[0], d[1]))\n\n # Define __STDC_FORMAT_MACROS\n target_f.write(\"\"\"\n#ifndef __STDC_FORMAT_MACROS\n#define __STDC_FORMAT_MACROS 1\n#endif\n\"\"\")\n target_f.close()\n\n # Dump the numpyconfig.h header to stdout\n print('File: %s' % target)\n target_f = open(target)\n print(target_f.read())\n target_f.close()\n print('EOF')\n config.add_data_files((header_dir, target))\n return target\n\n def generate_api_func(module_name):\n def generate_api(ext, build_dir):\n script = join(codegen_dir, module_name + '.py')\n sys.path.insert(0, codegen_dir)\n try:\n m = __import__(module_name)\n log.info('executing %s', script)\n h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))\n finally:\n del sys.path[0]\n config.add_data_files((header_dir, h_file),\n (header_dir, doc_file))\n return (h_file,)\n return generate_api\n\n generate_numpy_api = generate_api_func('generate_numpy_api')\n generate_ufunc_api = generate_api_func('generate_ufunc_api')\n\n config.add_include_dirs(join(local_dir, \"src\", \"common\"))\n config.add_include_dirs(join(local_dir, \"src\"))\n config.add_include_dirs(join(local_dir))\n\n config.add_data_files('include/numpy/*.h')\n config.add_include_dirs(join('src', 'npymath'))\n config.add_include_dirs(join('src', 'multiarray'))\n config.add_include_dirs(join('src', 'umath'))\n config.add_include_dirs(join('src', 'npysort'))\n\n config.add_define_macros([(\"NPY_INTERNAL_BUILD\", \"1\")]) # this macro indicates that Numpy build is in process\n config.add_define_macros([(\"HAVE_NPY_CONFIG_H\", \"1\")])\n if sys.platform[:3] == \"aix\":\n config.add_define_macros([(\"_LARGE_FILES\", None)])\n else:\n config.add_define_macros([(\"_FILE_OFFSET_BITS\", \"64\")])\n config.add_define_macros([('_LARGEFILE_SOURCE', '1')])\n config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])\n\n config.numpy_include_dirs.extend(config.paths('include'))\n\n deps = [join('src', 'npymath', '_signbit.c'),\n join('include', 'numpy', '*object.h'),\n join(codegen_dir, 'genapi.py'),\n ]\n\n #######################################################################\n # dummy module #\n #######################################################################\n\n # npymath needs the config.h and numpyconfig.h files to be generated, but\n # build_clib cannot handle generate_config_h and generate_numpyconfig_h\n # (don't ask). 
Because clib are generated before extensions, we have to\n # explicitly add an extension which has generate_config_h and\n # generate_numpyconfig_h as sources *before* adding npymath.\n\n config.add_extension('_dummy',\n sources=[join('src', 'dummymodule.c'),\n generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api]\n )\n\n #######################################################################\n # npymath library #\n #######################################################################\n\n subst_dict = dict([(\"sep\", os.path.sep), (\"pkgname\", \"numpy.core\")])\n\n def get_mathlib_info(*args):\n # Another ugly hack: the mathlib info is known once build_src is run,\n # but we cannot use add_installed_pkg_config here either, so we only\n # update the substitution dictionary during npymath build\n config_cmd = config.get_config_cmd()\n\n # Check that the toolchain works, to fail early if it doesn't\n # (avoid late errors with MATHLIB which are confusing if the\n # compiler does not work).\n st = config_cmd.try_link('int main(void) { return 0;}')\n if not st:\n raise RuntimeError(\"Broken toolchain: cannot link a simple C program\")\n mlibs = check_mathlib(config_cmd)\n\n posix_mlib = ' '.join(['-l%s' % l for l in mlibs])\n msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])\n subst_dict[\"posix_mathlib\"] = posix_mlib\n subst_dict[\"msvc_mathlib\"] = msvc_mlib\n\n npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),\n join('src', 'npymath', 'npy_math.c'),\n join('src', 'npymath', 'ieee754.c.src'),\n join('src', 'npymath', 'npy_math_complex.c.src'),\n join('src', 'npymath', 'halffloat.c')\n ]\n\n # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.\n is_msvc = platform.system() == 'Windows'\n config.add_installed_library('npymath',\n sources=npymath_sources + [get_mathlib_info],\n install_dir='lib',\n build_info={\n 'include_dirs' : [], # empty list required for creating npy_math_internal.h\n 'extra_compiler_args' : (['/GL-'] if is_msvc else []),\n })\n config.add_npy_pkg_config(\"npymath.ini.in\", \"lib/npy-pkg-config\",\n subst_dict)\n config.add_npy_pkg_config(\"mlib.ini.in\", \"lib/npy-pkg-config\",\n subst_dict)\n\n #######################################################################\n # npysort library #\n #######################################################################\n\n # This library is created for the build but it is not installed\n npysort_sources = [join('src', 'common', 'npy_sort.h.src'),\n join('src', 'npysort', 'quicksort.c.src'),\n join('src', 'npysort', 'mergesort.c.src'),\n join('src', 'npysort', 'heapsort.c.src'),\n join('src', 'common', 'npy_partition.h.src'),\n join('src', 'npysort', 'selection.c.src'),\n join('src', 'common', 'npy_binsearch.h.src'),\n join('src', 'npysort', 'binsearch.c.src'),\n ]\n config.add_library('npysort',\n sources=npysort_sources,\n include_dirs=[])\n\n #######################################################################\n # multiarray_tests module #\n #######################################################################\n\n config.add_extension('_multiarray_tests',\n sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),\n join('src', 'common', 'mem_overlap.c')],\n depends=[join('src', 'common', 'mem_overlap.h'),\n join('src', 'common', 'npy_extint128.h')],\n libraries=['npymath'])\n\n #######################################################################\n # _multiarray_umath module - common part #\n 
#######################################################################\n\n common_deps = [\n join('src', 'common', 'array_assign.h'),\n join('src', 'common', 'binop_override.h'),\n join('src', 'common', 'cblasfuncs.h'),\n join('src', 'common', 'lowlevel_strided_loops.h'),\n join('src', 'common', 'mem_overlap.h'),\n join('src', 'common', 'npy_config.h'),\n join('src', 'common', 'npy_ctypes.h'),\n join('src', 'common', 'npy_extint128.h'),\n join('src', 'common', 'npy_import.h'),\n join('src', 'common', 'npy_longdouble.h'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'common', 'ucsnarrow.h'),\n join('src', 'common', 'ufunc_override.h'),\n join('src', 'common', 'umathmodule.h'),\n join('src', 'common', 'numpyos.h'),\n ]\n\n common_src = [\n join('src', 'common', 'array_assign.c'),\n join('src', 'common', 'mem_overlap.c'),\n join('src', 'common', 'npy_longdouble.c'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'common', 'ucsnarrow.c'),\n join('src', 'common', 'ufunc_override.c'),\n join('src', 'common', 'numpyos.c'),\n ]\n\n blas_info = get_info('blas_opt', 0)\n if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):\n extra_info = blas_info\n # These files are also in MANIFEST.in so that they are always in\n # the source distribution independently of HAVE_CBLAS.\n common_src.extend([join('src', 'common', 'cblasfuncs.c'),\n join('src', 'common', 'python_xerbla.c'),\n ])\n if uses_accelerate_framework(blas_info):\n common_src.extend(get_sgemv_fix())\n else:\n extra_info = {}\n\n #######################################################################\n # _multiarray_umath module - multiarray part #\n #######################################################################\n\n multiarray_deps = [\n join('src', 'multiarray', 'arrayobject.h'),\n join('src', 'multiarray', 'arraytypes.h'),\n join('src', 'multiarray', 'buffer.h'),\n join('src', 'multiarray', 'calculation.h'),\n join('src', 'multiarray', 'common.h'),\n join('src', 'multiarray', 'convert_datatype.h'),\n join('src', 'multiarray', 'convert.h'),\n join('src', 'multiarray', 'conversion_utils.h'),\n join('src', 'multiarray', 'ctors.h'),\n join('src', 'multiarray', 'descriptor.h'),\n join('src', 'multiarray', 'dragon4.h'),\n join('src', 'multiarray', 'getset.h'),\n join('src', 'multiarray', 'hashdescr.h'),\n join('src', 'multiarray', 'iterators.h'),\n join('src', 'multiarray', 'mapping.h'),\n join('src', 'multiarray', 'methods.h'),\n join('src', 'multiarray', 'multiarraymodule.h'),\n join('src', 'multiarray', 'nditer_impl.h'),\n join('src', 'multiarray', 'number.h'),\n join('src', 'multiarray', 'refcount.h'),\n join('src', 'multiarray', 'scalartypes.h'),\n join('src', 'multiarray', 'sequence.h'),\n join('src', 'multiarray', 'shape.h'),\n join('src', 'multiarray', 'strfuncs.h'),\n join('src', 'multiarray', 'typeinfo.h'),\n join('src', 'multiarray', 'usertypes.h'),\n join('src', 'multiarray', 'vdot.h'),\n join('include', 'numpy', 'arrayobject.h'),\n join('include', 'numpy', '_neighborhood_iterator_imp.h'),\n join('include', 'numpy', 'npy_endian.h'),\n join('include', 'numpy', 'arrayscalars.h'),\n join('include', 'numpy', 'noprefix.h'),\n join('include', 'numpy', 'npy_interrupt.h'),\n join('include', 'numpy', 'npy_3kcompat.h'),\n join('include', 'numpy', 'npy_math.h'),\n join('include', 'numpy', 'halffloat.h'),\n join('include', 'numpy', 'npy_common.h'),\n join('include', 'numpy', 'npy_os.h'),\n join('include', 'numpy', 'utils.h'),\n join('include', 'numpy', 'ndarrayobject.h'),\n 
join('include', 'numpy', 'npy_cpu.h'),\n join('include', 'numpy', 'numpyconfig.h'),\n join('include', 'numpy', 'ndarraytypes.h'),\n join('include', 'numpy', 'npy_1_7_deprecated_api.h'),\n # add library sources as distuils does not consider libraries\n # dependencies\n ] + npysort_sources + npymath_sources\n\n multiarray_src = [\n join('src', 'multiarray', 'alloc.c'),\n join('src', 'multiarray', 'arrayobject.c'),\n join('src', 'multiarray', 'arraytypes.c.src'),\n join('src', 'multiarray', 'array_assign_scalar.c'),\n join('src', 'multiarray', 'array_assign_array.c'),\n join('src', 'multiarray', 'buffer.c'),\n join('src', 'multiarray', 'calculation.c'),\n join('src', 'multiarray', 'compiled_base.c'),\n join('src', 'multiarray', 'common.c'),\n join('src', 'multiarray', 'convert.c'),\n join('src', 'multiarray', 'convert_datatype.c'),\n join('src', 'multiarray', 'conversion_utils.c'),\n join('src', 'multiarray', 'ctors.c'),\n join('src', 'multiarray', 'datetime.c'),\n join('src', 'multiarray', 'datetime_strings.c'),\n join('src', 'multiarray', 'datetime_busday.c'),\n join('src', 'multiarray', 'datetime_busdaycal.c'),\n join('src', 'multiarray', 'descriptor.c'),\n join('src', 'multiarray', 'dragon4.c'),\n join('src', 'multiarray', 'dtype_transfer.c'),\n join('src', 'multiarray', 'einsum.c.src'),\n join('src', 'multiarray', 'flagsobject.c'),\n join('src', 'multiarray', 'getset.c'),\n join('src', 'multiarray', 'hashdescr.c'),\n join('src', 'multiarray', 'item_selection.c'),\n join('src', 'multiarray', 'iterators.c'),\n join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),\n join('src', 'multiarray', 'mapping.c'),\n join('src', 'multiarray', 'methods.c'),\n join('src', 'multiarray', 'multiarraymodule.c'),\n join('src', 'multiarray', 'nditer_templ.c.src'),\n join('src', 'multiarray', 'nditer_api.c'),\n join('src', 'multiarray', 'nditer_constr.c'),\n join('src', 'multiarray', 'nditer_pywrap.c'),\n join('src', 'multiarray', 'number.c'),\n join('src', 'multiarray', 'refcount.c'),\n join('src', 'multiarray', 'sequence.c'),\n join('src', 'multiarray', 'shape.c'),\n join('src', 'multiarray', 'scalarapi.c'),\n join('src', 'multiarray', 'scalartypes.c.src'),\n join('src', 'multiarray', 'strfuncs.c'),\n join('src', 'multiarray', 'temp_elide.c'),\n join('src', 'multiarray', 'typeinfo.c'),\n join('src', 'multiarray', 'usertypes.c'),\n join('src', 'multiarray', 'vdot.c'),\n ]\n\n #######################################################################\n # _multiarray_umath module - umath part #\n #######################################################################\n\n def generate_umath_c(ext, build_dir):\n target = join(build_dir, header_dir, '__umath_generated.c')\n dir = os.path.dirname(target)\n if not os.path.exists(dir):\n os.makedirs(dir)\n script = generate_umath_py\n if newer(script, target):\n f = open(target, 'w')\n f.write(generate_umath.make_code(generate_umath.defdict,\n generate_umath.__file__))\n f.close()\n return []\n\n umath_src = [\n join('src', 'umath', 'umathmodule.c'),\n join('src', 'umath', 'reduction.c'),\n join('src', 'umath', 'funcs.inc.src'),\n join('src', 'umath', 'simd.inc.src'),\n join('src', 'umath', 'loops.h.src'),\n join('src', 'umath', 'loops.c.src'),\n join('src', 'umath', 'ufunc_object.c'),\n join('src', 'umath', 'extobj.c'),\n join('src', 'umath', 'cpuid.c'),\n join('src', 'umath', 'scalarmath.c.src'),\n join('src', 'umath', 'ufunc_type_resolution.c'),\n join('src', 'umath', 'override.c'),\n ]\n\n umath_deps = [\n generate_umath_py,\n join('include', 'numpy', 
'npy_math.h'),\n join('include', 'numpy', 'halffloat.h'),\n join('src', 'multiarray', 'common.h'),\n join('src', 'multiarray', 'number.h'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'umath', 'simd.inc.src'),\n join('src', 'umath', 'override.h'),\n join(codegen_dir, 'generate_ufunc_api.py'),\n ]\n\n config.add_extension('_multiarray_umath',\n sources=multiarray_src + umath_src +\n npymath_sources + common_src +\n [generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api,\n join(codegen_dir, 'generate_numpy_api.py'),\n join('*.py'),\n generate_umath_c,\n generate_ufunc_api,\n ],\n depends=deps + multiarray_deps + umath_deps +\n common_deps,\n libraries=['npymath', 'npysort'],\n extra_info=extra_info)\n\n #######################################################################\n # umath_tests module #\n #######################################################################\n\n config.add_extension('_umath_tests',\n sources=[join('src', 'umath', '_umath_tests.c.src')])\n\n #######################################################################\n # custom rational dtype module #\n #######################################################################\n\n config.add_extension('_rational_tests',\n sources=[join('src', 'umath', '_rational_tests.c.src')])\n\n #######################################################################\n # struct_ufunc_test module #\n #######################################################################\n\n config.add_extension('_struct_ufunc_tests',\n sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')])\n\n\n #######################################################################\n # operand_flag_tests module #\n #######################################################################\n\n config.add_extension('_operand_flag_tests',\n sources=[join('src', 'umath', '_operand_flag_tests.c.src')])\n\n config.add_data_dir('tests')\n config.add_data_dir('tests/data')\n\n config.make_svn_version_py()\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(configuration=configuration)\n", "path": "numpy/core/setup.py"}]} |
gh_patches_debug_1401 | rasdani/github-patches | git_diff | zulip__zulip-21726 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create collapsible "Private messages" section in left sidebar
At present, private messages are collapsed in the left sidebar, unless the user is in a private message narrow. This has a few downsides:
1. Getting to a PM conversation generally requires multiple clicks.
2. It's not immediately clear who sent you a new private message, which is important for determining whether one needs to read it right away.
3. It can be hard for new users to figure out how to view and send private messages.
In order to address this, we should try making a private messages section in the left sidebar that is open by default. Specifically:
1. Make a Private messages section just above STREAMS in the left sidebar that is open by default.
2. In the new PMs section, use the same algorithm we use for stream topics to decide how many conversations to show.
3. Make the PMs section collapsible, similar to the collapsible sections in #20072. The open/collapsed state should be sticky as the user navigates around Zulip, closes and reopens the window, logs out and in, etc.
Note that this will likely require experimentation for us to get it right. To avoid misdirected effort, please post screenshots in the #design stream on chat.zulip.org for feedback. Also, if (3) can't be implemented quickly, we can test the experience on chat.zulip.org without waiting for it to be completed.
[Prior discussion on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/private.20messages.20UI/near/1159032).
See also #11108.
--- END ISSUE ---
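To make point 2 of the issue above more concrete: deciding how many conversations to show is essentially a truncation heuristic over recent conversations. The sketch below is only an illustration of that idea; the `Conversation` type, its fields, and the `max_rows` default are assumptions, not Zulip's actual stream-topics algorithm (which lives in the web client's JavaScript).

```python
from dataclasses import dataclass
from typing import List


@dataclass
class Conversation:
    # Hypothetical stand-in for one PM conversation row in the left sidebar.
    name: str
    unread_count: int
    is_active: bool  # True if this is the narrow the user is currently viewing


def conversations_to_show(recent: List[Conversation], max_rows: int = 5) -> List[Conversation]:
    """Pick the PM conversations to render, topics-style: always keep unread or
    active conversations, then pad with the most recent others up to max_rows.
    `recent` is assumed to be sorted most-recent-first."""
    must_show = [c for c in recent if c.unread_count > 0 or c.is_active]
    others = [c for c in recent if not (c.unread_count > 0 or c.is_active)]
    room_left = max(max_rows - len(must_show), 0)
    return must_show + others[:room_left]
```

In the web app this presumably pairs with a "more conversations" expansion link; note that the golden diff below adds exactly that phrase to the capitalization allowlist.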
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/lib/capitalization.py`
Content:
```
1 import re
2 from typing import List, Match, Tuple
3
4 from bs4 import BeautifulSoup
5
6 # The phrases in this list will be ignored. The longest phrase is
7 # tried first; this removes the chance of smaller phrases changing
8 # the text before longer phrases are tried.
9 # The errors shown by `tools/check-capitalization` can be added to
10 # this list without any modification.
11 IGNORED_PHRASES = [
12 # Proper nouns and acronyms
13 r"API",
14 r"APNS",
15 r"Botserver",
16 r"Cookie Bot",
17 r"DevAuthBackend",
18 r"GCM",
19 r"GitHub",
20 r"Gravatar",
21 r"Help Center",
22 r"HTTP",
23 r"ID",
24 r"IDs",
25 r"IP",
26 r"JSON",
27 r"Kerberos",
28 r"LDAP",
29 r"Markdown",
30 r"OTP",
31 r"Pivotal",
32 r"PM",
33 r"PMs",
34 r"Slack",
35 r"Google",
36 r"Terms of Service",
37 r"Tuesday",
38 r"URL",
39 r"UUID",
40 r"Webathena",
41 r"WordPress",
42 r"Zephyr",
43 r"Zoom",
44 r"Zulip",
45 r"Zulip Server",
46 r"Zulip Account Security",
47 r"Zulip Security",
48 r"Zulip Cloud Standard",
49 r"BigBlueButton",
50 # Code things
51 r"\.zuliprc",
52 # BeautifulSoup will remove <z-user> which is horribly confusing,
53 # so we need more of the sentence.
54 r"<z-user></z-user> will have the same role",
55 # Things using "I"
56 r"I understand",
57 r"I'm",
58 r"I've",
59 # Specific short words
60 r"beta",
61 r"and",
62 r"bot",
63 r"e\.g\.",
64 r"enabled",
65 r"signups",
66 # Placeholders
67 r"keyword",
68 r"streamname",
69 r"user@example\.com",
70 # Fragments of larger strings
71 (r"your subscriptions on your Streams page"),
72 r"Add global time<br />Everyone sees global times in their own time zone\.",
73 r"user",
74 r"an unknown operating system",
75 r"Go to Settings",
76 # SPECIAL CASES
77 # Because topics usually are lower-case, this would look weird if it were capitalized
78 r"more topics",
79 # Used alone in a parenthetical where capitalized looks worse.
80 r"^deprecated$",
81 # Capital 'i' looks weird in reminders popover
82 r"in 1 hour",
83 r"in 20 minutes",
84 r"in 3 hours",
85 # these are used as topics
86 r"^new streams$",
87 r"^stream events$",
88 # These are used as example short names (e.g. an uncapitalized context):
89 r"^marketing$",
90 r"^cookie$",
91 # Used to refer custom time limits
92 r"\bN\b",
93 # Capital c feels obtrusive in clear status option
94 r"clear",
95 r"group private messages with \{recipient\}",
96 r"private messages with \{recipient\}",
97 r"private messages with yourself",
98 r"GIF",
99 # Emoji name placeholder
100 r"leafy green vegetable",
101 # Subdomain placeholder
102 r"your-organization-url",
103 # Used in invite modal
104 r"or",
105 # Used in GIPHY popover.
106 r"GIFs",
107 r"GIPHY",
108 # Used in our case studies
109 r"Technical University of Munich",
110 r"University of California San Diego",
111 # Used in stream creation form
112 r"email hidden",
113 # Use in compose box.
114 r"to send",
115 r"to add a new line",
116 # Used in showing Notification Bot read receipts message
117 "Notification Bot",
118 # Used in presence_enabled setting label
119 r"invisible mode off",
120 # Typeahead suggestions for "Pronouns" custom field type.
121 r"he/him",
122 r"she/her",
123 r"they/them",
124 ]
125
126 # Sort regexes in descending order of their lengths. As a result, the
127 # longer phrases will be ignored first.
128 IGNORED_PHRASES.sort(key=lambda regex: len(regex), reverse=True)
129
130 # Compile regexes to improve performance. This also extracts the
131 # text using BeautifulSoup and then removes extra whitespaces from
132 # it. This step enables us to add HTML in our regexes directly.
133 COMPILED_IGNORED_PHRASES = [
134 re.compile(" ".join(BeautifulSoup(regex, "lxml").text.split())) for regex in IGNORED_PHRASES
135 ]
136
137 SPLIT_BOUNDARY = "?.!" # Used to split string into sentences.
138 SPLIT_BOUNDARY_REGEX = re.compile(rf"[{SPLIT_BOUNDARY}]")
139
140 # Regexes which check capitalization in sentences.
141 DISALLOWED = [
142 r"^[a-z](?!\})", # Checks if the sentence starts with a lower case character.
143 r"^[A-Z][a-z]+[\sa-z0-9]+[A-Z]", # Checks if an upper case character exists
144 # after a lower case character when the first character is in upper case.
145 ]
146 DISALLOWED_REGEX = re.compile(r"|".join(DISALLOWED))
147
148 BANNED_WORDS = {
149 "realm": "The term realm should not appear in user-facing strings. Use organization instead.",
150 }
151
152
153 def get_safe_phrase(phrase: str) -> str:
154 """
155 Safe phrase is in lower case and doesn't contain characters which can
156 conflict with split boundaries. All conflicting characters are replaced
157 with low dash (_).
158 """
159 phrase = SPLIT_BOUNDARY_REGEX.sub("_", phrase)
160 return phrase.lower()
161
162
163 def replace_with_safe_phrase(matchobj: Match[str]) -> str:
164 """
165 The idea is to convert IGNORED_PHRASES into safe phrases, see
166 `get_safe_phrase()` function. The only exception is when the
167 IGNORED_PHRASE is at the start of the text or after a split
168 boundary; in this case, we change the first letter of the phrase
169 to upper case.
170 """
171 ignored_phrase = matchobj.group(0)
172 safe_string = get_safe_phrase(ignored_phrase)
173
174 start_index = matchobj.start()
175 complete_string = matchobj.string
176
177 is_string_start = start_index == 0
178 # We expect that there will be one space between split boundary
179 # and the next word.
180 punctuation = complete_string[max(start_index - 2, 0)]
181 is_after_split_boundary = punctuation in SPLIT_BOUNDARY
182 if is_string_start or is_after_split_boundary:
183 return safe_string.capitalize()
184
185 return safe_string
186
187
188 def get_safe_text(text: str) -> str:
189 """
190 This returns text which is rendered by BeautifulSoup and is in the
191 form that can be split easily and has all IGNORED_PHRASES processed.
192 """
193 soup = BeautifulSoup(text, "lxml")
194 text = " ".join(soup.text.split()) # Remove extra whitespaces.
195 for phrase_regex in COMPILED_IGNORED_PHRASES:
196 text = phrase_regex.sub(replace_with_safe_phrase, text)
197
198 return text
199
200
201 def is_capitalized(safe_text: str) -> bool:
202 sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)
203 return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)
204
205
206 def check_banned_words(text: str) -> List[str]:
207 lower_cased_text = text.lower()
208 errors = []
209 for word, reason in BANNED_WORDS.items():
210 if word in lower_cased_text:
211 # Hack: Should move this into BANNED_WORDS framework; for
212 # now, just hand-code the skips:
213 if "realm_name" in lower_cased_text:
214 continue
215 kwargs = dict(word=word, text=text, reason=reason)
216 msg = "{word} found in '{text}'. {reason}".format(**kwargs)
217 errors.append(msg)
218
219 return errors
220
221
222 def check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:
223 errors = []
224 ignored = []
225 banned_word_errors = []
226 for text in strings:
227 text = " ".join(text.split()) # Remove extra whitespaces.
228 safe_text = get_safe_text(text)
229 has_ignored_phrase = text != safe_text
230 capitalized = is_capitalized(safe_text)
231 if not capitalized:
232 errors.append(text)
233 elif has_ignored_phrase:
234 ignored.append(text)
235
236 banned_word_errors.extend(check_banned_words(text))
237
238 return sorted(errors), sorted(ignored), sorted(banned_word_errors)
239
```
--- END FILES ---
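As a quick orientation to the file above, here is a minimal usage sketch. The import path and the sample strings are assumptions made for illustration; they are not part of the repository.
```python
# Hypothetical usage of the helpers defined above (assumed import path).
from tools.lib.capitalization import check_capitalization

strings = [
    "Subscribe to a stream",   # sentence case: passes silently
    "Subscribe To A Stream",   # capitals after the first word: reported as an error
    "more topics",             # matches an IGNORED_PHRASES entry: reported as ignored
    "Delete this realm?",      # contains a banned word: reported separately
]

errors, ignored, banned_word_errors = check_capitalization(strings)
print(errors)              # ['Subscribe To A Stream']
print(ignored)             # ['more topics']
print(banned_word_errors)  # one message saying "realm" should be "organization"
```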
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -78,6 +78,9 @@
r"more topics",
# Used alone in a parenthetical where capitalized looks worse.
r"^deprecated$",
+ # We want the similar text in the Private Messages section to have the same capitalization.
+ r"more conversations",
+ r"back to streams",
# Capital 'i' looks weird in reminders popover
r"in 1 hour",
r"in 20 minutes",
| {"golden_diff": "diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py\n--- a/tools/lib/capitalization.py\n+++ b/tools/lib/capitalization.py\n@@ -78,6 +78,9 @@\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n+ # We want the similar text in the Private Messages section to have the same capitalization.\n+ r\"more conversations\",\n+ r\"back to streams\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n", "issue": "Create collapsible \"Private messages\" section in left sidebar\nAt present, private messages are collapsed in the left sidebar, unless the user is in a private message narrow. This has a few down sides:\r\n\r\n1. Getting to a PM conversation generally requires multiple clicks.\r\n2. It's not immediately clear who send you a new private message, which is important for determining whether one needs to read it right away.\r\n3. It can be hard for new users to figure out how to view and send private messages.\r\n\r\nIn order to address this, we should try making a private messages section in the left sidebar that is open by default. Specifically:\r\n\r\n1. Make a Private messages section just above STREAMS in the left sidebar that is open by default.\r\n2. In the new PMs section, use the same algorithm we use for stream topics to decide how many conversations to show.\r\n3. Make the PMs section collapsible, similar to the collapsible sections in #20072. The open/collapsed state should be sticky as the user navigates around Zulip, closes and reopens the window, logs out and in, etc.\r\n\r\nNote that this will likely require experimentation for us to get it right. To avoid misdirected effort, please post screenshots in the #design stream on chat.zulip.org for feedback. Also, if (3) can't be implemented quickly, we can test the experience in chat.zulip.org without waiting for it to be completed.\r\n\r\n[Prior discussion on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/private.20messages.20UI/near/1159032).\r\n\r\nSee also #11108.\n", "before_files": [{"content": "import re\nfrom typing import List, Match, Tuple\n\nfrom bs4 import BeautifulSoup\n\n# The phrases in this list will be ignored. 
The longest phrase is\n# tried first; this removes the chance of smaller phrases changing\n# the text before longer phrases are tried.\n# The errors shown by `tools/check-capitalization` can be added to\n# this list without any modification.\nIGNORED_PHRASES = [\n # Proper nouns and acronyms\n r\"API\",\n r\"APNS\",\n r\"Botserver\",\n r\"Cookie Bot\",\n r\"DevAuthBackend\",\n r\"GCM\",\n r\"GitHub\",\n r\"Gravatar\",\n r\"Help Center\",\n r\"HTTP\",\n r\"ID\",\n r\"IDs\",\n r\"IP\",\n r\"JSON\",\n r\"Kerberos\",\n r\"LDAP\",\n r\"Markdown\",\n r\"OTP\",\n r\"Pivotal\",\n r\"PM\",\n r\"PMs\",\n r\"Slack\",\n r\"Google\",\n r\"Terms of Service\",\n r\"Tuesday\",\n r\"URL\",\n r\"UUID\",\n r\"Webathena\",\n r\"WordPress\",\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n r\"Zulip Server\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud Standard\",\n r\"BigBlueButton\",\n # Code things\n r\"\\.zuliprc\",\n # BeautifulSoup will remove <z-user> which is horribly confusing,\n # so we need more of the sentence.\n r\"<z-user></z-user> will have the same role\",\n # Things using \"I\"\n r\"I understand\",\n r\"I'm\",\n r\"I've\",\n # Specific short words\n r\"beta\",\n r\"and\",\n r\"bot\",\n r\"e\\.g\\.\",\n r\"enabled\",\n r\"signups\",\n # Placeholders\n r\"keyword\",\n r\"streamname\",\n r\"user@example\\.com\",\n # Fragments of larger strings\n (r\"your subscriptions on your Streams page\"),\n r\"Add global time<br />Everyone sees global times in their own time zone\\.\",\n r\"user\",\n r\"an unknown operating system\",\n r\"Go to Settings\",\n # SPECIAL CASES\n # Because topics usually are lower-case, this would look weird if it were capitalized\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n r\"in 3 hours\",\n # these are used as topics\n r\"^new streams$\",\n r\"^stream events$\",\n # These are used as example short names (e.g. an uncapitalized context):\n r\"^marketing$\",\n r\"^cookie$\",\n # Used to refer custom time limits\n r\"\\bN\\b\",\n # Capital c feels obtrusive in clear status option\n r\"clear\",\n r\"group private messages with \\{recipient\\}\",\n r\"private messages with \\{recipient\\}\",\n r\"private messages with yourself\",\n r\"GIF\",\n # Emoji name placeholder\n r\"leafy green vegetable\",\n # Subdomain placeholder\n r\"your-organization-url\",\n # Used in invite modal\n r\"or\",\n # Used in GIPHY popover.\n r\"GIFs\",\n r\"GIPHY\",\n # Used in our case studies\n r\"Technical University of Munich\",\n r\"University of California San Diego\",\n # Used in stream creation form\n r\"email hidden\",\n # Use in compose box.\n r\"to send\",\n r\"to add a new line\",\n # Used in showing Notification Bot read receipts message\n \"Notification Bot\",\n # Used in presence_enabled setting label\n r\"invisible mode off\",\n # Typeahead suggestions for \"Pronouns\" custom field type.\n r\"he/him\",\n r\"she/her\",\n r\"they/them\",\n]\n\n# Sort regexes in descending order of their lengths. As a result, the\n# longer phrases will be ignored first.\nIGNORED_PHRASES.sort(key=lambda regex: len(regex), reverse=True)\n\n# Compile regexes to improve performance. This also extracts the\n# text using BeautifulSoup and then removes extra whitespaces from\n# it. 
This step enables us to add HTML in our regexes directly.\nCOMPILED_IGNORED_PHRASES = [\n re.compile(\" \".join(BeautifulSoup(regex, \"lxml\").text.split())) for regex in IGNORED_PHRASES\n]\n\nSPLIT_BOUNDARY = \"?.!\" # Used to split string into sentences.\nSPLIT_BOUNDARY_REGEX = re.compile(rf\"[{SPLIT_BOUNDARY}]\")\n\n# Regexes which check capitalization in sentences.\nDISALLOWED = [\n r\"^[a-z](?!\\})\", # Checks if the sentence starts with a lower case character.\n r\"^[A-Z][a-z]+[\\sa-z0-9]+[A-Z]\", # Checks if an upper case character exists\n # after a lower case character when the first character is in upper case.\n]\nDISALLOWED_REGEX = re.compile(r\"|\".join(DISALLOWED))\n\nBANNED_WORDS = {\n \"realm\": \"The term realm should not appear in user-facing strings. Use organization instead.\",\n}\n\n\ndef get_safe_phrase(phrase: str) -> str:\n \"\"\"\n Safe phrase is in lower case and doesn't contain characters which can\n conflict with split boundaries. All conflicting characters are replaced\n with low dash (_).\n \"\"\"\n phrase = SPLIT_BOUNDARY_REGEX.sub(\"_\", phrase)\n return phrase.lower()\n\n\ndef replace_with_safe_phrase(matchobj: Match[str]) -> str:\n \"\"\"\n The idea is to convert IGNORED_PHRASES into safe phrases, see\n `get_safe_phrase()` function. The only exception is when the\n IGNORED_PHRASE is at the start of the text or after a split\n boundary; in this case, we change the first letter of the phrase\n to upper case.\n \"\"\"\n ignored_phrase = matchobj.group(0)\n safe_string = get_safe_phrase(ignored_phrase)\n\n start_index = matchobj.start()\n complete_string = matchobj.string\n\n is_string_start = start_index == 0\n # We expect that there will be one space between split boundary\n # and the next word.\n punctuation = complete_string[max(start_index - 2, 0)]\n is_after_split_boundary = punctuation in SPLIT_BOUNDARY\n if is_string_start or is_after_split_boundary:\n return safe_string.capitalize()\n\n return safe_string\n\n\ndef get_safe_text(text: str) -> str:\n \"\"\"\n This returns text which is rendered by BeautifulSoup and is in the\n form that can be split easily and has all IGNORED_PHRASES processed.\n \"\"\"\n soup = BeautifulSoup(text, \"lxml\")\n text = \" \".join(soup.text.split()) # Remove extra whitespaces.\n for phrase_regex in COMPILED_IGNORED_PHRASES:\n text = phrase_regex.sub(replace_with_safe_phrase, text)\n\n return text\n\n\ndef is_capitalized(safe_text: str) -> bool:\n sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)\n return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)\n\n\ndef check_banned_words(text: str) -> List[str]:\n lower_cased_text = text.lower()\n errors = []\n for word, reason in BANNED_WORDS.items():\n if word in lower_cased_text:\n # Hack: Should move this into BANNED_WORDS framework; for\n # now, just hand-code the skips:\n if \"realm_name\" in lower_cased_text:\n continue\n kwargs = dict(word=word, text=text, reason=reason)\n msg = \"{word} found in '{text}'. 
{reason}\".format(**kwargs)\n errors.append(msg)\n\n return errors\n\n\ndef check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:\n errors = []\n ignored = []\n banned_word_errors = []\n for text in strings:\n text = \" \".join(text.split()) # Remove extra whitespaces.\n safe_text = get_safe_text(text)\n has_ignored_phrase = text != safe_text\n capitalized = is_capitalized(safe_text)\n if not capitalized:\n errors.append(text)\n elif has_ignored_phrase:\n ignored.append(text)\n\n banned_word_errors.extend(check_banned_words(text))\n\n return sorted(errors), sorted(ignored), sorted(banned_word_errors)\n", "path": "tools/lib/capitalization.py"}], "after_files": [{"content": "import re\nfrom typing import List, Match, Tuple\n\nfrom bs4 import BeautifulSoup\n\n# The phrases in this list will be ignored. The longest phrase is\n# tried first; this removes the chance of smaller phrases changing\n# the text before longer phrases are tried.\n# The errors shown by `tools/check-capitalization` can be added to\n# this list without any modification.\nIGNORED_PHRASES = [\n # Proper nouns and acronyms\n r\"API\",\n r\"APNS\",\n r\"Botserver\",\n r\"Cookie Bot\",\n r\"DevAuthBackend\",\n r\"GCM\",\n r\"GitHub\",\n r\"Gravatar\",\n r\"Help Center\",\n r\"HTTP\",\n r\"ID\",\n r\"IDs\",\n r\"IP\",\n r\"JSON\",\n r\"Kerberos\",\n r\"LDAP\",\n r\"Markdown\",\n r\"OTP\",\n r\"Pivotal\",\n r\"PM\",\n r\"PMs\",\n r\"Slack\",\n r\"Google\",\n r\"Terms of Service\",\n r\"Tuesday\",\n r\"URL\",\n r\"UUID\",\n r\"Webathena\",\n r\"WordPress\",\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n r\"Zulip Server\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud Standard\",\n r\"BigBlueButton\",\n # Code things\n r\"\\.zuliprc\",\n # BeautifulSoup will remove <z-user> which is horribly confusing,\n # so we need more of the sentence.\n r\"<z-user></z-user> will have the same role\",\n # Things using \"I\"\n r\"I understand\",\n r\"I'm\",\n r\"I've\",\n # Specific short words\n r\"beta\",\n r\"and\",\n r\"bot\",\n r\"e\\.g\\.\",\n r\"enabled\",\n r\"signups\",\n # Placeholders\n r\"keyword\",\n r\"streamname\",\n r\"user@example\\.com\",\n # Fragments of larger strings\n (r\"your subscriptions on your Streams page\"),\n r\"Add global time<br />Everyone sees global times in their own time zone\\.\",\n r\"user\",\n r\"an unknown operating system\",\n r\"Go to Settings\",\n # SPECIAL CASES\n # Because topics usually are lower-case, this would look weird if it were capitalized\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n # We want the similar text in the Private Messages section to have the same capitalization.\n r\"more conversations\",\n r\"back to streams\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n r\"in 3 hours\",\n # these are used as topics\n r\"^new streams$\",\n r\"^stream events$\",\n # These are used as example short names (e.g. 
an uncapitalized context):\n r\"^marketing$\",\n r\"^cookie$\",\n # Used to refer custom time limits\n r\"\\bN\\b\",\n # Capital c feels obtrusive in clear status option\n r\"clear\",\n r\"group private messages with \\{recipient\\}\",\n r\"private messages with \\{recipient\\}\",\n r\"private messages with yourself\",\n r\"GIF\",\n # Emoji name placeholder\n r\"leafy green vegetable\",\n # Subdomain placeholder\n r\"your-organization-url\",\n # Used in invite modal\n r\"or\",\n # Used in GIPHY popover.\n r\"GIFs\",\n r\"GIPHY\",\n # Used in our case studies\n r\"Technical University of Munich\",\n r\"University of California San Diego\",\n # Used in stream creation form\n r\"email hidden\",\n # Use in compose box.\n r\"to send\",\n r\"to add a new line\",\n # Used in showing Notification Bot read receipts message\n \"Notification Bot\",\n # Used in presence_enabled setting label\n r\"invisible mode off\",\n # Typeahead suggestions for \"Pronouns\" custom field type.\n r\"he/him\",\n r\"she/her\",\n r\"they/them\",\n]\n\n# Sort regexes in descending order of their lengths. As a result, the\n# longer phrases will be ignored first.\nIGNORED_PHRASES.sort(key=lambda regex: len(regex), reverse=True)\n\n# Compile regexes to improve performance. This also extracts the\n# text using BeautifulSoup and then removes extra whitespaces from\n# it. This step enables us to add HTML in our regexes directly.\nCOMPILED_IGNORED_PHRASES = [\n re.compile(\" \".join(BeautifulSoup(regex, \"lxml\").text.split())) for regex in IGNORED_PHRASES\n]\n\nSPLIT_BOUNDARY = \"?.!\" # Used to split string into sentences.\nSPLIT_BOUNDARY_REGEX = re.compile(rf\"[{SPLIT_BOUNDARY}]\")\n\n# Regexes which check capitalization in sentences.\nDISALLOWED = [\n r\"^[a-z](?!\\})\", # Checks if the sentence starts with a lower case character.\n r\"^[A-Z][a-z]+[\\sa-z0-9]+[A-Z]\", # Checks if an upper case character exists\n # after a lower case character when the first character is in upper case.\n]\nDISALLOWED_REGEX = re.compile(r\"|\".join(DISALLOWED))\n\nBANNED_WORDS = {\n \"realm\": \"The term realm should not appear in user-facing strings. Use organization instead.\",\n}\n\n\ndef get_safe_phrase(phrase: str) -> str:\n \"\"\"\n Safe phrase is in lower case and doesn't contain characters which can\n conflict with split boundaries. All conflicting characters are replaced\n with low dash (_).\n \"\"\"\n phrase = SPLIT_BOUNDARY_REGEX.sub(\"_\", phrase)\n return phrase.lower()\n\n\ndef replace_with_safe_phrase(matchobj: Match[str]) -> str:\n \"\"\"\n The idea is to convert IGNORED_PHRASES into safe phrases, see\n `get_safe_phrase()` function. 
The only exception is when the\n IGNORED_PHRASE is at the start of the text or after a split\n boundary; in this case, we change the first letter of the phrase\n to upper case.\n \"\"\"\n ignored_phrase = matchobj.group(0)\n safe_string = get_safe_phrase(ignored_phrase)\n\n start_index = matchobj.start()\n complete_string = matchobj.string\n\n is_string_start = start_index == 0\n # We expect that there will be one space between split boundary\n # and the next word.\n punctuation = complete_string[max(start_index - 2, 0)]\n is_after_split_boundary = punctuation in SPLIT_BOUNDARY\n if is_string_start or is_after_split_boundary:\n return safe_string.capitalize()\n\n return safe_string\n\n\ndef get_safe_text(text: str) -> str:\n \"\"\"\n This returns text which is rendered by BeautifulSoup and is in the\n form that can be split easily and has all IGNORED_PHRASES processed.\n \"\"\"\n soup = BeautifulSoup(text, \"lxml\")\n text = \" \".join(soup.text.split()) # Remove extra whitespaces.\n for phrase_regex in COMPILED_IGNORED_PHRASES:\n text = phrase_regex.sub(replace_with_safe_phrase, text)\n\n return text\n\n\ndef is_capitalized(safe_text: str) -> bool:\n sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)\n return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)\n\n\ndef check_banned_words(text: str) -> List[str]:\n lower_cased_text = text.lower()\n errors = []\n for word, reason in BANNED_WORDS.items():\n if word in lower_cased_text:\n # Hack: Should move this into BANNED_WORDS framework; for\n # now, just hand-code the skips:\n if \"realm_name\" in lower_cased_text:\n continue\n kwargs = dict(word=word, text=text, reason=reason)\n msg = \"{word} found in '{text}'. {reason}\".format(**kwargs)\n errors.append(msg)\n\n return errors\n\n\ndef check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:\n errors = []\n ignored = []\n banned_word_errors = []\n for text in strings:\n text = \" \".join(text.split()) # Remove extra whitespaces.\n safe_text = get_safe_text(text)\n has_ignored_phrase = text != safe_text\n capitalized = is_capitalized(safe_text)\n if not capitalized:\n errors.append(text)\n elif has_ignored_phrase:\n ignored.append(text)\n\n banned_word_errors.extend(check_banned_words(text))\n\n return sorted(errors), sorted(ignored), sorted(banned_word_errors)\n", "path": "tools/lib/capitalization.py"}]} |
gh_patches_debug_1402 | rasdani/github-patches | git_diff | mars-project__mars-426 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
How to execute all tensors packed into an np.array
Example:
```
import mars.tensor as mt
import numpy as np
def test_x():
X1 = mt.tensor([0, 1])
X2 = mt.tensor([2, 3])
X = np.array([X1,X2])
print(X.execute())
```
How to calculate X?
--- END ISSUE ---
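A note for readers trying the snippet above: `np.array([X1, X2])` returns a plain NumPy array holding tensor objects, not a Mars tensor, so it has no `.execute()`. A minimal sketch of the intended pattern, assuming a working Mars installation, is to let Mars do the stacking:
```python
import mars.tensor as mt

X1 = mt.tensor([0, 1])
X2 = mt.tensor([2, 3])

# Combine the tensors with Mars' own stacking primitive instead of np.array(...)
X = mt.stack([X1, X2])
print(X.execute())   # -> array([[0, 1],
                     #           [2, 3]])
```
The patch for this record widens `tensor()` so that a plain Python list of tensors, e.g. `mt.tensor([X1, X2])`, takes the same stacking path.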
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/tensor/expressions/datasource/array.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2018 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import numpy as np
18
19 from .... import opcodes as OperandDef
20 from ....lib.sparse.core import issparse, get_array_module, cp, cps, sps
21 from ....utils import on_serialize_shape, on_deserialize_shape
22 from ....serialize import ValueType, NDArrayField, TupleField
23 from ...core import TENSOR_TYPE, Tensor
24 from ..utils import get_chunk_slices
25 from .core import TensorNoInput
26 from .scalar import scalar
27
28
29 class ArrayDataSource(TensorNoInput):
30 """
31 Represents data from numpy or cupy array
32 """
33
34 _op_type_ = OperandDef.TENSOR_DATA_SOURCE
35
36 _data = NDArrayField('data')
37
38 def __init__(self, data=None, dtype=None, gpu=None, **kw):
39 if dtype is not None:
40 dtype = np.dtype(dtype)
41 elif data is not None:
42 dtype = np.dtype(data.dtype)
43 super(ArrayDataSource, self).__init__(_data=data, _dtype=dtype, _gpu=gpu, **kw)
44
45 @property
46 def data(self):
47 return self._data
48
49 def to_chunk_op(self, *args):
50 _, idx, chunk_size = args
51 chunk_op = self.copy().reset_key()
52 chunk_op._data = self.data[get_chunk_slices(chunk_size, idx)]
53
54 return chunk_op
55
56
57 class CSRMatrixDataSource(TensorNoInput):
58 """
59 Represents data from sparse array include scipy sparse or cupy sparse matrix.
60 """
61
62 _op_type_ = OperandDef.SPARSE_MATRIX_DATA_SOURCE
63
64 _indices = NDArrayField('indices')
65 _indptr = NDArrayField('indptr')
66 _data = NDArrayField('data')
67 _shape = TupleField('shape', ValueType.int64,
68 on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)
69
70 def __init__(self, indices=None, indptr=None, data=None, shape=None,
71 dtype=None, gpu=None, **kw):
72 super(CSRMatrixDataSource, self).__init__(_indices=indices, _indptr=indptr,
73 _data=data, _shape=shape, _dtype=dtype,
74 _gpu=gpu, _sparse=True, **kw)
75
76 def to_chunk_op(self, *args):
77 _, idx, chunk_size = args
78
79 xps = cps if self._gpu else sps
80 if len(self._shape) == 1:
81 shape = (1, self._shape[0])
82 else:
83 shape = self._shape
84 data = xps.csr_matrix(
85 (self._data, self._indices, self._indptr), shape)
86 chunk_data = data[get_chunk_slices(chunk_size, idx)]
87
88 chunk_op = self.copy().reset_key()
89 chunk_op._data = chunk_data.data
90 chunk_op._indices = chunk_data.indices
91 chunk_op._indptr = chunk_data.indptr
92 chunk_shape = chunk_data.shape[1:] \
93 if len(self._shape) == 1 else chunk_data.shape
94 chunk_op._shape = chunk_shape
95
96 return chunk_op
97
98 @property
99 def indices(self):
100 return self._indices
101
102 @property
103 def indptr(self):
104 return self._indptr
105
106 @property
107 def data(self):
108 return self._data
109
110 @property
111 def shape(self):
112 return self._shape
113
114
115 def _from_spmatrix(spmatrix, dtype=None, chunk_size=None, gpu=None):
116 if gpu is None and cp is not None and get_array_module(spmatrix) is cp:
117 gpu = True
118 if dtype and spmatrix.dtype != dtype:
119 spmatrix = spmatrix.astype(dtype)
120 spmatrix = spmatrix.tocsr()
121 op = CSRMatrixDataSource(indices=spmatrix.indices, indptr=spmatrix.indptr,
122 data=spmatrix.data, shape=spmatrix.shape,
123 dtype=spmatrix.dtype, gpu=gpu)
124 return op(spmatrix.shape, chunk_size=chunk_size)
125
126
127 def tensor(data, dtype=None, chunk_size=None, gpu=None, sparse=False):
128 if isinstance(data, TENSOR_TYPE):
129 if dtype is not None and data.dtype != dtype:
130 return data.astype(dtype)
131 return data
132 elif isinstance(data, tuple) and all(isinstance(d, TENSOR_TYPE) for d in data):
133 from ..merge import stack
134
135 data = stack(data)
136 if dtype is not None:
137 data = data.astype(dtype)
138 return data
139 elif np.isscalar(data):
140 return scalar(data, dtype=dtype)
141 elif issparse(data):
142 return _from_spmatrix(data, dtype=dtype, chunk_size=chunk_size, gpu=gpu)
143 else:
144 m = get_array_module(data)
145 data = m.asarray(data, dtype=dtype)
146 if gpu is None and cp is not None and m is cp:
147 gpu = True
148
149 if isinstance(data, np.ndarray):
150 if data.ndim == 0:
151 return scalar(data.item(), dtype=dtype)
152 op = ArrayDataSource(data, dtype=dtype, gpu=gpu)
153 t = op(data.shape, chunk_size=chunk_size)
154 if sparse and not t.issparse():
155 return t.tosparse()
156 return t
157 else:
158 raise ValueError('Cannot create tensor by given data: {0}'.format(data))
159
160
161 def array(x, dtype=None, copy=True, ndmin=None, chunk_size=None):
162 """
163 Create a tensor.
164
165 Parameters
166 ----------
167 object : array_like
168 An array, any object exposing the array interface, an object whose
169 __array__ method returns an array, or any (nested) sequence.
170 dtype : data-type, optional
171 The desired data-type for the array. If not given, then the type will
172 be determined as the minimum type required to hold the objects in the
173 sequence. This argument can only be used to 'upcast' the array. For
174 downcasting, use the .astype(t) method.
175 copy : bool, optional
176 If true (default), then the object is copied. Otherwise, a copy will
177 only be made if __array__ returns a copy, if obj is a nested sequence,
178 or if a copy is needed to satisfy any of the other requirements
179 (`dtype`, `order`, etc.).
180 ndmin : int, optional
181 Specifies the minimum number of dimensions that the resulting
182 array should have. Ones will be pre-pended to the shape as
183 needed to meet this requirement.
184 chunk_size: int, tuple, optional
185 Specifies chunk size for each dimension.
186
187 Returns
188 -------
189 out : Tensor
190 An tensor object satisfying the specified requirements.
191
192 See Also
193 --------
194 empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like
195
196 Examples
197 --------
198 >>> import mars.tensor as mt
199
200 >>> mt.array([1, 2, 3]).execute()
201 array([1, 2, 3])
202
203 Upcasting:
204
205 >>> mt.array([1, 2, 3.0]).execute()
206 array([ 1., 2., 3.])
207
208 More than one dimension:
209
210 >>> mt.array([[1, 2], [3, 4]]).execute()
211 array([[1, 2],
212 [3, 4]])
213
214 Minimum dimensions 2:
215
216 >>> mt.array([1, 2, 3], ndmin=2).execute()
217 array([[1, 2, 3]])
218
219 Type provided:
220
221 >>> mt.array([1, 2, 3], dtype=complex).execute()
222 array([ 1.+0.j, 2.+0.j, 3.+0.j])
223
224 """
225 raw_x = x
226 x = tensor(x, chunk_size=chunk_size)
227 if copy and x is raw_x:
228 x = Tensor(x.data)
229 while ndmin is not None and x.ndim < ndmin:
230 x = x[np.newaxis, :]
231 if dtype is not None and x.dtype != dtype:
232 x = x.astype(dtype)
233 return x
234
235
236 def asarray(x, dtype=None):
237 """Convert the input to an array.
238
239 Parameters
240 ----------
241 a : array_like
242 Input data, in any form that can be converted to a tensor. This
243 includes lists, lists of tuples, tuples, tuples of tuples, tuples
244 of lists and tensors.
245 dtype : data-type, optional
246 By default, the data-type is inferred from the input data.
247
248 Returns
249 -------
250 out : Tensor
251 Tensor interpretation of `a`. No copy is performed if the input
252 is already an ndarray with matching dtype and order. If `a` is a
253 subclass of ndarray, a base class ndarray is returned.
254
255 Examples
256 --------
257 Convert a list into an array:
258
259 >>> import mars.tensor as mt
260
261 >>> a = [1, 2]
262 >>> mt.asarray(a).execute()
263 array([1, 2])
264
265 Existing arrays are not copied:
266
267 >>> a = mt.array([1, 2])
268 >>> mt.asarray(a) is a
269 True
270
271 If `dtype` is set, array is copied only if dtype does not match:
272
273 >>> a = mt.array([1, 2], dtype=mt.float32)
274 >>> mt.asarray(a, dtype=mt.float32) is a
275 True
276 >>> mt.asarray(a, dtype=mt.float64) is a
277 False
278 """
279 return array(x, dtype=dtype, copy=False)
280
```
--- END FILES ---
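The `tensor()` function above (file lines 127-158) explains the behaviour reported in the issue: a tuple of tensors is routed to `stack(data)`, while a list of tensors skips that branch and falls through to the generic `np.asarray` handling, which does not treat its elements as Mars tensors. A short, hypothetical session illustrating the asymmetry:
```python
import mars.tensor as mt

t1, t2 = mt.tensor([0, 1]), mt.tensor([2, 3])

mt.tensor((t1, t2))  # tuple: hits the isinstance(data, tuple) branch and is stacked
mt.tensor([t1, t2])  # list: misses that branch and is handed to np.asarray instead
```
Widening the check to `isinstance(data, (tuple, list))` routes both forms through the same stacking path, which is what the reference patch does.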
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mars/tensor/expressions/datasource/array.py b/mars/tensor/expressions/datasource/array.py
--- a/mars/tensor/expressions/datasource/array.py
+++ b/mars/tensor/expressions/datasource/array.py
@@ -129,7 +129,7 @@
if dtype is not None and data.dtype != dtype:
return data.astype(dtype)
return data
- elif isinstance(data, tuple) and all(isinstance(d, TENSOR_TYPE) for d in data):
+ elif isinstance(data, (tuple, list)) and all(isinstance(d, TENSOR_TYPE) for d in data):
from ..merge import stack
data = stack(data)
| {"golden_diff": "diff --git a/mars/tensor/expressions/datasource/array.py b/mars/tensor/expressions/datasource/array.py\n--- a/mars/tensor/expressions/datasource/array.py\n+++ b/mars/tensor/expressions/datasource/array.py\n@@ -129,7 +129,7 @@\n if dtype is not None and data.dtype != dtype:\n return data.astype(dtype)\n return data\n- elif isinstance(data, tuple) and all(isinstance(d, TENSOR_TYPE) for d in data):\n+ elif isinstance(data, (tuple, list)) and all(isinstance(d, TENSOR_TYPE) for d in data):\n from ..merge import stack\n \n data = stack(data)\n", "issue": "How to perform all tensor on np.array packages\nExample\uff1a\r\n```\r\nimport mars.tensor as mt\r\nimport numpy as np\r\n\r\ndef test_x():\r\n X1 = mt.tensor([0, 1])\r\n X2 = mt.tensor([2, 3])\r\n X = np.array([X1,X2])\r\n print(X.execute())\r\n```\r\nHow to calculate X\uff1f\nHow to perform all tensor on np.array packages\nExample\uff1a\r\n```\r\nimport mars.tensor as mt\r\nimport numpy as np\r\n\r\ndef test_x():\r\n X1 = mt.tensor([0, 1])\r\n X2 = mt.tensor([2, 3])\r\n X = np.array([X1,X2])\r\n print(X.execute())\r\n```\r\nHow to calculate X\uff1f\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom .... 
import opcodes as OperandDef\nfrom ....lib.sparse.core import issparse, get_array_module, cp, cps, sps\nfrom ....utils import on_serialize_shape, on_deserialize_shape\nfrom ....serialize import ValueType, NDArrayField, TupleField\nfrom ...core import TENSOR_TYPE, Tensor\nfrom ..utils import get_chunk_slices\nfrom .core import TensorNoInput\nfrom .scalar import scalar\n\n\nclass ArrayDataSource(TensorNoInput):\n \"\"\"\n Represents data from numpy or cupy array\n \"\"\"\n\n _op_type_ = OperandDef.TENSOR_DATA_SOURCE\n\n _data = NDArrayField('data')\n\n def __init__(self, data=None, dtype=None, gpu=None, **kw):\n if dtype is not None:\n dtype = np.dtype(dtype)\n elif data is not None:\n dtype = np.dtype(data.dtype)\n super(ArrayDataSource, self).__init__(_data=data, _dtype=dtype, _gpu=gpu, **kw)\n\n @property\n def data(self):\n return self._data\n\n def to_chunk_op(self, *args):\n _, idx, chunk_size = args\n chunk_op = self.copy().reset_key()\n chunk_op._data = self.data[get_chunk_slices(chunk_size, idx)]\n\n return chunk_op\n\n\nclass CSRMatrixDataSource(TensorNoInput):\n \"\"\"\n Represents data from sparse array include scipy sparse or cupy sparse matrix.\n \"\"\"\n\n _op_type_ = OperandDef.SPARSE_MATRIX_DATA_SOURCE\n\n _indices = NDArrayField('indices')\n _indptr = NDArrayField('indptr')\n _data = NDArrayField('data')\n _shape = TupleField('shape', ValueType.int64,\n on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)\n\n def __init__(self, indices=None, indptr=None, data=None, shape=None,\n dtype=None, gpu=None, **kw):\n super(CSRMatrixDataSource, self).__init__(_indices=indices, _indptr=indptr,\n _data=data, _shape=shape, _dtype=dtype,\n _gpu=gpu, _sparse=True, **kw)\n\n def to_chunk_op(self, *args):\n _, idx, chunk_size = args\n\n xps = cps if self._gpu else sps\n if len(self._shape) == 1:\n shape = (1, self._shape[0])\n else:\n shape = self._shape\n data = xps.csr_matrix(\n (self._data, self._indices, self._indptr), shape)\n chunk_data = data[get_chunk_slices(chunk_size, idx)]\n\n chunk_op = self.copy().reset_key()\n chunk_op._data = chunk_data.data\n chunk_op._indices = chunk_data.indices\n chunk_op._indptr = chunk_data.indptr\n chunk_shape = chunk_data.shape[1:] \\\n if len(self._shape) == 1 else chunk_data.shape\n chunk_op._shape = chunk_shape\n\n return chunk_op\n\n @property\n def indices(self):\n return self._indices\n\n @property\n def indptr(self):\n return self._indptr\n\n @property\n def data(self):\n return self._data\n\n @property\n def shape(self):\n return self._shape\n\n\ndef _from_spmatrix(spmatrix, dtype=None, chunk_size=None, gpu=None):\n if gpu is None and cp is not None and get_array_module(spmatrix) is cp:\n gpu = True\n if dtype and spmatrix.dtype != dtype:\n spmatrix = spmatrix.astype(dtype)\n spmatrix = spmatrix.tocsr()\n op = CSRMatrixDataSource(indices=spmatrix.indices, indptr=spmatrix.indptr,\n data=spmatrix.data, shape=spmatrix.shape,\n dtype=spmatrix.dtype, gpu=gpu)\n return op(spmatrix.shape, chunk_size=chunk_size)\n\n\ndef tensor(data, dtype=None, chunk_size=None, gpu=None, sparse=False):\n if isinstance(data, TENSOR_TYPE):\n if dtype is not None and data.dtype != dtype:\n return data.astype(dtype)\n return data\n elif isinstance(data, tuple) and all(isinstance(d, TENSOR_TYPE) for d in data):\n from ..merge import stack\n\n data = stack(data)\n if dtype is not None:\n data = data.astype(dtype)\n return data\n elif np.isscalar(data):\n return scalar(data, dtype=dtype)\n elif issparse(data):\n return _from_spmatrix(data, dtype=dtype, 
chunk_size=chunk_size, gpu=gpu)\n else:\n m = get_array_module(data)\n data = m.asarray(data, dtype=dtype)\n if gpu is None and cp is not None and m is cp:\n gpu = True\n\n if isinstance(data, np.ndarray):\n if data.ndim == 0:\n return scalar(data.item(), dtype=dtype)\n op = ArrayDataSource(data, dtype=dtype, gpu=gpu)\n t = op(data.shape, chunk_size=chunk_size)\n if sparse and not t.issparse():\n return t.tosparse()\n return t\n else:\n raise ValueError('Cannot create tensor by given data: {0}'.format(data))\n\n\ndef array(x, dtype=None, copy=True, ndmin=None, chunk_size=None):\n \"\"\"\n Create a tensor.\n\n Parameters\n ----------\n object : array_like\n An array, any object exposing the array interface, an object whose\n __array__ method returns an array, or any (nested) sequence.\n dtype : data-type, optional\n The desired data-type for the array. If not given, then the type will\n be determined as the minimum type required to hold the objects in the\n sequence. This argument can only be used to 'upcast' the array. For\n downcasting, use the .astype(t) method.\n copy : bool, optional\n If true (default), then the object is copied. Otherwise, a copy will\n only be made if __array__ returns a copy, if obj is a nested sequence,\n or if a copy is needed to satisfy any of the other requirements\n (`dtype`, `order`, etc.).\n ndmin : int, optional\n Specifies the minimum number of dimensions that the resulting\n array should have. Ones will be pre-pended to the shape as\n needed to meet this requirement.\n chunk_size: int, tuple, optional\n Specifies chunk size for each dimension.\n\n Returns\n -------\n out : Tensor\n An tensor object satisfying the specified requirements.\n\n See Also\n --------\n empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.array([1, 2, 3]).execute()\n array([1, 2, 3])\n\n Upcasting:\n\n >>> mt.array([1, 2, 3.0]).execute()\n array([ 1., 2., 3.])\n\n More than one dimension:\n\n >>> mt.array([[1, 2], [3, 4]]).execute()\n array([[1, 2],\n [3, 4]])\n\n Minimum dimensions 2:\n\n >>> mt.array([1, 2, 3], ndmin=2).execute()\n array([[1, 2, 3]])\n\n Type provided:\n\n >>> mt.array([1, 2, 3], dtype=complex).execute()\n array([ 1.+0.j, 2.+0.j, 3.+0.j])\n\n \"\"\"\n raw_x = x\n x = tensor(x, chunk_size=chunk_size)\n if copy and x is raw_x:\n x = Tensor(x.data)\n while ndmin is not None and x.ndim < ndmin:\n x = x[np.newaxis, :]\n if dtype is not None and x.dtype != dtype:\n x = x.astype(dtype)\n return x\n\n\ndef asarray(x, dtype=None):\n \"\"\"Convert the input to an array.\n\n Parameters\n ----------\n a : array_like\n Input data, in any form that can be converted to a tensor. This\n includes lists, lists of tuples, tuples, tuples of tuples, tuples\n of lists and tensors.\n dtype : data-type, optional\n By default, the data-type is inferred from the input data.\n\n Returns\n -------\n out : Tensor\n Tensor interpretation of `a`. No copy is performed if the input\n is already an ndarray with matching dtype and order. 
If `a` is a\n subclass of ndarray, a base class ndarray is returned.\n\n Examples\n --------\n Convert a list into an array:\n\n >>> import mars.tensor as mt\n\n >>> a = [1, 2]\n >>> mt.asarray(a).execute()\n array([1, 2])\n\n Existing arrays are not copied:\n\n >>> a = mt.array([1, 2])\n >>> mt.asarray(a) is a\n True\n\n If `dtype` is set, array is copied only if dtype does not match:\n\n >>> a = mt.array([1, 2], dtype=mt.float32)\n >>> mt.asarray(a, dtype=mt.float32) is a\n True\n >>> mt.asarray(a, dtype=mt.float64) is a\n False\n \"\"\"\n return array(x, dtype=dtype, copy=False)\n", "path": "mars/tensor/expressions/datasource/array.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom .... import opcodes as OperandDef\nfrom ....lib.sparse.core import issparse, get_array_module, cp, cps, sps\nfrom ....utils import on_serialize_shape, on_deserialize_shape\nfrom ....serialize import ValueType, NDArrayField, TupleField\nfrom ...core import TENSOR_TYPE, Tensor\nfrom ..utils import get_chunk_slices\nfrom .core import TensorNoInput\nfrom .scalar import scalar\n\n\nclass ArrayDataSource(TensorNoInput):\n \"\"\"\n Represents data from numpy or cupy array\n \"\"\"\n\n _op_type_ = OperandDef.TENSOR_DATA_SOURCE\n\n _data = NDArrayField('data')\n\n def __init__(self, data=None, dtype=None, gpu=None, **kw):\n if dtype is not None:\n dtype = np.dtype(dtype)\n elif data is not None:\n dtype = np.dtype(data.dtype)\n super(ArrayDataSource, self).__init__(_data=data, _dtype=dtype, _gpu=gpu, **kw)\n\n @property\n def data(self):\n return self._data\n\n def to_chunk_op(self, *args):\n _, idx, chunk_size = args\n chunk_op = self.copy().reset_key()\n chunk_op._data = self.data[get_chunk_slices(chunk_size, idx)]\n\n return chunk_op\n\n\nclass CSRMatrixDataSource(TensorNoInput):\n \"\"\"\n Represents data from sparse array include scipy sparse or cupy sparse matrix.\n \"\"\"\n\n _op_type_ = OperandDef.SPARSE_MATRIX_DATA_SOURCE\n\n _indices = NDArrayField('indices')\n _indptr = NDArrayField('indptr')\n _data = NDArrayField('data')\n _shape = TupleField('shape', ValueType.int64,\n on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)\n\n def __init__(self, indices=None, indptr=None, data=None, shape=None,\n dtype=None, gpu=None, **kw):\n super(CSRMatrixDataSource, self).__init__(_indices=indices, _indptr=indptr,\n _data=data, _shape=shape, _dtype=dtype,\n _gpu=gpu, _sparse=True, **kw)\n\n def to_chunk_op(self, *args):\n _, idx, chunk_size = args\n\n xps = cps if self._gpu else sps\n if len(self._shape) == 1:\n shape = (1, self._shape[0])\n else:\n shape = self._shape\n data = xps.csr_matrix(\n (self._data, self._indices, self._indptr), shape)\n chunk_data = data[get_chunk_slices(chunk_size, idx)]\n\n chunk_op = self.copy().reset_key()\n chunk_op._data = chunk_data.data\n chunk_op._indices = chunk_data.indices\n chunk_op._indptr = 
chunk_data.indptr\n chunk_shape = chunk_data.shape[1:] \\\n if len(self._shape) == 1 else chunk_data.shape\n chunk_op._shape = chunk_shape\n\n return chunk_op\n\n @property\n def indices(self):\n return self._indices\n\n @property\n def indptr(self):\n return self._indptr\n\n @property\n def data(self):\n return self._data\n\n @property\n def shape(self):\n return self._shape\n\n\ndef _from_spmatrix(spmatrix, dtype=None, chunk_size=None, gpu=None):\n if gpu is None and cp is not None and get_array_module(spmatrix) is cp:\n gpu = True\n if dtype and spmatrix.dtype != dtype:\n spmatrix = spmatrix.astype(dtype)\n spmatrix = spmatrix.tocsr()\n op = CSRMatrixDataSource(indices=spmatrix.indices, indptr=spmatrix.indptr,\n data=spmatrix.data, shape=spmatrix.shape,\n dtype=spmatrix.dtype, gpu=gpu)\n return op(spmatrix.shape, chunk_size=chunk_size)\n\n\ndef tensor(data, dtype=None, chunk_size=None, gpu=None, sparse=False):\n if isinstance(data, TENSOR_TYPE):\n if dtype is not None and data.dtype != dtype:\n return data.astype(dtype)\n return data\n elif isinstance(data, (tuple, list)) and all(isinstance(d, TENSOR_TYPE) for d in data):\n from ..merge import stack\n\n data = stack(data)\n if dtype is not None:\n data = data.astype(dtype)\n return data\n elif np.isscalar(data):\n return scalar(data, dtype=dtype)\n elif issparse(data):\n return _from_spmatrix(data, dtype=dtype, chunk_size=chunk_size, gpu=gpu)\n else:\n m = get_array_module(data)\n data = m.asarray(data, dtype=dtype)\n if gpu is None and cp is not None and m is cp:\n gpu = True\n\n if isinstance(data, np.ndarray):\n if data.ndim == 0:\n return scalar(data.item(), dtype=dtype)\n op = ArrayDataSource(data, dtype=dtype, gpu=gpu)\n t = op(data.shape, chunk_size=chunk_size)\n if sparse and not t.issparse():\n return t.tosparse()\n return t\n else:\n raise ValueError('Cannot create tensor by given data: {0}'.format(data))\n\n\ndef array(x, dtype=None, copy=True, ndmin=None, chunk_size=None):\n \"\"\"\n Create a tensor.\n\n Parameters\n ----------\n object : array_like\n An array, any object exposing the array interface, an object whose\n __array__ method returns an array, or any (nested) sequence.\n dtype : data-type, optional\n The desired data-type for the array. If not given, then the type will\n be determined as the minimum type required to hold the objects in the\n sequence. This argument can only be used to 'upcast' the array. For\n downcasting, use the .astype(t) method.\n copy : bool, optional\n If true (default), then the object is copied. Otherwise, a copy will\n only be made if __array__ returns a copy, if obj is a nested sequence,\n or if a copy is needed to satisfy any of the other requirements\n (`dtype`, `order`, etc.).\n ndmin : int, optional\n Specifies the minimum number of dimensions that the resulting\n array should have. 
Ones will be pre-pended to the shape as\n needed to meet this requirement.\n chunk_size: int, tuple, optional\n Specifies chunk size for each dimension.\n\n Returns\n -------\n out : Tensor\n An tensor object satisfying the specified requirements.\n\n See Also\n --------\n empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.array([1, 2, 3]).execute()\n array([1, 2, 3])\n\n Upcasting:\n\n >>> mt.array([1, 2, 3.0]).execute()\n array([ 1., 2., 3.])\n\n More than one dimension:\n\n >>> mt.array([[1, 2], [3, 4]]).execute()\n array([[1, 2],\n [3, 4]])\n\n Minimum dimensions 2:\n\n >>> mt.array([1, 2, 3], ndmin=2).execute()\n array([[1, 2, 3]])\n\n Type provided:\n\n >>> mt.array([1, 2, 3], dtype=complex).execute()\n array([ 1.+0.j, 2.+0.j, 3.+0.j])\n\n \"\"\"\n raw_x = x\n x = tensor(x, chunk_size=chunk_size)\n if copy and x is raw_x:\n x = Tensor(x.data)\n while ndmin is not None and x.ndim < ndmin:\n x = x[np.newaxis, :]\n if dtype is not None and x.dtype != dtype:\n x = x.astype(dtype)\n return x\n\n\ndef asarray(x, dtype=None):\n \"\"\"Convert the input to an array.\n\n Parameters\n ----------\n a : array_like\n Input data, in any form that can be converted to a tensor. This\n includes lists, lists of tuples, tuples, tuples of tuples, tuples\n of lists and tensors.\n dtype : data-type, optional\n By default, the data-type is inferred from the input data.\n\n Returns\n -------\n out : Tensor\n Tensor interpretation of `a`. No copy is performed if the input\n is already an ndarray with matching dtype and order. If `a` is a\n subclass of ndarray, a base class ndarray is returned.\n\n Examples\n --------\n Convert a list into an array:\n\n >>> import mars.tensor as mt\n\n >>> a = [1, 2]\n >>> mt.asarray(a).execute()\n array([1, 2])\n\n Existing arrays are not copied:\n\n >>> a = mt.array([1, 2])\n >>> mt.asarray(a) is a\n True\n\n If `dtype` is set, array is copied only if dtype does not match:\n\n >>> a = mt.array([1, 2], dtype=mt.float32)\n >>> mt.asarray(a, dtype=mt.float32) is a\n True\n >>> mt.asarray(a, dtype=mt.float64) is a\n False\n \"\"\"\n return array(x, dtype=dtype, copy=False)\n", "path": "mars/tensor/expressions/datasource/array.py"}]} |
gh_patches_debug_1403 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-3778 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
unarchive issue with ansible 2.1 rc3
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
unarchive
##### ANSIBLE VERSION
```
ansible-playbook 2.1.0.0
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/yannig/dev/ansible-conf/ansible/playbooks/library']
```
##### CONFIGURATION
None
##### OS / ENVIRONMENT
Nothing special
##### SUMMARY
When using unarchive with Ansible 2.1 rc3, I get an error. Using ansible devel or ansible 2.0 works.
##### STEPS TO REPRODUCE
Get unarchive.yml at the following location: https://github.com/Yannig/yannig-ansible-playbooks/blob/master/unarchive/unarchive.yml
And run it.
```
ansible-playbook unarchive.yml
```
##### EXPECTED RESULTS
```
PLAY [Unarchive problem] *******************************************************
TASK [file] ********************************************************************
changed: [localhost]
TASK [file] ********************************************************************
changed: [localhost]
TASK [get_url] *****************************************************************
changed: [localhost]
TASK [unarchive] ***************************************************************
changed: [localhost]
PLAY RECAP *********************************************************************
localhost : ok=4 changed=4 unreachable=0 failed=0
```
##### ACTUAL RESULTS
```
PLAY [Unarchive problem] *******************************************************
TASK [file] ********************************************************************
changed: [localhost]
TASK [file] ********************************************************************
changed: [localhost]
TASK [get_url] *****************************************************************
ok: [localhost]
TASK [unarchive] ***************************************************************
fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "Unexpected error when accessing exploded file: [Errno 2] Aucun fichier ou dossier de ce type: '/tmp/unarchive/apache-tomee-plus-1.7.4/webapps'"}
NO MORE HOSTS LEFT *************************************************************
to retry, use: --limit @unarchive.retry
PLAY RECAP *********************************************************************
localhost : ok=3 changed=2 unreachable=0 failed=1
```
Note: the devel version is not affected by this issue.
--- END ISSUE ---
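Two side notes on the log above. First, "Aucun fichier ou dossier de ce type" is the French locale's rendering of "No such file or directory" (errno 2). Second, the failing path sits under the unarchive destination, which suggests that one of the module's checks stat()s a path listed by the archive that was never created on disk. The sketch below is a simplified illustration of that failure mode, not the module's actual code path (the real check shells out to `unzip -ZT` in `ZipArchive.is_unarchived` further down); the file names in the comment are taken from the log.
```python
import os
from zipfile import ZipFile

def members_missing_in_dest(src, dest):
    """List archive members whose extracted counterpart is absent under dest."""
    missing = []
    for member in ZipFile(src).namelist():
        try:
            os.stat(os.path.join(dest, member))   # raises OSError(errno 2) if absent
        except OSError:
            missing.append(member)
    return missing

# e.g. members_missing_in_dest('apache-tomee-plus-1.7.4.zip', '/tmp/unarchive')
```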
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `files/unarchive.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2012, Michael DeHaan <[email protected]>
5 # (c) 2013, Dylan Martin <[email protected]>
6 # (c) 2015, Toshio Kuratomi <[email protected]>
7 # (c) 2016, Dag Wieers <[email protected]>
8 # (c) 2016, Virgil Dupras <[email protected]>
9 #
10 # This file is part of Ansible
11 #
12 # Ansible is free software: you can redistribute it and/or modify
13 # it under the terms of the GNU General Public License as published by
14 # the Free Software Foundation, either version 3 of the License, or
15 # (at your option) any later version.
16 #
17 # Ansible is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
24
25 DOCUMENTATION = '''
26 ---
27 module: unarchive
28 version_added: 1.4
29 short_description: Unpacks an archive after (optionally) copying it from the local machine.
30 extends_documentation_fragment: files
31 description:
32 - The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set copy=no to unpack an archive which already exists on the target..
33 options:
34 src:
35 description:
36 - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack.
37 - If copy=no and src contains ://, the remote machine will download the file from the url first. (version_added 2.0)
38 required: true
39 default: null
40 dest:
41 description:
42 - Remote absolute path where the archive should be unpacked
43 required: true
44 default: null
45 copy:
46 description:
47 - "If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine."
48 required: false
49 choices: [ "yes", "no" ]
50 default: "yes"
51 creates:
52 description:
53 - a filename, when it already exists, this step will B(not) be run.
54 required: no
55 default: null
56 version_added: "1.6"
57 list_files:
58 description:
59 - If set to True, return the list of files that are contained in the tarball.
60 required: false
61 choices: [ "yes", "no" ]
62 default: "no"
63 version_added: "2.0"
64 exclude:
65 description:
66 - List the directory and file entries that you would like to exclude from the unarchive action.
67 required: false
68 default: []
69 version_added: "2.1"
70 keep_newer:
71 description:
72 - Do not replace existing files that are newer than files from the archive.
73 required: false
74 default: no
75 version_added: "2.1"
76 extra_opts:
77 description:
78 - Specify additional options by passing in an array.
79 default:
80 required: false
81 version_added: "2.1"
82 validate_certs:
83 description:
84 - This only applies if using a https url as the source of the file.
85 - This should only set to C(no) used on personally controlled sites using self-signed cer
86 - Prior to 2.2 the code worked as if this was set to C(yes).
87 required: false
88 default: "yes"
89 choices: ["yes", "no"]
90 version_added: "2.2"
91 author: "Dag Wieers (@dagwieers)"
92 todo:
93 - re-implement tar support using native tarfile module
94 - re-implement zip support using native zipfile module
95 notes:
96 - requires C(gtar)/C(unzip) command on target host
97 - can handle I(gzip), I(bzip2) and I(xz) compressed as well as uncompressed tar files
98 - detects type of archive automatically
99 - uses gtar's C(--diff arg) to calculate if changed or not. If this C(arg) is not
100 supported, it will always unpack the archive
101 - existing files/directories in the destination which are not in the archive
102 are not touched. This is the same behavior as a normal archive extraction
103 - existing files/directories in the destination which are not in the archive
104 are ignored for purposes of deciding if the archive should be unpacked or not
105 '''
106
107 EXAMPLES = '''
108 # Example from Ansible Playbooks
109 - unarchive: src=foo.tgz dest=/var/lib/foo
110
111 # Unarchive a file that is already on the remote machine
112 - unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no
113
114 # Unarchive a file that needs to be downloaded (added in 2.0)
115 - unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no
116 '''
117
118 import re
119 import os
120 import stat
121 import pwd
122 import grp
123 import datetime
124 import time
125 import binascii
126 from zipfile import ZipFile, BadZipfile
127 import tarfile
128 import subprocess
129
130 # String from tar that shows the tar contents are different from the
131 # filesystem
132 OWNER_DIFF_RE = re.compile(r': Uid differs$')
133 GROUP_DIFF_RE = re.compile(r': Gid differs$')
134 MODE_DIFF_RE = re.compile(r': Mode differs$')
135 #NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
136 MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
137 ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][stx-]){3}')
138 # When downloading an archive, how much of the archive to download before
139 # saving to a tempfile (64k)
140 BUFSIZE = 65536
141
142 # Return a CRC32 checksum of a file
143 def crc32(path):
144 return binascii.crc32(open(path).read()) & 0xffffffff
145
146 class UnarchiveError(Exception):
147 pass
148
149 # class to handle .zip files
150 class ZipArchive(object):
151
152 def __init__(self, src, dest, file_args, module):
153 self.src = src
154 self.dest = dest
155 self.file_args = file_args
156 self.opts = module.params['extra_opts']
157 self.module = module
158 self.excludes = module.params['exclude']
159 self.includes = []
160 self.cmd_path = self.module.get_bin_path('unzip')
161 self._files_in_archive = []
162 self._infodict = dict()
163
164 def _permstr_to_octal(self, modestr, umask):
165 ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''
166 revstr = modestr[::-1]
167 mode = 0
168 for j in range(0, 3):
169 for i in range(0, 3):
170 if revstr[i+3*j] in ['r', 'w', 'x', 's', 't']:
171 mode += 2**(i+3*j)
172 # The unzip utility does not support setting the stST bits
173 # if revstr[i+3*j] in ['s', 't', 'S', 'T' ]:
174 # mode += 2**(9+j)
175 return ( mode & ~umask )
176
177 def _legacy_file_list(self, force_refresh=False):
178 unzip_bin = self.module.get_bin_path('unzip')
179 if not unzip_bin:
180 raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)
181
182 rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])
183 if rc:
184 raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)
185
186 for line in out.splitlines()[3:-2]:
187 fields = line.split(None, 7)
188 self._files_in_archive.append(fields[7])
189 self._infodict[fields[7]] = long(fields[6])
190
191 def _crc32(self, path):
192 if self._infodict:
193 return self._infodict[path]
194
195 try:
196 archive = ZipFile(self.src)
197 except BadZipfile:
198 e = get_exception()
199 if e.args[0].lower().startswith('bad magic number'):
200 # Python2.4 can't handle zipfiles with > 64K files. Try using
201 # /usr/bin/unzip instead
202 self._legacy_file_list()
203 else:
204 raise
205 else:
206 try:
207 for item in archive.infolist():
208 self._infodict[item.filename] = long(item.CRC)
209 except:
210 archive.close()
211 raise UnarchiveError('Unable to list files in the archive')
212
213 return self._infodict[path]
214
215 @property
216 def files_in_archive(self, force_refresh=False):
217 if self._files_in_archive and not force_refresh:
218 return self._files_in_archive
219
220 self._files_in_archive = []
221 try:
222 archive = ZipFile(self.src)
223 except BadZipfile:
224 e = get_exception()
225 if e.args[0].lower().startswith('bad magic number'):
226 # Python2.4 can't handle zipfiles with > 64K files. Try using
227 # /usr/bin/unzip instead
228 self._legacy_file_list(force_refresh)
229 else:
230 raise
231 else:
232 try:
233 for member in archive.namelist():
234 if member not in self.excludes:
235 self._files_in_archive.append(member)
236 except:
237 archive.close()
238 raise UnarchiveError('Unable to list files in the archive')
239
240 archive.close()
241 return self._files_in_archive
242
243 def is_unarchived(self):
244 cmd = '%s -ZT -s "%s"' % (self.cmd_path, self.src)
245 if self.excludes:
246 cmd += ' -x "' + '" "'.join(self.excludes) + '"'
247 rc, out, err = self.module.run_command(cmd)
248
249 old_out = out
250 diff = ''
251 out = ''
252 if rc == 0:
253 unarchived = True
254 else:
255 unarchived = False
256
257 # Get some information related to user/group ownership
258 umask = os.umask(0)
259 os.umask(umask)
260
261 # Get current user and group information
262 groups = os.getgroups()
263 run_uid = os.getuid()
264 run_gid = os.getgid()
265 try:
266 run_owner = pwd.getpwuid(run_uid).pw_name
267 except:
268 run_owner = run_uid
269 try:
270 run_group = grp.getgrgid(run_gid).gr_name
271 except:
272 run_group = run_gid
273
274 # Get future user ownership
275 fut_owner = fut_uid = None
276 if self.file_args['owner']:
277 try:
278 tpw = pwd.getpwname(self.file_args['owner'])
279 except:
280 try:
281 tpw = pwd.getpwuid(self.file_args['owner'])
282 except:
283 tpw = pwd.getpwuid(run_uid)
284 fut_owner = tpw.pw_name
285 fut_uid = tpw.pw_uid
286 else:
287 try:
288 fut_owner = run_owner
289 except:
290 pass
291 fut_uid = run_uid
292
293 # Get future group ownership
294 fut_group = fut_gid = None
295 if self.file_args['group']:
296 try:
297 tgr = grp.getgrnam(self.file_args['group'])
298 except:
299 try:
300 tgr = grp.getgrgid(self.file_args['group'])
301 except:
302 tgr = grp.getgrgid(run_gid)
303 fut_group = tgr.gr_name
304 fut_gid = tgr.gr_gid
305 else:
306 try:
307 fut_group = run_group
308 except:
309 pass
310 fut_gid = run_gid
311
312 for line in old_out.splitlines():
313 change = False
314
315 pcs = line.split()
316 if len(pcs) != 8: continue
317
318 ztype = pcs[0][0]
319 permstr = pcs[0][1:10]
320 version = pcs[0][1]
321 ostype = pcs[0][2]
322 size = int(pcs[3])
323 path = pcs[7]
324
325 # Skip excluded files
326 if path in self.excludes:
327 out += 'Path %s is excluded on request\n' % path
328 continue
329
330 # Itemized change requires L for symlink
331 if path[-1] == '/':
332 if ztype != 'd':
333 err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
334 ftype = 'd'
335 elif ztype == 'l':
336 ftype = 'L'
337 elif ztype == '-':
338 ftype = 'f'
339 elif ztype == '?':
340 ftype = 'f'
341
342 # Some files may be storing FAT permissions, not Unix permissions
343 if len(permstr) == 6:
344 if path[-1] == '/':
345 permstr = 'rwxrwxrwx'
346 elif permstr == 'rwx---':
347 permstr = 'rwxrwxrwx'
348 else:
349 permstr = 'rw-rw-rw-'
350
351 # Test string conformity
352 if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
353 raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)
354
355 # DEBUG
356 # err += "%s%s %10d %s\n" % (ztype, permstr, size, path)
357
358 dest = os.path.join(self.dest, path)
359 try:
360 st = os.lstat(dest)
361 except:
362 change = True
363 self.includes.append(path)
364 err += 'Path %s is missing\n' % path
365 diff += '>%s++++++.?? %s\n' % (ftype, path)
366 continue
367
368 # Compare file types
369 if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
370 change = True
371 self.includes.append(path)
372 err += 'File %s already exists, but not as a directory\n' % path
373 diff += 'c%s++++++.?? %s\n' % (ftype, path)
374 continue
375
376 if ftype == 'f' and not stat.S_ISREG(st.st_mode):
377 change = True
378 unarchived = False
379 self.includes.append(path)
380 err += 'Directory %s already exists, but not as a regular file\n' % path
381 diff += 'c%s++++++.?? %s\n' % (ftype, path)
382 continue
383
384 if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
385 change = True
386 self.includes.append(path)
387 err += 'Directory %s already exists, but not as a symlink\n' % path
388 diff += 'c%s++++++.?? %s\n' % (ftype, path)
389 continue
390
391 itemized = list('.%s.......??' % ftype)
392
393 dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
394 timestamp = time.mktime(dt_object.timetuple())
395
396 # Compare file timestamps
397 if stat.S_ISREG(st.st_mode):
398 if self.module.params['keep_newer']:
399 if timestamp > st.st_mtime:
400 change = True
401 self.includes.append(path)
402 err += 'File %s is older, replacing file\n' % path
403 itemized[4] = 't'
404 elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
405 # Add to excluded files, ignore other changes
406 out += 'File %s is newer, excluding file\n' % path
407 continue
408 else:
409 if timestamp != st.st_mtime:
410 change = True
411 self.includes.append(path)
412 err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
413 itemized[4] = 't'
414
415 # Compare file sizes
416 if stat.S_ISREG(st.st_mode) and size != st.st_size:
417 change = True
418 err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
419 itemized[3] = 's'
420
421 # Compare file checksums
422 if stat.S_ISREG(st.st_mode):
423 crc = crc32(dest)
424 if crc != self._crc32(path):
425 change = True
426 err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
427 itemized[2] = 'c'
428
429 # Compare file permissions
430
431 # Do not handle permissions of symlinks
432 if ftype != 'L':
433 # Only special files require no umask-handling
434 if ztype == '?':
435 mode = self._permstr_to_octal(permstr, 0)
436 else:
437 mode = self._permstr_to_octal(permstr, umask)
438 if self.file_args['mode'] and self.file_args['mode'] != stat.S_IMODE(st.st_mode):
439 change = True
440 err += 'Path %s differs in permissions (%o vs %o)\n' % (path, self.file_args['mode'], stat.S_IMODE(st.st_mode))
441 itemized[5] = 'p'
442 elif mode != stat.S_IMODE(st.st_mode):
443 change = True
444 itemized[5] = 'p'
445 err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))
446
447 # Compare file user ownership
448 owner = uid = None
449 try:
450 owner = pwd.getpwuid(st.st_uid).pw_name
451 except:
452 uid = st.st_uid
453
454 # If we are not root and requested owner is not our user, fail
455 if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
456 raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))
457
458 if owner and owner != fut_owner:
459 change = True
460 err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
461 itemized[6] = 'o'
462 elif uid and uid != fut_uid:
463 change = True
464 err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
465 itemized[6] = 'o'
466
467 # Compare file group ownership
468 group = gid = None
469 try:
470 group = grp.getgrgid(st.st_gid).gr_name
471 except:
472 gid = st.st_gid
473
474 if run_uid != 0 and fut_gid not in groups:
475 raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))
476
477 if group and group != fut_group:
478 change = True
479 err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
480 itemized[6] = 'g'
481 elif gid and gid != fut_gid:
482 change = True
483 err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
484 itemized[6] = 'g'
485
486 # Register changed files and finalize diff output
487 if change:
488 if path not in self.includes:
489 self.includes.append(path)
490 diff += '%s %s\n' % (''.join(itemized), path)
491
492 if self.includes:
493 unarchived = False
494
495 # DEBUG
496 # out = old_out + out
497
498 return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
499
500 def unarchive(self):
501 cmd = '%s -o "%s"' % (self.cmd_path, self.src)
502 if self.opts:
503 cmd += ' ' + ' '.join(self.opts)
504 if self.includes:
505 cmd += ' "' + '" "'.join(self.includes) + '"'
506 # We don't need to handle excluded files, since we simply do not include them
507 # if self.excludes:
508 # cmd += ' -x ' + ' '.join(self.excludes)
509 cmd += ' -d "%s"' % self.dest
510 rc, out, err = self.module.run_command(cmd)
511 return dict(cmd=cmd, rc=rc, out=out, err=err)
512
513 def can_handle_archive(self):
514 if not self.cmd_path:
515 return False
516 cmd = '%s -l "%s"' % (self.cmd_path, self.src)
517 rc, out, err = self.module.run_command(cmd)
518 if rc == 0:
519 return True
520 return False
521
522
523 # class to handle gzipped tar files
524 class TgzArchive(object):
525
526 def __init__(self, src, dest, file_args, module):
527 self.src = src
528 self.dest = dest
529 self.file_args = file_args
530 self.opts = module.params['extra_opts']
531 self.module = module
532 self.excludes = [ path.rstrip('/') for path in self.module.params['exclude']]
533 # Prefer gtar (GNU tar) as it supports the compression options -zjJ
534 self.cmd_path = self.module.get_bin_path('gtar', None)
535 if not self.cmd_path:
536 # Fallback to tar
537 self.cmd_path = self.module.get_bin_path('tar')
538 self.zipflag = 'z'
539 self.compress_mode = 'gz'
540 self._files_in_archive = []
541
542 def _get_tar_fileobj(self):
543 """Returns a file object that can be read by ``tarfile.open()``."""
544 return open(self.src, 'rb')
545
546 @property
547 def files_in_archive(self, force_refresh=False):
548 if self._files_in_archive and not force_refresh:
549 return self._files_in_archive
550
551 # The use of Python's tarfile module here allows us to easily avoid tricky file encoding
552 # problems. Ref #11348
553 try:
554 tf = tarfile.open(fileobj=self._get_tar_fileobj(), mode='r:%s' % self.compress_mode)
555 except Exception:
556 raise UnarchiveError('Unable to list files in the archive')
557
558 for filename in tf.getnames():
559 if filename and filename not in self.excludes:
560 self._files_in_archive.append(filename)
561 return self._files_in_archive
562
563 def is_unarchived(self):
564 cmd = '%s -C "%s" -d%s' % (self.cmd_path, self.dest, self.zipflag)
565 if self.opts:
566 cmd += ' ' + ' '.join(self.opts)
567 if self.file_args['owner']:
568 cmd += ' --owner="%s"' % self.file_args['owner']
569 if self.file_args['group']:
570 cmd += ' --group="%s"' % self.file_args['group']
571 if self.file_args['mode']:
572 cmd += ' --mode="%s"' % self.file_args['mode']
573 if self.module.params['keep_newer']:
574 cmd += ' --keep-newer-files'
575 if self.excludes:
576 cmd += ' --exclude="' + '" --exclude="'.join(self.excludes) + '"'
577 cmd += ' -f "%s"' % self.src
578 rc, out, err = self.module.run_command(cmd)
579
580 # Check whether the differences are in something that we're
581 # setting anyway
582
583 # What is different
584 unarchived = True
585 old_out = out
586 out = ''
587 run_uid = os.getuid()
588 # When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient
589 # Only way to be sure is to check request with what is on disk (as we do for zip)
590 # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
591 for line in old_out.splitlines() + err.splitlines():
592 if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
593 out += line + '\n'
594 if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
595 out += line + '\n'
596 if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
597 out += line + '\n'
598 if MISSING_FILE_RE.search(line):
599 out += line + '\n'
600 if out:
601 unarchived = False
602 return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
603
604 def unarchive(self):
605 cmd = '%s -C "%s" -x%s' % (self.cmd_path, self.dest, self.zipflag)
606 if self.opts:
607 cmd += ' ' + ' '.join(self.opts)
608 if self.file_args['owner']:
609 cmd += ' --owner="%s"' % self.file_args['owner']
610 if self.file_args['group']:
611 cmd += ' --group="%s"' % self.file_args['group']
612 if self.file_args['mode']:
613 cmd += ' --mode="%s"' % self.file_args['mode']
614 if self.module.params['keep_newer']:
615 cmd += ' --keep-newer-files'
616 if self.excludes:
617 cmd += ' --exclude="' + '" --exclude="'.join(self.excludes) + '"'
618 cmd += ' -f "%s"' % (self.src)
619 rc, out, err = self.module.run_command(cmd, cwd=self.dest)
620 return dict(cmd=cmd, rc=rc, out=out, err=err)
621
622 def can_handle_archive(self):
623 if not self.cmd_path:
624 return False
625
626 try:
627 if self.files_in_archive:
628 return True
629 except UnarchiveError:
630 pass
631 # Errors and no files in archive assume that we weren't able to
632 # properly unarchive it
633 return False
634
635
636 # class to handle tar files that aren't compressed
637 class TarArchive(TgzArchive):
638 def __init__(self, src, dest, file_args, module):
639 super(TarArchive, self).__init__(src, dest, file_args, module)
640 # argument to tar
641 self.zipflag = ''
642 # parameter for python tarfile library
643 self.compress_mode = ''
644
645
646 # class to handle bzip2 compressed tar files
647 class TarBzipArchive(TgzArchive):
648 def __init__(self, src, dest, file_args, module):
649 super(TarBzipArchive, self).__init__(src, dest, file_args, module)
650 self.zipflag = 'j'
651 self.compress_mode = 'bz2'
652
653
654 # class to handle xz compressed tar files
655 class TarXzArchive(TgzArchive):
656 def __init__(self, src, dest, file_args, module):
657 super(TarXzArchive, self).__init__(src, dest, file_args, module)
658 self.zipflag = 'J'
659 self.compress_mode = ''
660
661 def _get_tar_fileobj(self):
662 # Python's tarfile module doesn't support xz compression so we have to manually uncompress
663 # it first.
664 xz_bin_path = self.module.get_bin_path('xz')
665 xz_stdout = tempfile.TemporaryFile()
666 # we don't use self.module.run_command() to avoid loading the whole archive in memory.
667 cmd = subprocess.Popen([xz_bin_path, '-dc', self.src], stdout=xz_stdout)
668 rc = cmd.wait()
669 if rc != 0:
670 raise UnarchiveError("Could not uncompress with xz")
671 xz_stdout.seek(0)
672 return xz_stdout
673
674
675 # try handlers in order and return the one that works or bail if none work
676 def pick_handler(src, dest, file_args, module):
677 handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive]
678 for handler in handlers:
679 obj = handler(src, dest, file_args, module)
680 if obj.can_handle_archive():
681 return obj
682 module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed.' % src)
683
684
685 def main():
686 module = AnsibleModule(
687 # not checking because of daisy chain to file module
688 argument_spec = dict(
689 src = dict(required=True, type='path'),
690 original_basename = dict(required=False, type='str'), # used to handle 'dest is a directory' via template, a slight hack
691 dest = dict(required=True, type='path'),
692 copy = dict(default=True, type='bool'),
693 creates = dict(required=False, type='path'),
694 list_files = dict(required=False, default=False, type='bool'),
695 keep_newer = dict(required=False, default=False, type='bool'),
696 exclude = dict(required=False, default=[], type='list'),
697 extra_opts = dict(required=False, default=[], type='list'),
698 validate_certs = dict(required=False, default=True, type='bool'),
699 ),
700 add_file_common_args = True,
701 # supports_check_mode = True,
702 )
703
704 src = os.path.expanduser(module.params['src'])
705 dest = os.path.expanduser(module.params['dest'])
706 copy = module.params['copy']
707 file_args = module.load_file_common_arguments(module.params)
708 # did tar file arrive?
709 if not os.path.exists(src):
710 if copy:
711 module.fail_json(msg="Source '%s' failed to transfer" % src)
712 # If copy=false, and src= contains ://, try and download the file to a temp directory.
713 elif '://' in src:
714 tempdir = os.path.dirname(os.path.realpath(__file__))
715 package = os.path.join(tempdir, str(src.rsplit('/', 1)[1]))
716 try:
717 rsp, info = fetch_url(module, src)
718 # If download fails, raise a proper exception
719 if rsp is None:
720 raise Exception(info['msg'])
721 f = open(package, 'w')
722 # Read BUFSIZE (64k) bytes at a time to save on RAM
723 while True:
724 data = rsp.read(BUFSIZE)
725
726 if data == "":
727 break # End of file, break while loop
728
729 f.write(data)
730 f.close()
731 src = package
732 except Exception:
733 e = get_exception()
734 module.fail_json(msg="Failure downloading %s, %s" % (src, e))
735 else:
736 module.fail_json(msg="Source '%s' does not exist" % src)
737 if not os.access(src, os.R_OK):
738 module.fail_json(msg="Source '%s' not readable" % src)
739
740 # skip working with 0 size archives
741 try:
742 if os.path.getsize(src) == 0:
743 module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
744 except Exception:
745 e = get_exception()
746 module.fail_json(msg="Source '%s' not readable" % src)
747
748 # is dest OK to receive tar file?
749 if not os.path.isdir(dest):
750 module.fail_json(msg="Destination '%s' is not a directory" % dest)
751
752 handler = pick_handler(src, dest, file_args, module)
753
754 res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
755
756 # do we need to do unpack?
757 check_results = handler.is_unarchived()
758
759 # DEBUG
760 # res_args['check_results'] = check_results
761
762 if check_results['unarchived']:
763 res_args['changed'] = False
764 else:
765 # do the unpack
766 try:
767 res_args['extract_results'] = handler.unarchive()
768 if res_args['extract_results']['rc'] != 0:
769 module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
770 except IOError:
771 module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
772 else:
773 res_args['changed'] = True
774
775 if check_results.get('diff', False):
776 res_args['diff'] = { 'prepared': check_results['diff'] }
777
778 # Run only if we found differences (idempotence) or diff was missing
779 if res_args.get('diff', True):
780 # do we need to change perms?
781 for filename in handler.files_in_archive:
782 file_args['path'] = os.path.join(dest, filename)
783 try:
784 res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
785 except (IOError, OSError):
786 e = get_exception()
787 module.fail_json(msg="Unexpected error when accessing exploded file: %s" % str(e), **res_args)
788
789 if module.params['list_files']:
790 res_args['files'] = handler.files_in_archive
791
792 module.exit_json(**res_args)
793
794 # import module snippets
795 from ansible.module_utils.basic import *
796 from ansible.module_utils.urls import *
797 if __name__ == '__main__':
798 main()
799
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/files/unarchive.py b/files/unarchive.py
--- a/files/unarchive.py
+++ b/files/unarchive.py
@@ -701,6 +701,9 @@
# supports_check_mode = True,
)
+ # We screenscrape a huge amount of commands so use C locale anytime we do
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
copy = module.params['copy']
| {"golden_diff": "diff --git a/files/unarchive.py b/files/unarchive.py\n--- a/files/unarchive.py\n+++ b/files/unarchive.py\n@@ -701,6 +701,9 @@\n # supports_check_mode = True,\n )\n \n+ # We screenscrape a huge amount of commands so use C locale anytime we do\n+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')\n+\n src = os.path.expanduser(module.params['src'])\n dest = os.path.expanduser(module.params['dest'])\n copy = module.params['copy']\n", "issue": "unarchive issue with ansible 2.1 rc3\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\nunarchive\n##### ANSIBLE VERSION\n\n```\nansible-playbook 2.1.0.0\n config file = /etc/ansible/ansible.cfg\n configured module search path = ['/home/yannig/dev/ansible-conf/ansible/playbooks/library']\n```\n##### CONFIGURATION\n\nNone\n##### OS / ENVIRONMENT\n\nNothing special\n##### SUMMARY\n\nWhen using unarchive with Ansible 2.1 rc3, I get an error. Using ansible devel or ansible 2.0 work.\n##### STEPS TO REPRODUCE\n\nGet unarchive.yml at the following location: https://github.com/Yannig/yannig-ansible-playbooks/blob/master/unarchive/unarchive.yml\n\nAnd run it.\n\n```\nansible-playbook unarchive.yml\n```\n##### EXPECTED RESULTS\n\n```\nPLAY [Unarchive problem] *******************************************************\n\nTASK [file] ********************************************************************\nchanged: [localhost]\n\nTASK [file] ********************************************************************\nchanged: [localhost]\n\nTASK [get_url] *****************************************************************\nchanged: [localhost]\n\nTASK [unarchive] ***************************************************************\nchanged: [localhost]\n\nPLAY RECAP *********************************************************************\nlocalhost : ok=4 changed=4 unreachable=0 failed=0\n```\n##### ACTUAL RESULTS\n\n```\nPLAY [Unarchive problem] *******************************************************\n\nTASK [file] ********************************************************************\nchanged: [localhost]\n\nTASK [file] ********************************************************************\nchanged: [localhost]\n\nTASK [get_url] *****************************************************************\nok: [localhost]\n\nTASK [unarchive] ***************************************************************\nfatal: [localhost]: FAILED! 
=> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected error when accessing exploded file: [Errno 2] Aucun fichier ou dossier de ce type: '/tmp/unarchive/apache-tomee-plus-1.7.4/webapps'\"}\n\nNO MORE HOSTS LEFT *************************************************************\n to retry, use: --limit @unarchive.retry\n\nPLAY RECAP *********************************************************************\nlocalhost : ok=3 changed=2 unreachable=0 failed=1\n```\n\nNote: the devel version is not concerned about this issue.\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2013, Dylan Martin <[email protected]>\n# (c) 2015, Toshio Kuratomi <[email protected]>\n# (c) 2016, Dag Wieers <[email protected]>\n# (c) 2016, Virgil Dupras <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: unarchive\nversion_added: 1.4\nshort_description: Unpacks an archive after (optionally) copying it from the local machine.\nextends_documentation_fragment: files\ndescription:\n - The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set copy=no to unpack an archive which already exists on the target..\noptions:\n src:\n description:\n - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack.\n - If copy=no and src contains ://, the remote machine will download the file from the url first. 
(version_added 2.0)\n required: true\n default: null\n dest:\n description:\n - Remote absolute path where the archive should be unpacked\n required: true\n default: null\n copy:\n description:\n - \"If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine.\"\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"yes\"\n creates:\n description:\n - a filename, when it already exists, this step will B(not) be run.\n required: no\n default: null\n version_added: \"1.6\"\n list_files:\n description:\n - If set to True, return the list of files that are contained in the tarball.\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n version_added: \"2.0\"\n exclude:\n description:\n - List the directory and file entries that you would like to exclude from the unarchive action.\n required: false\n default: []\n version_added: \"2.1\"\n keep_newer:\n description:\n - Do not replace existing files that are newer than files from the archive.\n required: false\n default: no\n version_added: \"2.1\"\n extra_opts:\n description:\n - Specify additional options by passing in an array.\n default:\n required: false\n version_added: \"2.1\"\n validate_certs:\n description:\n - This only applies if using a https url as the source of the file.\n - This should only set to C(no) used on personally controlled sites using self-signed cer\n - Prior to 2.2 the code worked as if this was set to C(yes).\n required: false\n default: \"yes\"\n choices: [\"yes\", \"no\"]\n version_added: \"2.2\"\nauthor: \"Dag Wieers (@dagwieers)\"\ntodo:\n - re-implement tar support using native tarfile module\n - re-implement zip support using native zipfile module\nnotes:\n - requires C(gtar)/C(unzip) command on target host\n - can handle I(gzip), I(bzip2) and I(xz) compressed as well as uncompressed tar files\n - detects type of archive automatically\n - uses gtar's C(--diff arg) to calculate if changed or not. If this C(arg) is not\n supported, it will always unpack the archive\n - existing files/directories in the destination which are not in the archive\n are not touched. 
This is the same behavior as a normal archive extraction\n - existing files/directories in the destination which are not in the archive\n are ignored for purposes of deciding if the archive should be unpacked or not\n'''\n\nEXAMPLES = '''\n# Example from Ansible Playbooks\n- unarchive: src=foo.tgz dest=/var/lib/foo\n\n# Unarchive a file that is already on the remote machine\n- unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no\n\n# Unarchive a file that needs to be downloaded (added in 2.0)\n- unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no\n'''\n\nimport re\nimport os\nimport stat\nimport pwd\nimport grp\nimport datetime\nimport time\nimport binascii\nfrom zipfile import ZipFile, BadZipfile\nimport tarfile\nimport subprocess\n\n# String from tar that shows the tar contents are different from the\n# filesystem\nOWNER_DIFF_RE = re.compile(r': Uid differs$')\nGROUP_DIFF_RE = re.compile(r': Gid differs$')\nMODE_DIFF_RE = re.compile(r': Mode differs$')\n#NEWER_DIFF_RE = re.compile(r' is newer or same age.$')\nMISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')\nZIP_FILE_MODE_RE = re.compile(r'([r-][w-][stx-]){3}')\n# When downloading an archive, how much of the archive to download before\n# saving to a tempfile (64k)\nBUFSIZE = 65536\n\n# Return a CRC32 checksum of a file\ndef crc32(path):\n return binascii.crc32(open(path).read()) & 0xffffffff\n\nclass UnarchiveError(Exception):\n pass\n\n# class to handle .zip files\nclass ZipArchive(object):\n\n def __init__(self, src, dest, file_args, module):\n self.src = src\n self.dest = dest\n self.file_args = file_args\n self.opts = module.params['extra_opts']\n self.module = module\n self.excludes = module.params['exclude']\n self.includes = []\n self.cmd_path = self.module.get_bin_path('unzip')\n self._files_in_archive = []\n self._infodict = dict()\n\n def _permstr_to_octal(self, modestr, umask):\n ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''\n revstr = modestr[::-1]\n mode = 0\n for j in range(0, 3):\n for i in range(0, 3):\n if revstr[i+3*j] in ['r', 'w', 'x', 's', 't']:\n mode += 2**(i+3*j)\n # The unzip utility does not support setting the stST bits\n# if revstr[i+3*j] in ['s', 't', 'S', 'T' ]:\n# mode += 2**(9+j)\n return ( mode & ~umask )\n\n def _legacy_file_list(self, force_refresh=False):\n unzip_bin = self.module.get_bin_path('unzip')\n if not unzip_bin:\n raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)\n\n rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])\n if rc:\n raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)\n\n for line in out.splitlines()[3:-2]:\n fields = line.split(None, 7)\n self._files_in_archive.append(fields[7])\n self._infodict[fields[7]] = long(fields[6])\n\n def _crc32(self, path):\n if self._infodict:\n return self._infodict[path]\n\n try:\n archive = ZipFile(self.src)\n except BadZipfile:\n e = get_exception()\n if e.args[0].lower().startswith('bad magic number'):\n # Python2.4 can't handle zipfiles with > 64K files. 
Try using\n # /usr/bin/unzip instead\n self._legacy_file_list()\n else:\n raise\n else:\n try:\n for item in archive.infolist():\n self._infodict[item.filename] = long(item.CRC)\n except:\n archive.close()\n raise UnarchiveError('Unable to list files in the archive')\n\n return self._infodict[path]\n\n @property\n def files_in_archive(self, force_refresh=False):\n if self._files_in_archive and not force_refresh:\n return self._files_in_archive\n\n self._files_in_archive = []\n try:\n archive = ZipFile(self.src)\n except BadZipfile:\n e = get_exception()\n if e.args[0].lower().startswith('bad magic number'):\n # Python2.4 can't handle zipfiles with > 64K files. Try using\n # /usr/bin/unzip instead\n self._legacy_file_list(force_refresh)\n else:\n raise\n else:\n try:\n for member in archive.namelist():\n if member not in self.excludes:\n self._files_in_archive.append(member)\n except:\n archive.close()\n raise UnarchiveError('Unable to list files in the archive')\n\n archive.close()\n return self._files_in_archive\n\n def is_unarchived(self):\n cmd = '%s -ZT -s \"%s\"' % (self.cmd_path, self.src)\n if self.excludes:\n cmd += ' -x \"' + '\" \"'.join(self.excludes) + '\"'\n rc, out, err = self.module.run_command(cmd)\n\n old_out = out\n diff = ''\n out = ''\n if rc == 0:\n unarchived = True\n else:\n unarchived = False\n\n # Get some information related to user/group ownership\n umask = os.umask(0)\n os.umask(umask)\n\n # Get current user and group information\n groups = os.getgroups()\n run_uid = os.getuid()\n run_gid = os.getgid()\n try:\n run_owner = pwd.getpwuid(run_uid).pw_name\n except:\n run_owner = run_uid\n try:\n run_group = grp.getgrgid(run_gid).gr_name\n except:\n run_group = run_gid\n\n # Get future user ownership\n fut_owner = fut_uid = None\n if self.file_args['owner']:\n try:\n tpw = pwd.getpwname(self.file_args['owner'])\n except:\n try:\n tpw = pwd.getpwuid(self.file_args['owner'])\n except:\n tpw = pwd.getpwuid(run_uid)\n fut_owner = tpw.pw_name\n fut_uid = tpw.pw_uid\n else:\n try:\n fut_owner = run_owner\n except:\n pass\n fut_uid = run_uid\n\n # Get future group ownership\n fut_group = fut_gid = None\n if self.file_args['group']:\n try:\n tgr = grp.getgrnam(self.file_args['group'])\n except:\n try:\n tgr = grp.getgrgid(self.file_args['group'])\n except:\n tgr = grp.getgrgid(run_gid)\n fut_group = tgr.gr_name\n fut_gid = tgr.gr_gid\n else:\n try:\n fut_group = run_group\n except:\n pass\n fut_gid = run_gid\n\n for line in old_out.splitlines():\n change = False\n\n pcs = line.split()\n if len(pcs) != 8: continue\n\n ztype = pcs[0][0]\n permstr = pcs[0][1:10]\n version = pcs[0][1]\n ostype = pcs[0][2]\n size = int(pcs[3])\n path = pcs[7]\n\n # Skip excluded files\n if path in self.excludes:\n out += 'Path %s is excluded on request\\n' % path\n continue\n\n # Itemized change requires L for symlink\n if path[-1] == '/':\n if ztype != 'd':\n err += 'Path %s incorrectly tagged as \"%s\", but is a directory.\\n' % (path, ztype)\n ftype = 'd'\n elif ztype == 'l':\n ftype = 'L'\n elif ztype == '-':\n ftype = 'f'\n elif ztype == '?':\n ftype = 'f'\n\n # Some files may be storing FAT permissions, not Unix permissions\n if len(permstr) == 6:\n if path[-1] == '/':\n permstr = 'rwxrwxrwx'\n elif permstr == 'rwx---':\n permstr = 'rwxrwxrwx'\n else:\n permstr = 'rw-rw-rw-'\n\n # Test string conformity\n if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):\n raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)\n\n # DEBUG\n# err += \"%s%s %10d %s\\n\" % (ztype, 
permstr, size, path)\n\n dest = os.path.join(self.dest, path)\n try:\n st = os.lstat(dest)\n except:\n change = True\n self.includes.append(path)\n err += 'Path %s is missing\\n' % path\n diff += '>%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n # Compare file types\n if ftype == 'd' and not stat.S_ISDIR(st.st_mode):\n change = True\n self.includes.append(path)\n err += 'File %s already exists, but not as a directory\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n if ftype == 'f' and not stat.S_ISREG(st.st_mode):\n change = True\n unarchived = False\n self.includes.append(path)\n err += 'Directory %s already exists, but not as a regular file\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n if ftype == 'L' and not stat.S_ISLNK(st.st_mode):\n change = True\n self.includes.append(path)\n err += 'Directory %s already exists, but not as a symlink\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n itemized = list('.%s.......??' % ftype)\n\n dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))\n timestamp = time.mktime(dt_object.timetuple())\n\n # Compare file timestamps\n if stat.S_ISREG(st.st_mode):\n if self.module.params['keep_newer']:\n if timestamp > st.st_mtime:\n change = True\n self.includes.append(path)\n err += 'File %s is older, replacing file\\n' % path\n itemized[4] = 't'\n elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:\n # Add to excluded files, ignore other changes\n out += 'File %s is newer, excluding file\\n' % path\n continue\n else:\n if timestamp != st.st_mtime:\n change = True\n self.includes.append(path)\n err += 'File %s differs in mtime (%f vs %f)\\n' % (path, timestamp, st.st_mtime)\n itemized[4] = 't'\n\n # Compare file sizes\n if stat.S_ISREG(st.st_mode) and size != st.st_size:\n change = True\n err += 'File %s differs in size (%d vs %d)\\n' % (path, size, st.st_size)\n itemized[3] = 's'\n\n # Compare file checksums\n if stat.S_ISREG(st.st_mode):\n crc = crc32(dest)\n if crc != self._crc32(path):\n change = True\n err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\\n' % (path, self._crc32(path), crc)\n itemized[2] = 'c'\n\n # Compare file permissions\n\n # Do not handle permissions of symlinks\n if ftype != 'L':\n # Only special files require no umask-handling\n if ztype == '?':\n mode = self._permstr_to_octal(permstr, 0)\n else:\n mode = self._permstr_to_octal(permstr, umask)\n if self.file_args['mode'] and self.file_args['mode'] != stat.S_IMODE(st.st_mode):\n change = True\n err += 'Path %s differs in permissions (%o vs %o)\\n' % (path, self.file_args['mode'], stat.S_IMODE(st.st_mode))\n itemized[5] = 'p'\n elif mode != stat.S_IMODE(st.st_mode):\n change = True\n itemized[5] = 'p'\n err += 'Path %s differs in permissions (%o vs %o)\\n' % (path, mode, stat.S_IMODE(st.st_mode))\n\n # Compare file user ownership\n owner = uid = None\n try:\n owner = pwd.getpwuid(st.st_uid).pw_name\n except:\n uid = st.st_uid\n\n # If we are not root and requested owner is not our user, fail\n if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):\n raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))\n\n if owner and owner != fut_owner:\n change = True\n err += 'Path %s is owned by user %s, not by user %s as expected\\n' % (path, owner, fut_owner)\n itemized[6] = 'o'\n elif uid and uid != fut_uid:\n change = True\n err += 'Path %s is owned by uid %s, not by uid %s as expected\\n' % (path, uid, fut_uid)\n 
itemized[6] = 'o'\n\n # Compare file group ownership\n group = gid = None\n try:\n group = grp.getgrgid(st.st_gid).gr_name\n except:\n gid = st.st_gid\n\n if run_uid != 0 and fut_gid not in groups:\n raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))\n\n if group and group != fut_group:\n change = True\n err += 'Path %s is owned by group %s, not by group %s as expected\\n' % (path, group, fut_group)\n itemized[6] = 'g'\n elif gid and gid != fut_gid:\n change = True\n err += 'Path %s is owned by gid %s, not by gid %s as expected\\n' % (path, gid, fut_gid)\n itemized[6] = 'g'\n\n # Register changed files and finalize diff output\n if change:\n if path not in self.includes:\n self.includes.append(path)\n diff += '%s %s\\n' % (''.join(itemized), path)\n\n if self.includes:\n unarchived = False\n\n # DEBUG\n# out = old_out + out\n\n return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)\n\n def unarchive(self):\n cmd = '%s -o \"%s\"' % (self.cmd_path, self.src)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.includes:\n cmd += ' \"' + '\" \"'.join(self.includes) + '\"'\n # We don't need to handle excluded files, since we simply do not include them\n# if self.excludes:\n# cmd += ' -x ' + ' '.join(self.excludes)\n cmd += ' -d \"%s\"' % self.dest\n rc, out, err = self.module.run_command(cmd)\n return dict(cmd=cmd, rc=rc, out=out, err=err)\n\n def can_handle_archive(self):\n if not self.cmd_path:\n return False\n cmd = '%s -l \"%s\"' % (self.cmd_path, self.src)\n rc, out, err = self.module.run_command(cmd)\n if rc == 0:\n return True\n return False\n\n\n# class to handle gzipped tar files\nclass TgzArchive(object):\n\n def __init__(self, src, dest, file_args, module):\n self.src = src\n self.dest = dest\n self.file_args = file_args\n self.opts = module.params['extra_opts']\n self.module = module\n self.excludes = [ path.rstrip('/') for path in self.module.params['exclude']]\n # Prefer gtar (GNU tar) as it supports the compression options -zjJ\n self.cmd_path = self.module.get_bin_path('gtar', None)\n if not self.cmd_path:\n # Fallback to tar\n self.cmd_path = self.module.get_bin_path('tar')\n self.zipflag = 'z'\n self.compress_mode = 'gz'\n self._files_in_archive = []\n\n def _get_tar_fileobj(self):\n \"\"\"Returns a file object that can be read by ``tarfile.open()``.\"\"\"\n return open(self.src, 'rb')\n\n @property\n def files_in_archive(self, force_refresh=False):\n if self._files_in_archive and not force_refresh:\n return self._files_in_archive\n\n # The use of Python's tarfile module here allows us to easily avoid tricky file encoding\n # problems. 
Ref #11348\n try:\n tf = tarfile.open(fileobj=self._get_tar_fileobj(), mode='r:%s' % self.compress_mode)\n except Exception:\n raise UnarchiveError('Unable to list files in the archive')\n\n for filename in tf.getnames():\n if filename and filename not in self.excludes:\n self._files_in_archive.append(filename)\n return self._files_in_archive\n\n def is_unarchived(self):\n cmd = '%s -C \"%s\" -d%s' % (self.cmd_path, self.dest, self.zipflag)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.file_args['owner']:\n cmd += ' --owner=\"%s\"' % self.file_args['owner']\n if self.file_args['group']:\n cmd += ' --group=\"%s\"' % self.file_args['group']\n if self.file_args['mode']:\n cmd += ' --mode=\"%s\"' % self.file_args['mode']\n if self.module.params['keep_newer']:\n cmd += ' --keep-newer-files'\n if self.excludes:\n cmd += ' --exclude=\"' + '\" --exclude=\"'.join(self.excludes) + '\"'\n cmd += ' -f \"%s\"' % self.src\n rc, out, err = self.module.run_command(cmd)\n\n # Check whether the differences are in something that we're\n # setting anyway\n\n # What is different\n unarchived = True\n old_out = out\n out = ''\n run_uid = os.getuid()\n # When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient\n # Only way to be sure is to check request with what is on disk (as we do for zip)\n # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change\n for line in old_out.splitlines() + err.splitlines():\n if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):\n out += line + '\\n'\n if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):\n out += line + '\\n'\n if not self.file_args['mode'] and MODE_DIFF_RE.search(line):\n out += line + '\\n'\n if MISSING_FILE_RE.search(line):\n out += line + '\\n'\n if out:\n unarchived = False\n return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)\n\n def unarchive(self):\n cmd = '%s -C \"%s\" -x%s' % (self.cmd_path, self.dest, self.zipflag)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.file_args['owner']:\n cmd += ' --owner=\"%s\"' % self.file_args['owner']\n if self.file_args['group']:\n cmd += ' --group=\"%s\"' % self.file_args['group']\n if self.file_args['mode']:\n cmd += ' --mode=\"%s\"' % self.file_args['mode']\n if self.module.params['keep_newer']:\n cmd += ' --keep-newer-files'\n if self.excludes:\n cmd += ' --exclude=\"' + '\" --exclude=\"'.join(self.excludes) + '\"'\n cmd += ' -f \"%s\"' % (self.src)\n rc, out, err = self.module.run_command(cmd, cwd=self.dest)\n return dict(cmd=cmd, rc=rc, out=out, err=err)\n\n def can_handle_archive(self):\n if not self.cmd_path:\n return False\n\n try:\n if self.files_in_archive:\n return True\n except UnarchiveError:\n pass\n # Errors and no files in archive assume that we weren't able to\n # properly unarchive it\n return False\n\n\n# class to handle tar files that aren't compressed\nclass TarArchive(TgzArchive):\n def __init__(self, src, dest, file_args, module):\n super(TarArchive, self).__init__(src, dest, file_args, module)\n # argument to tar\n self.zipflag = ''\n # parameter for python tarfile library\n self.compress_mode = ''\n\n\n# class to handle bzip2 compressed tar files\nclass TarBzipArchive(TgzArchive):\n def __init__(self, src, dest, file_args, module):\n super(TarBzipArchive, self).__init__(src, dest, file_args, module)\n self.zipflag = 'j'\n self.compress_mode = 'bz2'\n\n\n# class to handle xz compressed tar files\nclass TarXzArchive(TgzArchive):\n def 
__init__(self, src, dest, file_args, module):\n super(TarXzArchive, self).__init__(src, dest, file_args, module)\n self.zipflag = 'J'\n self.compress_mode = ''\n\n def _get_tar_fileobj(self):\n # Python's tarfile module doesn't support xz compression so we have to manually uncompress\n # it first.\n xz_bin_path = self.module.get_bin_path('xz')\n xz_stdout = tempfile.TemporaryFile()\n # we don't use self.module.run_command() to avoid loading the whole archive in memory.\n cmd = subprocess.Popen([xz_bin_path, '-dc', self.src], stdout=xz_stdout)\n rc = cmd.wait()\n if rc != 0:\n raise UnarchiveError(\"Could not uncompress with xz\")\n xz_stdout.seek(0)\n return xz_stdout\n\n\n# try handlers in order and return the one that works or bail if none work\ndef pick_handler(src, dest, file_args, module):\n handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive]\n for handler in handlers:\n obj = handler(src, dest, file_args, module)\n if obj.can_handle_archive():\n return obj\n module.fail_json(msg='Failed to find handler for \"%s\". Make sure the required command to extract the file is installed.' % src)\n\n\ndef main():\n module = AnsibleModule(\n # not checking because of daisy chain to file module\n argument_spec = dict(\n src = dict(required=True, type='path'),\n original_basename = dict(required=False, type='str'), # used to handle 'dest is a directory' via template, a slight hack\n dest = dict(required=True, type='path'),\n copy = dict(default=True, type='bool'),\n creates = dict(required=False, type='path'),\n list_files = dict(required=False, default=False, type='bool'),\n keep_newer = dict(required=False, default=False, type='bool'),\n exclude = dict(required=False, default=[], type='list'),\n extra_opts = dict(required=False, default=[], type='list'),\n validate_certs = dict(required=False, default=True, type='bool'),\n ),\n add_file_common_args = True,\n# supports_check_mode = True,\n )\n\n src = os.path.expanduser(module.params['src'])\n dest = os.path.expanduser(module.params['dest'])\n copy = module.params['copy']\n file_args = module.load_file_common_arguments(module.params)\n # did tar file arrive?\n if not os.path.exists(src):\n if copy:\n module.fail_json(msg=\"Source '%s' failed to transfer\" % src)\n # If copy=false, and src= contains ://, try and download the file to a temp directory.\n elif '://' in src:\n tempdir = os.path.dirname(os.path.realpath(__file__))\n package = os.path.join(tempdir, str(src.rsplit('/', 1)[1]))\n try:\n rsp, info = fetch_url(module, src)\n # If download fails, raise a proper exception\n if rsp is None:\n raise Exception(info['msg'])\n f = open(package, 'w')\n # Read 1kb at a time to save on ram\n while True:\n data = rsp.read(BUFSIZE)\n\n if data == \"\":\n break # End of file, break while loop\n\n f.write(data)\n f.close()\n src = package\n except Exception:\n e = get_exception()\n module.fail_json(msg=\"Failure downloading %s, %s\" % (src, e))\n else:\n module.fail_json(msg=\"Source '%s' does not exist\" % src)\n if not os.access(src, os.R_OK):\n module.fail_json(msg=\"Source '%s' not readable\" % src)\n\n # skip working with 0 size archives\n try:\n if os.path.getsize(src) == 0:\n module.fail_json(msg=\"Invalid archive '%s', the file is 0 bytes\" % src)\n except Exception:\n e = get_exception()\n module.fail_json(msg=\"Source '%s' not readable\" % src)\n\n # is dest OK to receive tar file?\n if not os.path.isdir(dest):\n module.fail_json(msg=\"Destination '%s' is not a directory\" % dest)\n\n handler = pick_handler(src, dest, 
file_args, module)\n\n res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)\n\n # do we need to do unpack?\n check_results = handler.is_unarchived()\n\n # DEBUG\n# res_args['check_results'] = check_results\n\n if check_results['unarchived']:\n res_args['changed'] = False\n else:\n # do the unpack\n try:\n res_args['extract_results'] = handler.unarchive()\n if res_args['extract_results']['rc'] != 0:\n module.fail_json(msg=\"failed to unpack %s to %s\" % (src, dest), **res_args)\n except IOError:\n module.fail_json(msg=\"failed to unpack %s to %s\" % (src, dest), **res_args)\n else:\n res_args['changed'] = True\n\n if check_results.get('diff', False):\n res_args['diff'] = { 'prepared': check_results['diff'] }\n\n # Run only if we found differences (idempotence) or diff was missing\n if res_args.get('diff', True):\n # do we need to change perms?\n for filename in handler.files_in_archive:\n file_args['path'] = os.path.join(dest, filename)\n try:\n res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])\n except (IOError, OSError):\n e = get_exception()\n module.fail_json(msg=\"Unexpected error when accessing exploded file: %s\" % str(e), **res_args)\n\n if module.params['list_files']:\n res_args['files'] = handler.files_in_archive\n\n module.exit_json(**res_args)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\nif __name__ == '__main__':\n main()\n", "path": "files/unarchive.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2013, Dylan Martin <[email protected]>\n# (c) 2015, Toshio Kuratomi <[email protected]>\n# (c) 2016, Dag Wieers <[email protected]>\n# (c) 2016, Virgil Dupras <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: unarchive\nversion_added: 1.4\nshort_description: Unpacks an archive after (optionally) copying it from the local machine.\nextends_documentation_fragment: files\ndescription:\n - The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set copy=no to unpack an archive which already exists on the target..\noptions:\n src:\n description:\n - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack.\n - If copy=no and src contains ://, the remote machine will download the file from the url first. 
(version_added 2.0)\n required: true\n default: null\n dest:\n description:\n - Remote absolute path where the archive should be unpacked\n required: true\n default: null\n copy:\n description:\n - \"If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine.\"\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"yes\"\n creates:\n description:\n - a filename, when it already exists, this step will B(not) be run.\n required: no\n default: null\n version_added: \"1.6\"\n list_files:\n description:\n - If set to True, return the list of files that are contained in the tarball.\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n version_added: \"2.0\"\n exclude:\n description:\n - List the directory and file entries that you would like to exclude from the unarchive action.\n required: false\n default: []\n version_added: \"2.1\"\n keep_newer:\n description:\n - Do not replace existing files that are newer than files from the archive.\n required: false\n default: no\n version_added: \"2.1\"\n extra_opts:\n description:\n - Specify additional options by passing in an array.\n default:\n required: false\n version_added: \"2.1\"\n validate_certs:\n description:\n - This only applies if using a https url as the source of the file.\n - This should only set to C(no) used on personally controlled sites using self-signed cer\n - Prior to 2.2 the code worked as if this was set to C(yes).\n required: false\n default: \"yes\"\n choices: [\"yes\", \"no\"]\n version_added: \"2.2\"\nauthor: \"Dag Wieers (@dagwieers)\"\ntodo:\n - re-implement tar support using native tarfile module\n - re-implement zip support using native zipfile module\nnotes:\n - requires C(gtar)/C(unzip) command on target host\n - can handle I(gzip), I(bzip2) and I(xz) compressed as well as uncompressed tar files\n - detects type of archive automatically\n - uses gtar's C(--diff arg) to calculate if changed or not. If this C(arg) is not\n supported, it will always unpack the archive\n - existing files/directories in the destination which are not in the archive\n are not touched. 
This is the same behavior as a normal archive extraction\n - existing files/directories in the destination which are not in the archive\n are ignored for purposes of deciding if the archive should be unpacked or not\n'''\n\nEXAMPLES = '''\n# Example from Ansible Playbooks\n- unarchive: src=foo.tgz dest=/var/lib/foo\n\n# Unarchive a file that is already on the remote machine\n- unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no\n\n# Unarchive a file that needs to be downloaded (added in 2.0)\n- unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no\n'''\n\nimport re\nimport os\nimport stat\nimport pwd\nimport grp\nimport datetime\nimport time\nimport binascii\nfrom zipfile import ZipFile, BadZipfile\nimport tarfile\nimport subprocess\n\n# String from tar that shows the tar contents are different from the\n# filesystem\nOWNER_DIFF_RE = re.compile(r': Uid differs$')\nGROUP_DIFF_RE = re.compile(r': Gid differs$')\nMODE_DIFF_RE = re.compile(r': Mode differs$')\n#NEWER_DIFF_RE = re.compile(r' is newer or same age.$')\nMISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')\nZIP_FILE_MODE_RE = re.compile(r'([r-][w-][stx-]){3}')\n# When downloading an archive, how much of the archive to download before\n# saving to a tempfile (64k)\nBUFSIZE = 65536\n\n# Return a CRC32 checksum of a file\ndef crc32(path):\n return binascii.crc32(open(path).read()) & 0xffffffff\n\nclass UnarchiveError(Exception):\n pass\n\n# class to handle .zip files\nclass ZipArchive(object):\n\n def __init__(self, src, dest, file_args, module):\n self.src = src\n self.dest = dest\n self.file_args = file_args\n self.opts = module.params['extra_opts']\n self.module = module\n self.excludes = module.params['exclude']\n self.includes = []\n self.cmd_path = self.module.get_bin_path('unzip')\n self._files_in_archive = []\n self._infodict = dict()\n\n def _permstr_to_octal(self, modestr, umask):\n ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''\n revstr = modestr[::-1]\n mode = 0\n for j in range(0, 3):\n for i in range(0, 3):\n if revstr[i+3*j] in ['r', 'w', 'x', 's', 't']:\n mode += 2**(i+3*j)\n # The unzip utility does not support setting the stST bits\n# if revstr[i+3*j] in ['s', 't', 'S', 'T' ]:\n# mode += 2**(9+j)\n return ( mode & ~umask )\n\n def _legacy_file_list(self, force_refresh=False):\n unzip_bin = self.module.get_bin_path('unzip')\n if not unzip_bin:\n raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)\n\n rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])\n if rc:\n raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)\n\n for line in out.splitlines()[3:-2]:\n fields = line.split(None, 7)\n self._files_in_archive.append(fields[7])\n self._infodict[fields[7]] = long(fields[6])\n\n def _crc32(self, path):\n if self._infodict:\n return self._infodict[path]\n\n try:\n archive = ZipFile(self.src)\n except BadZipfile:\n e = get_exception()\n if e.args[0].lower().startswith('bad magic number'):\n # Python2.4 can't handle zipfiles with > 64K files. 
Try using\n # /usr/bin/unzip instead\n self._legacy_file_list()\n else:\n raise\n else:\n try:\n for item in archive.infolist():\n self._infodict[item.filename] = long(item.CRC)\n except:\n archive.close()\n raise UnarchiveError('Unable to list files in the archive')\n\n return self._infodict[path]\n\n @property\n def files_in_archive(self, force_refresh=False):\n if self._files_in_archive and not force_refresh:\n return self._files_in_archive\n\n self._files_in_archive = []\n try:\n archive = ZipFile(self.src)\n except BadZipfile:\n e = get_exception()\n if e.args[0].lower().startswith('bad magic number'):\n # Python2.4 can't handle zipfiles with > 64K files. Try using\n # /usr/bin/unzip instead\n self._legacy_file_list(force_refresh)\n else:\n raise\n else:\n try:\n for member in archive.namelist():\n if member not in self.excludes:\n self._files_in_archive.append(member)\n except:\n archive.close()\n raise UnarchiveError('Unable to list files in the archive')\n\n archive.close()\n return self._files_in_archive\n\n def is_unarchived(self):\n cmd = '%s -ZT -s \"%s\"' % (self.cmd_path, self.src)\n if self.excludes:\n cmd += ' -x \"' + '\" \"'.join(self.excludes) + '\"'\n rc, out, err = self.module.run_command(cmd)\n\n old_out = out\n diff = ''\n out = ''\n if rc == 0:\n unarchived = True\n else:\n unarchived = False\n\n # Get some information related to user/group ownership\n umask = os.umask(0)\n os.umask(umask)\n\n # Get current user and group information\n groups = os.getgroups()\n run_uid = os.getuid()\n run_gid = os.getgid()\n try:\n run_owner = pwd.getpwuid(run_uid).pw_name\n except:\n run_owner = run_uid\n try:\n run_group = grp.getgrgid(run_gid).gr_name\n except:\n run_group = run_gid\n\n # Get future user ownership\n fut_owner = fut_uid = None\n if self.file_args['owner']:\n try:\n tpw = pwd.getpwname(self.file_args['owner'])\n except:\n try:\n tpw = pwd.getpwuid(self.file_args['owner'])\n except:\n tpw = pwd.getpwuid(run_uid)\n fut_owner = tpw.pw_name\n fut_uid = tpw.pw_uid\n else:\n try:\n fut_owner = run_owner\n except:\n pass\n fut_uid = run_uid\n\n # Get future group ownership\n fut_group = fut_gid = None\n if self.file_args['group']:\n try:\n tgr = grp.getgrnam(self.file_args['group'])\n except:\n try:\n tgr = grp.getgrgid(self.file_args['group'])\n except:\n tgr = grp.getgrgid(run_gid)\n fut_group = tgr.gr_name\n fut_gid = tgr.gr_gid\n else:\n try:\n fut_group = run_group\n except:\n pass\n fut_gid = run_gid\n\n for line in old_out.splitlines():\n change = False\n\n pcs = line.split()\n if len(pcs) != 8: continue\n\n ztype = pcs[0][0]\n permstr = pcs[0][1:10]\n version = pcs[0][1]\n ostype = pcs[0][2]\n size = int(pcs[3])\n path = pcs[7]\n\n # Skip excluded files\n if path in self.excludes:\n out += 'Path %s is excluded on request\\n' % path\n continue\n\n # Itemized change requires L for symlink\n if path[-1] == '/':\n if ztype != 'd':\n err += 'Path %s incorrectly tagged as \"%s\", but is a directory.\\n' % (path, ztype)\n ftype = 'd'\n elif ztype == 'l':\n ftype = 'L'\n elif ztype == '-':\n ftype = 'f'\n elif ztype == '?':\n ftype = 'f'\n\n # Some files may be storing FAT permissions, not Unix permissions\n if len(permstr) == 6:\n if path[-1] == '/':\n permstr = 'rwxrwxrwx'\n elif permstr == 'rwx---':\n permstr = 'rwxrwxrwx'\n else:\n permstr = 'rw-rw-rw-'\n\n # Test string conformity\n if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):\n raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)\n\n # DEBUG\n# err += \"%s%s %10d %s\\n\" % (ztype, 
permstr, size, path)\n\n dest = os.path.join(self.dest, path)\n try:\n st = os.lstat(dest)\n except:\n change = True\n self.includes.append(path)\n err += 'Path %s is missing\\n' % path\n diff += '>%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n # Compare file types\n if ftype == 'd' and not stat.S_ISDIR(st.st_mode):\n change = True\n self.includes.append(path)\n err += 'File %s already exists, but not as a directory\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n if ftype == 'f' and not stat.S_ISREG(st.st_mode):\n change = True\n unarchived = False\n self.includes.append(path)\n err += 'Directory %s already exists, but not as a regular file\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n if ftype == 'L' and not stat.S_ISLNK(st.st_mode):\n change = True\n self.includes.append(path)\n err += 'Directory %s already exists, but not as a symlink\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n itemized = list('.%s.......??' % ftype)\n\n dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))\n timestamp = time.mktime(dt_object.timetuple())\n\n # Compare file timestamps\n if stat.S_ISREG(st.st_mode):\n if self.module.params['keep_newer']:\n if timestamp > st.st_mtime:\n change = True\n self.includes.append(path)\n err += 'File %s is older, replacing file\\n' % path\n itemized[4] = 't'\n elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:\n # Add to excluded files, ignore other changes\n out += 'File %s is newer, excluding file\\n' % path\n continue\n else:\n if timestamp != st.st_mtime:\n change = True\n self.includes.append(path)\n err += 'File %s differs in mtime (%f vs %f)\\n' % (path, timestamp, st.st_mtime)\n itemized[4] = 't'\n\n # Compare file sizes\n if stat.S_ISREG(st.st_mode) and size != st.st_size:\n change = True\n err += 'File %s differs in size (%d vs %d)\\n' % (path, size, st.st_size)\n itemized[3] = 's'\n\n # Compare file checksums\n if stat.S_ISREG(st.st_mode):\n crc = crc32(dest)\n if crc != self._crc32(path):\n change = True\n err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\\n' % (path, self._crc32(path), crc)\n itemized[2] = 'c'\n\n # Compare file permissions\n\n # Do not handle permissions of symlinks\n if ftype != 'L':\n # Only special files require no umask-handling\n if ztype == '?':\n mode = self._permstr_to_octal(permstr, 0)\n else:\n mode = self._permstr_to_octal(permstr, umask)\n if self.file_args['mode'] and self.file_args['mode'] != stat.S_IMODE(st.st_mode):\n change = True\n err += 'Path %s differs in permissions (%o vs %o)\\n' % (path, self.file_args['mode'], stat.S_IMODE(st.st_mode))\n itemized[5] = 'p'\n elif mode != stat.S_IMODE(st.st_mode):\n change = True\n itemized[5] = 'p'\n err += 'Path %s differs in permissions (%o vs %o)\\n' % (path, mode, stat.S_IMODE(st.st_mode))\n\n # Compare file user ownership\n owner = uid = None\n try:\n owner = pwd.getpwuid(st.st_uid).pw_name\n except:\n uid = st.st_uid\n\n # If we are not root and requested owner is not our user, fail\n if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):\n raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))\n\n if owner and owner != fut_owner:\n change = True\n err += 'Path %s is owned by user %s, not by user %s as expected\\n' % (path, owner, fut_owner)\n itemized[6] = 'o'\n elif uid and uid != fut_uid:\n change = True\n err += 'Path %s is owned by uid %s, not by uid %s as expected\\n' % (path, uid, fut_uid)\n 
itemized[6] = 'o'\n\n # Compare file group ownership\n group = gid = None\n try:\n group = grp.getgrgid(st.st_gid).gr_name\n except:\n gid = st.st_gid\n\n if run_uid != 0 and fut_gid not in groups:\n raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))\n\n if group and group != fut_group:\n change = True\n err += 'Path %s is owned by group %s, not by group %s as expected\\n' % (path, group, fut_group)\n itemized[6] = 'g'\n elif gid and gid != fut_gid:\n change = True\n err += 'Path %s is owned by gid %s, not by gid %s as expected\\n' % (path, gid, fut_gid)\n itemized[6] = 'g'\n\n # Register changed files and finalize diff output\n if change:\n if path not in self.includes:\n self.includes.append(path)\n diff += '%s %s\\n' % (''.join(itemized), path)\n\n if self.includes:\n unarchived = False\n\n # DEBUG\n# out = old_out + out\n\n return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)\n\n def unarchive(self):\n cmd = '%s -o \"%s\"' % (self.cmd_path, self.src)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.includes:\n cmd += ' \"' + '\" \"'.join(self.includes) + '\"'\n # We don't need to handle excluded files, since we simply do not include them\n# if self.excludes:\n# cmd += ' -x ' + ' '.join(self.excludes)\n cmd += ' -d \"%s\"' % self.dest\n rc, out, err = self.module.run_command(cmd)\n return dict(cmd=cmd, rc=rc, out=out, err=err)\n\n def can_handle_archive(self):\n if not self.cmd_path:\n return False\n cmd = '%s -l \"%s\"' % (self.cmd_path, self.src)\n rc, out, err = self.module.run_command(cmd)\n if rc == 0:\n return True\n return False\n\n\n# class to handle gzipped tar files\nclass TgzArchive(object):\n\n def __init__(self, src, dest, file_args, module):\n self.src = src\n self.dest = dest\n self.file_args = file_args\n self.opts = module.params['extra_opts']\n self.module = module\n self.excludes = [ path.rstrip('/') for path in self.module.params['exclude']]\n # Prefer gtar (GNU tar) as it supports the compression options -zjJ\n self.cmd_path = self.module.get_bin_path('gtar', None)\n if not self.cmd_path:\n # Fallback to tar\n self.cmd_path = self.module.get_bin_path('tar')\n self.zipflag = 'z'\n self.compress_mode = 'gz'\n self._files_in_archive = []\n\n def _get_tar_fileobj(self):\n \"\"\"Returns a file object that can be read by ``tarfile.open()``.\"\"\"\n return open(self.src, 'rb')\n\n @property\n def files_in_archive(self, force_refresh=False):\n if self._files_in_archive and not force_refresh:\n return self._files_in_archive\n\n # The use of Python's tarfile module here allows us to easily avoid tricky file encoding\n # problems. 
Ref #11348\n try:\n tf = tarfile.open(fileobj=self._get_tar_fileobj(), mode='r:%s' % self.compress_mode)\n except Exception:\n raise UnarchiveError('Unable to list files in the archive')\n\n for filename in tf.getnames():\n if filename and filename not in self.excludes:\n self._files_in_archive.append(filename)\n return self._files_in_archive\n\n def is_unarchived(self):\n cmd = '%s -C \"%s\" -d%s' % (self.cmd_path, self.dest, self.zipflag)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.file_args['owner']:\n cmd += ' --owner=\"%s\"' % self.file_args['owner']\n if self.file_args['group']:\n cmd += ' --group=\"%s\"' % self.file_args['group']\n if self.file_args['mode']:\n cmd += ' --mode=\"%s\"' % self.file_args['mode']\n if self.module.params['keep_newer']:\n cmd += ' --keep-newer-files'\n if self.excludes:\n cmd += ' --exclude=\"' + '\" --exclude=\"'.join(self.excludes) + '\"'\n cmd += ' -f \"%s\"' % self.src\n rc, out, err = self.module.run_command(cmd)\n\n # Check whether the differences are in something that we're\n # setting anyway\n\n # What is different\n unarchived = True\n old_out = out\n out = ''\n run_uid = os.getuid()\n # When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient\n # Only way to be sure is to check request with what is on disk (as we do for zip)\n # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change\n for line in old_out.splitlines() + err.splitlines():\n if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):\n out += line + '\\n'\n if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):\n out += line + '\\n'\n if not self.file_args['mode'] and MODE_DIFF_RE.search(line):\n out += line + '\\n'\n if MISSING_FILE_RE.search(line):\n out += line + '\\n'\n if out:\n unarchived = False\n return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)\n\n def unarchive(self):\n cmd = '%s -C \"%s\" -x%s' % (self.cmd_path, self.dest, self.zipflag)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.file_args['owner']:\n cmd += ' --owner=\"%s\"' % self.file_args['owner']\n if self.file_args['group']:\n cmd += ' --group=\"%s\"' % self.file_args['group']\n if self.file_args['mode']:\n cmd += ' --mode=\"%s\"' % self.file_args['mode']\n if self.module.params['keep_newer']:\n cmd += ' --keep-newer-files'\n if self.excludes:\n cmd += ' --exclude=\"' + '\" --exclude=\"'.join(self.excludes) + '\"'\n cmd += ' -f \"%s\"' % (self.src)\n rc, out, err = self.module.run_command(cmd, cwd=self.dest)\n return dict(cmd=cmd, rc=rc, out=out, err=err)\n\n def can_handle_archive(self):\n if not self.cmd_path:\n return False\n\n try:\n if self.files_in_archive:\n return True\n except UnarchiveError:\n pass\n # Errors and no files in archive assume that we weren't able to\n # properly unarchive it\n return False\n\n\n# class to handle tar files that aren't compressed\nclass TarArchive(TgzArchive):\n def __init__(self, src, dest, file_args, module):\n super(TarArchive, self).__init__(src, dest, file_args, module)\n # argument to tar\n self.zipflag = ''\n # parameter for python tarfile library\n self.compress_mode = ''\n\n\n# class to handle bzip2 compressed tar files\nclass TarBzipArchive(TgzArchive):\n def __init__(self, src, dest, file_args, module):\n super(TarBzipArchive, self).__init__(src, dest, file_args, module)\n self.zipflag = 'j'\n self.compress_mode = 'bz2'\n\n\n# class to handle xz compressed tar files\nclass TarXzArchive(TgzArchive):\n def 
__init__(self, src, dest, file_args, module):\n super(TarXzArchive, self).__init__(src, dest, file_args, module)\n self.zipflag = 'J'\n self.compress_mode = ''\n\n def _get_tar_fileobj(self):\n # Python's tarfile module doesn't support xz compression so we have to manually uncompress\n # it first.\n xz_bin_path = self.module.get_bin_path('xz')\n xz_stdout = tempfile.TemporaryFile()\n # we don't use self.module.run_command() to avoid loading the whole archive in memory.\n cmd = subprocess.Popen([xz_bin_path, '-dc', self.src], stdout=xz_stdout)\n rc = cmd.wait()\n if rc != 0:\n raise UnarchiveError(\"Could not uncompress with xz\")\n xz_stdout.seek(0)\n return xz_stdout\n\n\n# try handlers in order and return the one that works or bail if none work\ndef pick_handler(src, dest, file_args, module):\n handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive]\n for handler in handlers:\n obj = handler(src, dest, file_args, module)\n if obj.can_handle_archive():\n return obj\n module.fail_json(msg='Failed to find handler for \"%s\". Make sure the required command to extract the file is installed.' % src)\n\n\ndef main():\n module = AnsibleModule(\n # not checking because of daisy chain to file module\n argument_spec = dict(\n src = dict(required=True, type='path'),\n original_basename = dict(required=False, type='str'), # used to handle 'dest is a directory' via template, a slight hack\n dest = dict(required=True, type='path'),\n copy = dict(default=True, type='bool'),\n creates = dict(required=False, type='path'),\n list_files = dict(required=False, default=False, type='bool'),\n keep_newer = dict(required=False, default=False, type='bool'),\n exclude = dict(required=False, default=[], type='list'),\n extra_opts = dict(required=False, default=[], type='list'),\n validate_certs = dict(required=False, default=True, type='bool'),\n ),\n add_file_common_args = True,\n# supports_check_mode = True,\n )\n\n # We screenscrape a huge amount of commands so use C locale anytime we do\n module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')\n\n src = os.path.expanduser(module.params['src'])\n dest = os.path.expanduser(module.params['dest'])\n copy = module.params['copy']\n file_args = module.load_file_common_arguments(module.params)\n # did tar file arrive?\n if not os.path.exists(src):\n if copy:\n module.fail_json(msg=\"Source '%s' failed to transfer\" % src)\n # If copy=false, and src= contains ://, try and download the file to a temp directory.\n elif '://' in src:\n tempdir = os.path.dirname(os.path.realpath(__file__))\n package = os.path.join(tempdir, str(src.rsplit('/', 1)[1]))\n try:\n rsp, info = fetch_url(module, src)\n # If download fails, raise a proper exception\n if rsp is None:\n raise Exception(info['msg'])\n f = open(package, 'w')\n # Read 1kb at a time to save on ram\n while True:\n data = rsp.read(BUFSIZE)\n\n if data == \"\":\n break # End of file, break while loop\n\n f.write(data)\n f.close()\n src = package\n except Exception:\n e = get_exception()\n module.fail_json(msg=\"Failure downloading %s, %s\" % (src, e))\n else:\n module.fail_json(msg=\"Source '%s' does not exist\" % src)\n if not os.access(src, os.R_OK):\n module.fail_json(msg=\"Source '%s' not readable\" % src)\n\n # skip working with 0 size archives\n try:\n if os.path.getsize(src) == 0:\n module.fail_json(msg=\"Invalid archive '%s', the file is 0 bytes\" % src)\n except Exception:\n e = get_exception()\n module.fail_json(msg=\"Source '%s' not readable\" % src)\n\n 
# is dest OK to receive tar file?\n if not os.path.isdir(dest):\n module.fail_json(msg=\"Destination '%s' is not a directory\" % dest)\n\n handler = pick_handler(src, dest, file_args, module)\n\n res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)\n\n # do we need to do unpack?\n check_results = handler.is_unarchived()\n\n # DEBUG\n# res_args['check_results'] = check_results\n\n if check_results['unarchived']:\n res_args['changed'] = False\n else:\n # do the unpack\n try:\n res_args['extract_results'] = handler.unarchive()\n if res_args['extract_results']['rc'] != 0:\n module.fail_json(msg=\"failed to unpack %s to %s\" % (src, dest), **res_args)\n except IOError:\n module.fail_json(msg=\"failed to unpack %s to %s\" % (src, dest), **res_args)\n else:\n res_args['changed'] = True\n\n if check_results.get('diff', False):\n res_args['diff'] = { 'prepared': check_results['diff'] }\n\n # Run only if we found differences (idempotence) or diff was missing\n if res_args.get('diff', True):\n # do we need to change perms?\n for filename in handler.files_in_archive:\n file_args['path'] = os.path.join(dest, filename)\n try:\n res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])\n except (IOError, OSError):\n e = get_exception()\n module.fail_json(msg=\"Unexpected error when accessing exploded file: %s\" % str(e), **res_args)\n\n if module.params['list_files']:\n res_args['files'] = handler.files_in_archive\n\n module.exit_json(**res_args)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\nif __name__ == '__main__':\n main()\n", "path": "files/unarchive.py"}]} |
gh_patches_debug_1404 | rasdani/github-patches | git_diff | matrix-org__synapse-6563 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/_synapse/admin/v2/users is broken
Running the /_synapse/admin/v2/users endpoint as documented produces an internal server error
Postgres logs:
```
STATEMENT: SELECT name, password_hash, is_guest, admin, user_type, deactivated FROM users WHERE is_guest = false AND deactivated = false ORDER BY name ASC LIMIT 10 OFFSET 0
ERROR: operator does not exist: smallint = boolean at character 95
```
`\d users` shows that `is_guest` and `deactivated` are type `smallint` not `boolean`.
```
Table "public.users"
Column | Type | Collation | Nullable | Default
----------------------------+----------+-----------+----------+---------
name | text | | |
password_hash | text | | |
creation_ts | bigint | | |
admin | smallint | | not null | 0
upgrade_ts | bigint | | |
is_guest | smallint | | not null | 0
appservice_id | text | | |
consent_version | text | | |
consent_server_notice_sent | text | | |
user_type | text | | |
deactivated | smallint | | not null | 0
```
--- END ISSUE ---
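For context, the failure is easy to reproduce outside Synapse: psycopg2 adapts Python booleans to SQL booleans, and PostgreSQL has no `smallint = boolean` operator, whereas integer parameters compare cleanly against the `smallint` columns. The sketch below is illustrative only; it assumes a reachable PostgreSQL database with the `users` schema shown above, and the connection DSN is a placeholder rather than anything from the original report.

```python
# Minimal reproduction sketch -- not part of the Synapse codebase.
# Assumes psycopg2 and a reachable PostgreSQL database; the DSN is a placeholder.
import psycopg2

conn = psycopg2.connect("dbname=synapse user=synapse")  # placeholder DSN
cur = conn.cursor()

query = (
    "SELECT name FROM users "
    "WHERE is_guest = %s AND deactivated = %s "
    "ORDER BY name ASC LIMIT 10"
)

try:
    # Python bools are sent as SQL booleans, reproducing
    # "operator does not exist: smallint = boolean"
    cur.execute(query, (False, False))
except psycopg2.Error:
    conn.rollback()

# Integer parameters match the smallint columns and the query succeeds.
cur.execute(query, (0, 0))
print(cur.fetchall())
```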
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/storage/data_stores/main/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2014-2016 OpenMarket Ltd
3 # Copyright 2018 New Vector Ltd
4 # Copyright 2019 The Matrix.org Foundation C.I.C.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 import calendar
19 import logging
20 import time
21
22 from synapse.api.constants import PresenceState
23 from synapse.storage.database import Database
24 from synapse.storage.engines import PostgresEngine
25 from synapse.storage.util.id_generators import (
26 ChainedIdGenerator,
27 IdGenerator,
28 StreamIdGenerator,
29 )
30 from synapse.util.caches.stream_change_cache import StreamChangeCache
31
32 from .account_data import AccountDataStore
33 from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore
34 from .cache import CacheInvalidationStore
35 from .client_ips import ClientIpStore
36 from .deviceinbox import DeviceInboxStore
37 from .devices import DeviceStore
38 from .directory import DirectoryStore
39 from .e2e_room_keys import EndToEndRoomKeyStore
40 from .end_to_end_keys import EndToEndKeyStore
41 from .event_federation import EventFederationStore
42 from .event_push_actions import EventPushActionsStore
43 from .events import EventsStore
44 from .events_bg_updates import EventsBackgroundUpdatesStore
45 from .filtering import FilteringStore
46 from .group_server import GroupServerStore
47 from .keys import KeyStore
48 from .media_repository import MediaRepositoryStore
49 from .monthly_active_users import MonthlyActiveUsersStore
50 from .openid import OpenIdStore
51 from .presence import PresenceStore, UserPresenceState
52 from .profile import ProfileStore
53 from .push_rule import PushRuleStore
54 from .pusher import PusherStore
55 from .receipts import ReceiptsStore
56 from .registration import RegistrationStore
57 from .rejections import RejectionsStore
58 from .relations import RelationsStore
59 from .room import RoomStore
60 from .roommember import RoomMemberStore
61 from .search import SearchStore
62 from .signatures import SignatureStore
63 from .state import StateStore
64 from .stats import StatsStore
65 from .stream import StreamStore
66 from .tags import TagsStore
67 from .transactions import TransactionStore
68 from .user_directory import UserDirectoryStore
69 from .user_erasure_store import UserErasureStore
70
71 logger = logging.getLogger(__name__)
72
73
74 class DataStore(
75 EventsBackgroundUpdatesStore,
76 RoomMemberStore,
77 RoomStore,
78 RegistrationStore,
79 StreamStore,
80 ProfileStore,
81 PresenceStore,
82 TransactionStore,
83 DirectoryStore,
84 KeyStore,
85 StateStore,
86 SignatureStore,
87 ApplicationServiceStore,
88 EventsStore,
89 EventFederationStore,
90 MediaRepositoryStore,
91 RejectionsStore,
92 FilteringStore,
93 PusherStore,
94 PushRuleStore,
95 ApplicationServiceTransactionStore,
96 ReceiptsStore,
97 EndToEndKeyStore,
98 EndToEndRoomKeyStore,
99 SearchStore,
100 TagsStore,
101 AccountDataStore,
102 EventPushActionsStore,
103 OpenIdStore,
104 ClientIpStore,
105 DeviceStore,
106 DeviceInboxStore,
107 UserDirectoryStore,
108 GroupServerStore,
109 UserErasureStore,
110 MonthlyActiveUsersStore,
111 StatsStore,
112 RelationsStore,
113 CacheInvalidationStore,
114 ):
115 def __init__(self, database: Database, db_conn, hs):
116 self.hs = hs
117 self._clock = hs.get_clock()
118 self.database_engine = database.engine
119
120 all_users_native = are_all_users_on_domain(
121 db_conn.cursor(), database.engine, hs.hostname
122 )
123 if not all_users_native:
124 raise Exception(
125 "Found users in database not native to %s!\n"
126 "You cannot changed a synapse server_name after it's been configured"
127 % (hs.hostname,)
128 )
129
130 self._stream_id_gen = StreamIdGenerator(
131 db_conn,
132 "events",
133 "stream_ordering",
134 extra_tables=[("local_invites", "stream_id")],
135 )
136 self._backfill_id_gen = StreamIdGenerator(
137 db_conn,
138 "events",
139 "stream_ordering",
140 step=-1,
141 extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
142 )
143 self._presence_id_gen = StreamIdGenerator(
144 db_conn, "presence_stream", "stream_id"
145 )
146 self._device_inbox_id_gen = StreamIdGenerator(
147 db_conn, "device_max_stream_id", "stream_id"
148 )
149 self._public_room_id_gen = StreamIdGenerator(
150 db_conn, "public_room_list_stream", "stream_id"
151 )
152 self._device_list_id_gen = StreamIdGenerator(
153 db_conn,
154 "device_lists_stream",
155 "stream_id",
156 extra_tables=[("user_signature_stream", "stream_id")],
157 )
158 self._cross_signing_id_gen = StreamIdGenerator(
159 db_conn, "e2e_cross_signing_keys", "stream_id"
160 )
161
162 self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
163 self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id")
164 self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id")
165 self._push_rules_enable_id_gen = IdGenerator(db_conn, "push_rules_enable", "id")
166 self._push_rules_stream_id_gen = ChainedIdGenerator(
167 self._stream_id_gen, db_conn, "push_rules_stream", "stream_id"
168 )
169 self._pushers_id_gen = StreamIdGenerator(
170 db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
171 )
172 self._group_updates_id_gen = StreamIdGenerator(
173 db_conn, "local_group_updates", "stream_id"
174 )
175
176 if isinstance(self.database_engine, PostgresEngine):
177 self._cache_id_gen = StreamIdGenerator(
178 db_conn, "cache_invalidation_stream", "stream_id"
179 )
180 else:
181 self._cache_id_gen = None
182
183 super(DataStore, self).__init__(database, db_conn, hs)
184
185 self._presence_on_startup = self._get_active_presence(db_conn)
186
187 presence_cache_prefill, min_presence_val = self.db.get_cache_dict(
188 db_conn,
189 "presence_stream",
190 entity_column="user_id",
191 stream_column="stream_id",
192 max_value=self._presence_id_gen.get_current_token(),
193 )
194 self.presence_stream_cache = StreamChangeCache(
195 "PresenceStreamChangeCache",
196 min_presence_val,
197 prefilled_cache=presence_cache_prefill,
198 )
199
200 max_device_inbox_id = self._device_inbox_id_gen.get_current_token()
201 device_inbox_prefill, min_device_inbox_id = self.db.get_cache_dict(
202 db_conn,
203 "device_inbox",
204 entity_column="user_id",
205 stream_column="stream_id",
206 max_value=max_device_inbox_id,
207 limit=1000,
208 )
209 self._device_inbox_stream_cache = StreamChangeCache(
210 "DeviceInboxStreamChangeCache",
211 min_device_inbox_id,
212 prefilled_cache=device_inbox_prefill,
213 )
214 # The federation outbox and the local device inbox uses the same
215 # stream_id generator.
216 device_outbox_prefill, min_device_outbox_id = self.db.get_cache_dict(
217 db_conn,
218 "device_federation_outbox",
219 entity_column="destination",
220 stream_column="stream_id",
221 max_value=max_device_inbox_id,
222 limit=1000,
223 )
224 self._device_federation_outbox_stream_cache = StreamChangeCache(
225 "DeviceFederationOutboxStreamChangeCache",
226 min_device_outbox_id,
227 prefilled_cache=device_outbox_prefill,
228 )
229
230 device_list_max = self._device_list_id_gen.get_current_token()
231 self._device_list_stream_cache = StreamChangeCache(
232 "DeviceListStreamChangeCache", device_list_max
233 )
234 self._user_signature_stream_cache = StreamChangeCache(
235 "UserSignatureStreamChangeCache", device_list_max
236 )
237 self._device_list_federation_stream_cache = StreamChangeCache(
238 "DeviceListFederationStreamChangeCache", device_list_max
239 )
240
241 events_max = self._stream_id_gen.get_current_token()
242 curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(
243 db_conn,
244 "current_state_delta_stream",
245 entity_column="room_id",
246 stream_column="stream_id",
247 max_value=events_max, # As we share the stream id with events token
248 limit=1000,
249 )
250 self._curr_state_delta_stream_cache = StreamChangeCache(
251 "_curr_state_delta_stream_cache",
252 min_curr_state_delta_id,
253 prefilled_cache=curr_state_delta_prefill,
254 )
255
256 _group_updates_prefill, min_group_updates_id = self.db.get_cache_dict(
257 db_conn,
258 "local_group_updates",
259 entity_column="user_id",
260 stream_column="stream_id",
261 max_value=self._group_updates_id_gen.get_current_token(),
262 limit=1000,
263 )
264 self._group_updates_stream_cache = StreamChangeCache(
265 "_group_updates_stream_cache",
266 min_group_updates_id,
267 prefilled_cache=_group_updates_prefill,
268 )
269
270 self._stream_order_on_start = self.get_room_max_stream_ordering()
271 self._min_stream_order_on_start = self.get_room_min_stream_ordering()
272
273 # Used in _generate_user_daily_visits to keep track of progress
274 self._last_user_visit_update = self._get_start_of_day()
275
276 def take_presence_startup_info(self):
277 active_on_startup = self._presence_on_startup
278 self._presence_on_startup = None
279 return active_on_startup
280
281 def _get_active_presence(self, db_conn):
282 """Fetch non-offline presence from the database so that we can register
283 the appropriate time outs.
284 """
285
286 sql = (
287 "SELECT user_id, state, last_active_ts, last_federation_update_ts,"
288 " last_user_sync_ts, status_msg, currently_active FROM presence_stream"
289 " WHERE state != ?"
290 )
291 sql = self.database_engine.convert_param_style(sql)
292
293 txn = db_conn.cursor()
294 txn.execute(sql, (PresenceState.OFFLINE,))
295 rows = self.db.cursor_to_dict(txn)
296 txn.close()
297
298 for row in rows:
299 row["currently_active"] = bool(row["currently_active"])
300
301 return [UserPresenceState(**row) for row in rows]
302
303 def count_daily_users(self):
304 """
305 Counts the number of users who used this homeserver in the last 24 hours.
306 """
307 yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
308 return self.db.runInteraction("count_daily_users", self._count_users, yesterday)
309
310 def count_monthly_users(self):
311 """
312 Counts the number of users who used this homeserver in the last 30 days.
313 Note this method is intended for phonehome metrics only and is different
314 from the mau figure in synapse.storage.monthly_active_users which,
315 amongst other things, includes a 3 day grace period before a user counts.
316 """
317 thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
318 return self.db.runInteraction(
319 "count_monthly_users", self._count_users, thirty_days_ago
320 )
321
322 def _count_users(self, txn, time_from):
323 """
324 Returns number of users seen in the past time_from period
325 """
326 sql = """
327 SELECT COALESCE(count(*), 0) FROM (
328 SELECT user_id FROM user_ips
329 WHERE last_seen > ?
330 GROUP BY user_id
331 ) u
332 """
333 txn.execute(sql, (time_from,))
334 (count,) = txn.fetchone()
335 return count
336
337 def count_r30_users(self):
338 """
339 Counts the number of 30 day retained users, defined as:-
340 * Users who have created their accounts more than 30 days ago
341 * Where last seen at most 30 days ago
342 * Where account creation and last_seen are > 30 days apart
343
344 Returns counts globaly for a given user as well as breaking
345 by platform
346 """
347
348 def _count_r30_users(txn):
349 thirty_days_in_secs = 86400 * 30
350 now = int(self._clock.time())
351 thirty_days_ago_in_secs = now - thirty_days_in_secs
352
353 sql = """
354 SELECT platform, COALESCE(count(*), 0) FROM (
355 SELECT
356 users.name, platform, users.creation_ts * 1000,
357 MAX(uip.last_seen)
358 FROM users
359 INNER JOIN (
360 SELECT
361 user_id,
362 last_seen,
363 CASE
364 WHEN user_agent LIKE '%%Android%%' THEN 'android'
365 WHEN user_agent LIKE '%%iOS%%' THEN 'ios'
366 WHEN user_agent LIKE '%%Electron%%' THEN 'electron'
367 WHEN user_agent LIKE '%%Mozilla%%' THEN 'web'
368 WHEN user_agent LIKE '%%Gecko%%' THEN 'web'
369 ELSE 'unknown'
370 END
371 AS platform
372 FROM user_ips
373 ) uip
374 ON users.name = uip.user_id
375 AND users.appservice_id is NULL
376 AND users.creation_ts < ?
377 AND uip.last_seen/1000 > ?
378 AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30
379 GROUP BY users.name, platform, users.creation_ts
380 ) u GROUP BY platform
381 """
382
383 results = {}
384 txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))
385
386 for row in txn:
387 if row[0] == "unknown":
388 pass
389 results[row[0]] = row[1]
390
391 sql = """
392 SELECT COALESCE(count(*), 0) FROM (
393 SELECT users.name, users.creation_ts * 1000,
394 MAX(uip.last_seen)
395 FROM users
396 INNER JOIN (
397 SELECT
398 user_id,
399 last_seen
400 FROM user_ips
401 ) uip
402 ON users.name = uip.user_id
403 AND appservice_id is NULL
404 AND users.creation_ts < ?
405 AND uip.last_seen/1000 > ?
406 AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30
407 GROUP BY users.name, users.creation_ts
408 ) u
409 """
410
411 txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))
412
413 (count,) = txn.fetchone()
414 results["all"] = count
415
416 return results
417
418 return self.db.runInteraction("count_r30_users", _count_r30_users)
419
420 def _get_start_of_day(self):
421 """
422 Returns millisecond unixtime for start of UTC day.
423 """
424 now = time.gmtime()
425 today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0))
426 return today_start * 1000
427
428 def generate_user_daily_visits(self):
429 """
430 Generates daily visit data for use in cohort/ retention analysis
431 """
432
433 def _generate_user_daily_visits(txn):
434 logger.info("Calling _generate_user_daily_visits")
435 today_start = self._get_start_of_day()
436 a_day_in_milliseconds = 24 * 60 * 60 * 1000
437 now = self.clock.time_msec()
438
439 sql = """
440 INSERT INTO user_daily_visits (user_id, device_id, timestamp)
441 SELECT u.user_id, u.device_id, ?
442 FROM user_ips AS u
443 LEFT JOIN (
444 SELECT user_id, device_id, timestamp FROM user_daily_visits
445 WHERE timestamp = ?
446 ) udv
447 ON u.user_id = udv.user_id AND u.device_id=udv.device_id
448 INNER JOIN users ON users.name=u.user_id
449 WHERE last_seen > ? AND last_seen <= ?
450 AND udv.timestamp IS NULL AND users.is_guest=0
451 AND users.appservice_id IS NULL
452 GROUP BY u.user_id, u.device_id
453 """
454
455 # This means that the day has rolled over but there could still
456 # be entries from the previous day. There is an edge case
457 # where if the user logs in at 23:59 and overwrites their
458 # last_seen at 00:01 then they will not be counted in the
459 # previous day's stats - it is important that the query is run
460 # often to minimise this case.
461 if today_start > self._last_user_visit_update:
462 yesterday_start = today_start - a_day_in_milliseconds
463 txn.execute(
464 sql,
465 (
466 yesterday_start,
467 yesterday_start,
468 self._last_user_visit_update,
469 today_start,
470 ),
471 )
472 self._last_user_visit_update = today_start
473
474 txn.execute(
475 sql, (today_start, today_start, self._last_user_visit_update, now)
476 )
477 # Update _last_user_visit_update to now. The reason to do this
478 # rather just clamping to the beginning of the day is to limit
479 # the size of the join - meaning that the query can be run more
480 # frequently
481 self._last_user_visit_update = now
482
483 return self.db.runInteraction(
484 "generate_user_daily_visits", _generate_user_daily_visits
485 )
486
487 def get_users(self):
488 """Function to retrieve a list of users in users table.
489
490 Args:
491 Returns:
492 defer.Deferred: resolves to list[dict[str, Any]]
493 """
494 return self.db.simple_select_list(
495 table="users",
496 keyvalues={},
497 retcols=[
498 "name",
499 "password_hash",
500 "is_guest",
501 "admin",
502 "user_type",
503 "deactivated",
504 ],
505 desc="get_users",
506 )
507
508 def get_users_paginate(
509 self, start, limit, name=None, guests=True, deactivated=False
510 ):
511 """Function to retrieve a paginated list of users from
512 users list. This will return a json list of users.
513
514 Args:
515 start (int): start number to begin the query from
516 limit (int): number of rows to retrieve
517 name (string): filter for user names
518 guests (bool): whether to in include guest users
519 deactivated (bool): whether to include deactivated users
520 Returns:
521 defer.Deferred: resolves to list[dict[str, Any]]
522 """
523 name_filter = {}
524 if name:
525 name_filter["name"] = "%" + name + "%"
526
527 attr_filter = {}
528 if not guests:
529 attr_filter["is_guest"] = False
530 if not deactivated:
531 attr_filter["deactivated"] = False
532
533 return self.db.simple_select_list_paginate(
534 desc="get_users_paginate",
535 table="users",
536 orderby="name",
537 start=start,
538 limit=limit,
539 filters=name_filter,
540 keyvalues=attr_filter,
541 retcols=[
542 "name",
543 "password_hash",
544 "is_guest",
545 "admin",
546 "user_type",
547 "deactivated",
548 ],
549 )
550
551 def search_users(self, term):
552 """Function to search users list for one or more users with
553 the matched term.
554
555 Args:
556 term (str): search term
557 col (str): column to query term should be matched to
558 Returns:
559 defer.Deferred: resolves to list[dict[str, Any]]
560 """
561 return self.db.simple_search_list(
562 table="users",
563 term=term,
564 col="name",
565 retcols=["name", "password_hash", "is_guest", "admin", "user_type"],
566 desc="search_users",
567 )
568
569
570 def are_all_users_on_domain(txn, database_engine, domain):
571 sql = database_engine.convert_param_style(
572 "SELECT COUNT(*) FROM users WHERE name NOT LIKE ?"
573 )
574 pat = "%:" + domain
575 txn.execute(sql, (pat,))
576 num_not_matching = txn.fetchall()[0][0]
577 if num_not_matching == 0:
578 return True
579 return False
580
```
--- END FILES ---
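The query from the issue originates in `get_users_paginate` above, which passes `attr_filter` as `keyvalues` to `self.db.simple_select_list_paginate`. As a rough illustration of why that matters -- this is a simplified stand-in under stated assumptions, not Synapse's actual implementation -- a keyvalues dict of this kind is typically rendered into an equality `WHERE` clause with the dict values used as query parameters, so Python booleans travel all the way to PostgreSQL:

```python
# Simplified stand-in for how a keyvalues dict can become a WHERE clause.
# This is NOT Synapse's simple_select_list_paginate implementation.
def build_where(keyvalues):
    """Return an equality clause and the matching parameter tuple."""
    clause = " AND ".join("%s = %%s" % col for col in keyvalues)
    return clause, tuple(keyvalues.values())

# Buggy filter: the parameters are Python bools, which the driver sends as
# SQL booleans -- PostgreSQL then rejects "smallint = boolean".
print(build_where({"is_guest": False, "deactivated": False}))
# ('is_guest = %s AND deactivated = %s', (False, False))

# Fixed filter: integers compare cleanly against the smallint columns.
print(build_where({"is_guest": 0, "deactivated": 0}))
# ('is_guest = %s AND deactivated = %s', (0, 0))
```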
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py
--- a/synapse/storage/data_stores/main/__init__.py
+++ b/synapse/storage/data_stores/main/__init__.py
@@ -526,9 +526,9 @@
attr_filter = {}
if not guests:
- attr_filter["is_guest"] = False
+ attr_filter["is_guest"] = 0
if not deactivated:
- attr_filter["deactivated"] = False
+ attr_filter["deactivated"] = 0
return self.db.simple_select_list_paginate(
desc="get_users_paginate",
| {"golden_diff": "diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py\n--- a/synapse/storage/data_stores/main/__init__.py\n+++ b/synapse/storage/data_stores/main/__init__.py\n@@ -526,9 +526,9 @@\n \n attr_filter = {}\n if not guests:\n- attr_filter[\"is_guest\"] = False\n+ attr_filter[\"is_guest\"] = 0\n if not deactivated:\n- attr_filter[\"deactivated\"] = False\n+ attr_filter[\"deactivated\"] = 0\n \n return self.db.simple_select_list_paginate(\n desc=\"get_users_paginate\",\n", "issue": "/_synapse/admin/v2/users is broken\nRunning the /_synapse/admin/v2/users as documented produces an internal server error\r\n\r\nPostgres logs:\r\n```\r\nSTATEMENT: SELECT name, password_hash, is_guest, admin, user_type, deactivated FROM users WHERE is_guest = false AND deactivated = false ORDER BY name ASC LIMIT 10 OFFSET 0\r\nERROR: operator does not exist: smallint = boolean at character 95\r\n```\r\n\r\n`\\d users` shows that `is_guest` and `deactivated` are type `smallint` not `boolean`.\r\n```\r\n\r\n Table \"public.users\"\r\n Column | Type | Collation | Nullable | Default\r\n----------------------------+----------+-----------+----------+---------\r\n name | text | | |\r\n password_hash | text | | |\r\n creation_ts | bigint | | |\r\n admin | smallint | | not null | 0\r\n upgrade_ts | bigint | | |\r\n is_guest | smallint | | not null | 0\r\n appservice_id | text | | |\r\n consent_version | text | | |\r\n consent_server_notice_sent | text | | |\r\n user_type | text | | |\r\n deactivated | smallint | | not null | 0\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n# Copyright 2018 New Vector Ltd\n# Copyright 2019 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport calendar\nimport logging\nimport time\n\nfrom synapse.api.constants import PresenceState\nfrom synapse.storage.database import Database\nfrom synapse.storage.engines import PostgresEngine\nfrom synapse.storage.util.id_generators import (\n ChainedIdGenerator,\n IdGenerator,\n StreamIdGenerator,\n)\nfrom synapse.util.caches.stream_change_cache import StreamChangeCache\n\nfrom .account_data import AccountDataStore\nfrom .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore\nfrom .cache import CacheInvalidationStore\nfrom .client_ips import ClientIpStore\nfrom .deviceinbox import DeviceInboxStore\nfrom .devices import DeviceStore\nfrom .directory import DirectoryStore\nfrom .e2e_room_keys import EndToEndRoomKeyStore\nfrom .end_to_end_keys import EndToEndKeyStore\nfrom .event_federation import EventFederationStore\nfrom .event_push_actions import EventPushActionsStore\nfrom .events import EventsStore\nfrom .events_bg_updates import EventsBackgroundUpdatesStore\nfrom .filtering import FilteringStore\nfrom .group_server import GroupServerStore\nfrom .keys import KeyStore\nfrom .media_repository import MediaRepositoryStore\nfrom .monthly_active_users import 
MonthlyActiveUsersStore\nfrom .openid import OpenIdStore\nfrom .presence import PresenceStore, UserPresenceState\nfrom .profile import ProfileStore\nfrom .push_rule import PushRuleStore\nfrom .pusher import PusherStore\nfrom .receipts import ReceiptsStore\nfrom .registration import RegistrationStore\nfrom .rejections import RejectionsStore\nfrom .relations import RelationsStore\nfrom .room import RoomStore\nfrom .roommember import RoomMemberStore\nfrom .search import SearchStore\nfrom .signatures import SignatureStore\nfrom .state import StateStore\nfrom .stats import StatsStore\nfrom .stream import StreamStore\nfrom .tags import TagsStore\nfrom .transactions import TransactionStore\nfrom .user_directory import UserDirectoryStore\nfrom .user_erasure_store import UserErasureStore\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataStore(\n EventsBackgroundUpdatesStore,\n RoomMemberStore,\n RoomStore,\n RegistrationStore,\n StreamStore,\n ProfileStore,\n PresenceStore,\n TransactionStore,\n DirectoryStore,\n KeyStore,\n StateStore,\n SignatureStore,\n ApplicationServiceStore,\n EventsStore,\n EventFederationStore,\n MediaRepositoryStore,\n RejectionsStore,\n FilteringStore,\n PusherStore,\n PushRuleStore,\n ApplicationServiceTransactionStore,\n ReceiptsStore,\n EndToEndKeyStore,\n EndToEndRoomKeyStore,\n SearchStore,\n TagsStore,\n AccountDataStore,\n EventPushActionsStore,\n OpenIdStore,\n ClientIpStore,\n DeviceStore,\n DeviceInboxStore,\n UserDirectoryStore,\n GroupServerStore,\n UserErasureStore,\n MonthlyActiveUsersStore,\n StatsStore,\n RelationsStore,\n CacheInvalidationStore,\n):\n def __init__(self, database: Database, db_conn, hs):\n self.hs = hs\n self._clock = hs.get_clock()\n self.database_engine = database.engine\n\n all_users_native = are_all_users_on_domain(\n db_conn.cursor(), database.engine, hs.hostname\n )\n if not all_users_native:\n raise Exception(\n \"Found users in database not native to %s!\\n\"\n \"You cannot changed a synapse server_name after it's been configured\"\n % (hs.hostname,)\n )\n\n self._stream_id_gen = StreamIdGenerator(\n db_conn,\n \"events\",\n \"stream_ordering\",\n extra_tables=[(\"local_invites\", \"stream_id\")],\n )\n self._backfill_id_gen = StreamIdGenerator(\n db_conn,\n \"events\",\n \"stream_ordering\",\n step=-1,\n extra_tables=[(\"ex_outlier_stream\", \"event_stream_ordering\")],\n )\n self._presence_id_gen = StreamIdGenerator(\n db_conn, \"presence_stream\", \"stream_id\"\n )\n self._device_inbox_id_gen = StreamIdGenerator(\n db_conn, \"device_max_stream_id\", \"stream_id\"\n )\n self._public_room_id_gen = StreamIdGenerator(\n db_conn, \"public_room_list_stream\", \"stream_id\"\n )\n self._device_list_id_gen = StreamIdGenerator(\n db_conn,\n \"device_lists_stream\",\n \"stream_id\",\n extra_tables=[(\"user_signature_stream\", \"stream_id\")],\n )\n self._cross_signing_id_gen = StreamIdGenerator(\n db_conn, \"e2e_cross_signing_keys\", \"stream_id\"\n )\n\n self._access_tokens_id_gen = IdGenerator(db_conn, \"access_tokens\", \"id\")\n self._event_reports_id_gen = IdGenerator(db_conn, \"event_reports\", \"id\")\n self._push_rule_id_gen = IdGenerator(db_conn, \"push_rules\", \"id\")\n self._push_rules_enable_id_gen = IdGenerator(db_conn, \"push_rules_enable\", \"id\")\n self._push_rules_stream_id_gen = ChainedIdGenerator(\n self._stream_id_gen, db_conn, \"push_rules_stream\", \"stream_id\"\n )\n self._pushers_id_gen = StreamIdGenerator(\n db_conn, \"pushers\", \"id\", extra_tables=[(\"deleted_pushers\", \"stream_id\")]\n )\n 
self._group_updates_id_gen = StreamIdGenerator(\n db_conn, \"local_group_updates\", \"stream_id\"\n )\n\n if isinstance(self.database_engine, PostgresEngine):\n self._cache_id_gen = StreamIdGenerator(\n db_conn, \"cache_invalidation_stream\", \"stream_id\"\n )\n else:\n self._cache_id_gen = None\n\n super(DataStore, self).__init__(database, db_conn, hs)\n\n self._presence_on_startup = self._get_active_presence(db_conn)\n\n presence_cache_prefill, min_presence_val = self.db.get_cache_dict(\n db_conn,\n \"presence_stream\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=self._presence_id_gen.get_current_token(),\n )\n self.presence_stream_cache = StreamChangeCache(\n \"PresenceStreamChangeCache\",\n min_presence_val,\n prefilled_cache=presence_cache_prefill,\n )\n\n max_device_inbox_id = self._device_inbox_id_gen.get_current_token()\n device_inbox_prefill, min_device_inbox_id = self.db.get_cache_dict(\n db_conn,\n \"device_inbox\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=max_device_inbox_id,\n limit=1000,\n )\n self._device_inbox_stream_cache = StreamChangeCache(\n \"DeviceInboxStreamChangeCache\",\n min_device_inbox_id,\n prefilled_cache=device_inbox_prefill,\n )\n # The federation outbox and the local device inbox uses the same\n # stream_id generator.\n device_outbox_prefill, min_device_outbox_id = self.db.get_cache_dict(\n db_conn,\n \"device_federation_outbox\",\n entity_column=\"destination\",\n stream_column=\"stream_id\",\n max_value=max_device_inbox_id,\n limit=1000,\n )\n self._device_federation_outbox_stream_cache = StreamChangeCache(\n \"DeviceFederationOutboxStreamChangeCache\",\n min_device_outbox_id,\n prefilled_cache=device_outbox_prefill,\n )\n\n device_list_max = self._device_list_id_gen.get_current_token()\n self._device_list_stream_cache = StreamChangeCache(\n \"DeviceListStreamChangeCache\", device_list_max\n )\n self._user_signature_stream_cache = StreamChangeCache(\n \"UserSignatureStreamChangeCache\", device_list_max\n )\n self._device_list_federation_stream_cache = StreamChangeCache(\n \"DeviceListFederationStreamChangeCache\", device_list_max\n )\n\n events_max = self._stream_id_gen.get_current_token()\n curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(\n db_conn,\n \"current_state_delta_stream\",\n entity_column=\"room_id\",\n stream_column=\"stream_id\",\n max_value=events_max, # As we share the stream id with events token\n limit=1000,\n )\n self._curr_state_delta_stream_cache = StreamChangeCache(\n \"_curr_state_delta_stream_cache\",\n min_curr_state_delta_id,\n prefilled_cache=curr_state_delta_prefill,\n )\n\n _group_updates_prefill, min_group_updates_id = self.db.get_cache_dict(\n db_conn,\n \"local_group_updates\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=self._group_updates_id_gen.get_current_token(),\n limit=1000,\n )\n self._group_updates_stream_cache = StreamChangeCache(\n \"_group_updates_stream_cache\",\n min_group_updates_id,\n prefilled_cache=_group_updates_prefill,\n )\n\n self._stream_order_on_start = self.get_room_max_stream_ordering()\n self._min_stream_order_on_start = self.get_room_min_stream_ordering()\n\n # Used in _generate_user_daily_visits to keep track of progress\n self._last_user_visit_update = self._get_start_of_day()\n\n def take_presence_startup_info(self):\n active_on_startup = self._presence_on_startup\n self._presence_on_startup = None\n return active_on_startup\n\n def _get_active_presence(self, db_conn):\n \"\"\"Fetch 
non-offline presence from the database so that we can register\n the appropriate time outs.\n \"\"\"\n\n sql = (\n \"SELECT user_id, state, last_active_ts, last_federation_update_ts,\"\n \" last_user_sync_ts, status_msg, currently_active FROM presence_stream\"\n \" WHERE state != ?\"\n )\n sql = self.database_engine.convert_param_style(sql)\n\n txn = db_conn.cursor()\n txn.execute(sql, (PresenceState.OFFLINE,))\n rows = self.db.cursor_to_dict(txn)\n txn.close()\n\n for row in rows:\n row[\"currently_active\"] = bool(row[\"currently_active\"])\n\n return [UserPresenceState(**row) for row in rows]\n\n def count_daily_users(self):\n \"\"\"\n Counts the number of users who used this homeserver in the last 24 hours.\n \"\"\"\n yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)\n return self.db.runInteraction(\"count_daily_users\", self._count_users, yesterday)\n\n def count_monthly_users(self):\n \"\"\"\n Counts the number of users who used this homeserver in the last 30 days.\n Note this method is intended for phonehome metrics only and is different\n from the mau figure in synapse.storage.monthly_active_users which,\n amongst other things, includes a 3 day grace period before a user counts.\n \"\"\"\n thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)\n return self.db.runInteraction(\n \"count_monthly_users\", self._count_users, thirty_days_ago\n )\n\n def _count_users(self, txn, time_from):\n \"\"\"\n Returns number of users seen in the past time_from period\n \"\"\"\n sql = \"\"\"\n SELECT COALESCE(count(*), 0) FROM (\n SELECT user_id FROM user_ips\n WHERE last_seen > ?\n GROUP BY user_id\n ) u\n \"\"\"\n txn.execute(sql, (time_from,))\n (count,) = txn.fetchone()\n return count\n\n def count_r30_users(self):\n \"\"\"\n Counts the number of 30 day retained users, defined as:-\n * Users who have created their accounts more than 30 days ago\n * Where last seen at most 30 days ago\n * Where account creation and last_seen are > 30 days apart\n\n Returns counts globaly for a given user as well as breaking\n by platform\n \"\"\"\n\n def _count_r30_users(txn):\n thirty_days_in_secs = 86400 * 30\n now = int(self._clock.time())\n thirty_days_ago_in_secs = now - thirty_days_in_secs\n\n sql = \"\"\"\n SELECT platform, COALESCE(count(*), 0) FROM (\n SELECT\n users.name, platform, users.creation_ts * 1000,\n MAX(uip.last_seen)\n FROM users\n INNER JOIN (\n SELECT\n user_id,\n last_seen,\n CASE\n WHEN user_agent LIKE '%%Android%%' THEN 'android'\n WHEN user_agent LIKE '%%iOS%%' THEN 'ios'\n WHEN user_agent LIKE '%%Electron%%' THEN 'electron'\n WHEN user_agent LIKE '%%Mozilla%%' THEN 'web'\n WHEN user_agent LIKE '%%Gecko%%' THEN 'web'\n ELSE 'unknown'\n END\n AS platform\n FROM user_ips\n ) uip\n ON users.name = uip.user_id\n AND users.appservice_id is NULL\n AND users.creation_ts < ?\n AND uip.last_seen/1000 > ?\n AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30\n GROUP BY users.name, platform, users.creation_ts\n ) u GROUP BY platform\n \"\"\"\n\n results = {}\n txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))\n\n for row in txn:\n if row[0] == \"unknown\":\n pass\n results[row[0]] = row[1]\n\n sql = \"\"\"\n SELECT COALESCE(count(*), 0) FROM (\n SELECT users.name, users.creation_ts * 1000,\n MAX(uip.last_seen)\n FROM users\n INNER JOIN (\n SELECT\n user_id,\n last_seen\n FROM user_ips\n ) uip\n ON users.name = uip.user_id\n AND appservice_id is NULL\n AND users.creation_ts < ?\n AND uip.last_seen/1000 > ?\n AND (uip.last_seen/1000) - 
users.creation_ts > 86400 * 30\n GROUP BY users.name, users.creation_ts\n ) u\n \"\"\"\n\n txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))\n\n (count,) = txn.fetchone()\n results[\"all\"] = count\n\n return results\n\n return self.db.runInteraction(\"count_r30_users\", _count_r30_users)\n\n def _get_start_of_day(self):\n \"\"\"\n Returns millisecond unixtime for start of UTC day.\n \"\"\"\n now = time.gmtime()\n today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0))\n return today_start * 1000\n\n def generate_user_daily_visits(self):\n \"\"\"\n Generates daily visit data for use in cohort/ retention analysis\n \"\"\"\n\n def _generate_user_daily_visits(txn):\n logger.info(\"Calling _generate_user_daily_visits\")\n today_start = self._get_start_of_day()\n a_day_in_milliseconds = 24 * 60 * 60 * 1000\n now = self.clock.time_msec()\n\n sql = \"\"\"\n INSERT INTO user_daily_visits (user_id, device_id, timestamp)\n SELECT u.user_id, u.device_id, ?\n FROM user_ips AS u\n LEFT JOIN (\n SELECT user_id, device_id, timestamp FROM user_daily_visits\n WHERE timestamp = ?\n ) udv\n ON u.user_id = udv.user_id AND u.device_id=udv.device_id\n INNER JOIN users ON users.name=u.user_id\n WHERE last_seen > ? AND last_seen <= ?\n AND udv.timestamp IS NULL AND users.is_guest=0\n AND users.appservice_id IS NULL\n GROUP BY u.user_id, u.device_id\n \"\"\"\n\n # This means that the day has rolled over but there could still\n # be entries from the previous day. There is an edge case\n # where if the user logs in at 23:59 and overwrites their\n # last_seen at 00:01 then they will not be counted in the\n # previous day's stats - it is important that the query is run\n # often to minimise this case.\n if today_start > self._last_user_visit_update:\n yesterday_start = today_start - a_day_in_milliseconds\n txn.execute(\n sql,\n (\n yesterday_start,\n yesterday_start,\n self._last_user_visit_update,\n today_start,\n ),\n )\n self._last_user_visit_update = today_start\n\n txn.execute(\n sql, (today_start, today_start, self._last_user_visit_update, now)\n )\n # Update _last_user_visit_update to now. The reason to do this\n # rather just clamping to the beginning of the day is to limit\n # the size of the join - meaning that the query can be run more\n # frequently\n self._last_user_visit_update = now\n\n return self.db.runInteraction(\n \"generate_user_daily_visits\", _generate_user_daily_visits\n )\n\n def get_users(self):\n \"\"\"Function to retrieve a list of users in users table.\n\n Args:\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n return self.db.simple_select_list(\n table=\"users\",\n keyvalues={},\n retcols=[\n \"name\",\n \"password_hash\",\n \"is_guest\",\n \"admin\",\n \"user_type\",\n \"deactivated\",\n ],\n desc=\"get_users\",\n )\n\n def get_users_paginate(\n self, start, limit, name=None, guests=True, deactivated=False\n ):\n \"\"\"Function to retrieve a paginated list of users from\n users list. 
This will return a json list of users.\n\n Args:\n start (int): start number to begin the query from\n limit (int): number of rows to retrieve\n name (string): filter for user names\n guests (bool): whether to in include guest users\n deactivated (bool): whether to include deactivated users\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n name_filter = {}\n if name:\n name_filter[\"name\"] = \"%\" + name + \"%\"\n\n attr_filter = {}\n if not guests:\n attr_filter[\"is_guest\"] = False\n if not deactivated:\n attr_filter[\"deactivated\"] = False\n\n return self.db.simple_select_list_paginate(\n desc=\"get_users_paginate\",\n table=\"users\",\n orderby=\"name\",\n start=start,\n limit=limit,\n filters=name_filter,\n keyvalues=attr_filter,\n retcols=[\n \"name\",\n \"password_hash\",\n \"is_guest\",\n \"admin\",\n \"user_type\",\n \"deactivated\",\n ],\n )\n\n def search_users(self, term):\n \"\"\"Function to search users list for one or more users with\n the matched term.\n\n Args:\n term (str): search term\n col (str): column to query term should be matched to\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n return self.db.simple_search_list(\n table=\"users\",\n term=term,\n col=\"name\",\n retcols=[\"name\", \"password_hash\", \"is_guest\", \"admin\", \"user_type\"],\n desc=\"search_users\",\n )\n\n\ndef are_all_users_on_domain(txn, database_engine, domain):\n sql = database_engine.convert_param_style(\n \"SELECT COUNT(*) FROM users WHERE name NOT LIKE ?\"\n )\n pat = \"%:\" + domain\n txn.execute(sql, (pat,))\n num_not_matching = txn.fetchall()[0][0]\n if num_not_matching == 0:\n return True\n return False\n", "path": "synapse/storage/data_stores/main/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n# Copyright 2018 New Vector Ltd\n# Copyright 2019 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport calendar\nimport logging\nimport time\n\nfrom synapse.api.constants import PresenceState\nfrom synapse.storage.database import Database\nfrom synapse.storage.engines import PostgresEngine\nfrom synapse.storage.util.id_generators import (\n ChainedIdGenerator,\n IdGenerator,\n StreamIdGenerator,\n)\nfrom synapse.util.caches.stream_change_cache import StreamChangeCache\n\nfrom .account_data import AccountDataStore\nfrom .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore\nfrom .cache import CacheInvalidationStore\nfrom .client_ips import ClientIpStore\nfrom .deviceinbox import DeviceInboxStore\nfrom .devices import DeviceStore\nfrom .directory import DirectoryStore\nfrom .e2e_room_keys import EndToEndRoomKeyStore\nfrom .end_to_end_keys import EndToEndKeyStore\nfrom .event_federation import EventFederationStore\nfrom .event_push_actions import EventPushActionsStore\nfrom .events import EventsStore\nfrom .events_bg_updates import EventsBackgroundUpdatesStore\nfrom .filtering import FilteringStore\nfrom .group_server import 
GroupServerStore\nfrom .keys import KeyStore\nfrom .media_repository import MediaRepositoryStore\nfrom .monthly_active_users import MonthlyActiveUsersStore\nfrom .openid import OpenIdStore\nfrom .presence import PresenceStore, UserPresenceState\nfrom .profile import ProfileStore\nfrom .push_rule import PushRuleStore\nfrom .pusher import PusherStore\nfrom .receipts import ReceiptsStore\nfrom .registration import RegistrationStore\nfrom .rejections import RejectionsStore\nfrom .relations import RelationsStore\nfrom .room import RoomStore\nfrom .roommember import RoomMemberStore\nfrom .search import SearchStore\nfrom .signatures import SignatureStore\nfrom .state import StateStore\nfrom .stats import StatsStore\nfrom .stream import StreamStore\nfrom .tags import TagsStore\nfrom .transactions import TransactionStore\nfrom .user_directory import UserDirectoryStore\nfrom .user_erasure_store import UserErasureStore\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataStore(\n EventsBackgroundUpdatesStore,\n RoomMemberStore,\n RoomStore,\n RegistrationStore,\n StreamStore,\n ProfileStore,\n PresenceStore,\n TransactionStore,\n DirectoryStore,\n KeyStore,\n StateStore,\n SignatureStore,\n ApplicationServiceStore,\n EventsStore,\n EventFederationStore,\n MediaRepositoryStore,\n RejectionsStore,\n FilteringStore,\n PusherStore,\n PushRuleStore,\n ApplicationServiceTransactionStore,\n ReceiptsStore,\n EndToEndKeyStore,\n EndToEndRoomKeyStore,\n SearchStore,\n TagsStore,\n AccountDataStore,\n EventPushActionsStore,\n OpenIdStore,\n ClientIpStore,\n DeviceStore,\n DeviceInboxStore,\n UserDirectoryStore,\n GroupServerStore,\n UserErasureStore,\n MonthlyActiveUsersStore,\n StatsStore,\n RelationsStore,\n CacheInvalidationStore,\n):\n def __init__(self, database: Database, db_conn, hs):\n self.hs = hs\n self._clock = hs.get_clock()\n self.database_engine = database.engine\n\n all_users_native = are_all_users_on_domain(\n db_conn.cursor(), database.engine, hs.hostname\n )\n if not all_users_native:\n raise Exception(\n \"Found users in database not native to %s!\\n\"\n \"You cannot changed a synapse server_name after it's been configured\"\n % (hs.hostname,)\n )\n\n self._stream_id_gen = StreamIdGenerator(\n db_conn,\n \"events\",\n \"stream_ordering\",\n extra_tables=[(\"local_invites\", \"stream_id\")],\n )\n self._backfill_id_gen = StreamIdGenerator(\n db_conn,\n \"events\",\n \"stream_ordering\",\n step=-1,\n extra_tables=[(\"ex_outlier_stream\", \"event_stream_ordering\")],\n )\n self._presence_id_gen = StreamIdGenerator(\n db_conn, \"presence_stream\", \"stream_id\"\n )\n self._device_inbox_id_gen = StreamIdGenerator(\n db_conn, \"device_max_stream_id\", \"stream_id\"\n )\n self._public_room_id_gen = StreamIdGenerator(\n db_conn, \"public_room_list_stream\", \"stream_id\"\n )\n self._device_list_id_gen = StreamIdGenerator(\n db_conn,\n \"device_lists_stream\",\n \"stream_id\",\n extra_tables=[(\"user_signature_stream\", \"stream_id\")],\n )\n self._cross_signing_id_gen = StreamIdGenerator(\n db_conn, \"e2e_cross_signing_keys\", \"stream_id\"\n )\n\n self._access_tokens_id_gen = IdGenerator(db_conn, \"access_tokens\", \"id\")\n self._event_reports_id_gen = IdGenerator(db_conn, \"event_reports\", \"id\")\n self._push_rule_id_gen = IdGenerator(db_conn, \"push_rules\", \"id\")\n self._push_rules_enable_id_gen = IdGenerator(db_conn, \"push_rules_enable\", \"id\")\n self._push_rules_stream_id_gen = ChainedIdGenerator(\n self._stream_id_gen, db_conn, \"push_rules_stream\", \"stream_id\"\n )\n 
self._pushers_id_gen = StreamIdGenerator(\n db_conn, \"pushers\", \"id\", extra_tables=[(\"deleted_pushers\", \"stream_id\")]\n )\n self._group_updates_id_gen = StreamIdGenerator(\n db_conn, \"local_group_updates\", \"stream_id\"\n )\n\n if isinstance(self.database_engine, PostgresEngine):\n self._cache_id_gen = StreamIdGenerator(\n db_conn, \"cache_invalidation_stream\", \"stream_id\"\n )\n else:\n self._cache_id_gen = None\n\n super(DataStore, self).__init__(database, db_conn, hs)\n\n self._presence_on_startup = self._get_active_presence(db_conn)\n\n presence_cache_prefill, min_presence_val = self.db.get_cache_dict(\n db_conn,\n \"presence_stream\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=self._presence_id_gen.get_current_token(),\n )\n self.presence_stream_cache = StreamChangeCache(\n \"PresenceStreamChangeCache\",\n min_presence_val,\n prefilled_cache=presence_cache_prefill,\n )\n\n max_device_inbox_id = self._device_inbox_id_gen.get_current_token()\n device_inbox_prefill, min_device_inbox_id = self.db.get_cache_dict(\n db_conn,\n \"device_inbox\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=max_device_inbox_id,\n limit=1000,\n )\n self._device_inbox_stream_cache = StreamChangeCache(\n \"DeviceInboxStreamChangeCache\",\n min_device_inbox_id,\n prefilled_cache=device_inbox_prefill,\n )\n # The federation outbox and the local device inbox uses the same\n # stream_id generator.\n device_outbox_prefill, min_device_outbox_id = self.db.get_cache_dict(\n db_conn,\n \"device_federation_outbox\",\n entity_column=\"destination\",\n stream_column=\"stream_id\",\n max_value=max_device_inbox_id,\n limit=1000,\n )\n self._device_federation_outbox_stream_cache = StreamChangeCache(\n \"DeviceFederationOutboxStreamChangeCache\",\n min_device_outbox_id,\n prefilled_cache=device_outbox_prefill,\n )\n\n device_list_max = self._device_list_id_gen.get_current_token()\n self._device_list_stream_cache = StreamChangeCache(\n \"DeviceListStreamChangeCache\", device_list_max\n )\n self._user_signature_stream_cache = StreamChangeCache(\n \"UserSignatureStreamChangeCache\", device_list_max\n )\n self._device_list_federation_stream_cache = StreamChangeCache(\n \"DeviceListFederationStreamChangeCache\", device_list_max\n )\n\n events_max = self._stream_id_gen.get_current_token()\n curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(\n db_conn,\n \"current_state_delta_stream\",\n entity_column=\"room_id\",\n stream_column=\"stream_id\",\n max_value=events_max, # As we share the stream id with events token\n limit=1000,\n )\n self._curr_state_delta_stream_cache = StreamChangeCache(\n \"_curr_state_delta_stream_cache\",\n min_curr_state_delta_id,\n prefilled_cache=curr_state_delta_prefill,\n )\n\n _group_updates_prefill, min_group_updates_id = self.db.get_cache_dict(\n db_conn,\n \"local_group_updates\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=self._group_updates_id_gen.get_current_token(),\n limit=1000,\n )\n self._group_updates_stream_cache = StreamChangeCache(\n \"_group_updates_stream_cache\",\n min_group_updates_id,\n prefilled_cache=_group_updates_prefill,\n )\n\n self._stream_order_on_start = self.get_room_max_stream_ordering()\n self._min_stream_order_on_start = self.get_room_min_stream_ordering()\n\n # Used in _generate_user_daily_visits to keep track of progress\n self._last_user_visit_update = self._get_start_of_day()\n\n def take_presence_startup_info(self):\n active_on_startup = 
self._presence_on_startup\n self._presence_on_startup = None\n return active_on_startup\n\n def _get_active_presence(self, db_conn):\n \"\"\"Fetch non-offline presence from the database so that we can register\n the appropriate time outs.\n \"\"\"\n\n sql = (\n \"SELECT user_id, state, last_active_ts, last_federation_update_ts,\"\n \" last_user_sync_ts, status_msg, currently_active FROM presence_stream\"\n \" WHERE state != ?\"\n )\n sql = self.database_engine.convert_param_style(sql)\n\n txn = db_conn.cursor()\n txn.execute(sql, (PresenceState.OFFLINE,))\n rows = self.db.cursor_to_dict(txn)\n txn.close()\n\n for row in rows:\n row[\"currently_active\"] = bool(row[\"currently_active\"])\n\n return [UserPresenceState(**row) for row in rows]\n\n def count_daily_users(self):\n \"\"\"\n Counts the number of users who used this homeserver in the last 24 hours.\n \"\"\"\n yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)\n return self.db.runInteraction(\"count_daily_users\", self._count_users, yesterday)\n\n def count_monthly_users(self):\n \"\"\"\n Counts the number of users who used this homeserver in the last 30 days.\n Note this method is intended for phonehome metrics only and is different\n from the mau figure in synapse.storage.monthly_active_users which,\n amongst other things, includes a 3 day grace period before a user counts.\n \"\"\"\n thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)\n return self.db.runInteraction(\n \"count_monthly_users\", self._count_users, thirty_days_ago\n )\n\n def _count_users(self, txn, time_from):\n \"\"\"\n Returns number of users seen in the past time_from period\n \"\"\"\n sql = \"\"\"\n SELECT COALESCE(count(*), 0) FROM (\n SELECT user_id FROM user_ips\n WHERE last_seen > ?\n GROUP BY user_id\n ) u\n \"\"\"\n txn.execute(sql, (time_from,))\n (count,) = txn.fetchone()\n return count\n\n def count_r30_users(self):\n \"\"\"\n Counts the number of 30 day retained users, defined as:-\n * Users who have created their accounts more than 30 days ago\n * Where last seen at most 30 days ago\n * Where account creation and last_seen are > 30 days apart\n\n Returns counts globaly for a given user as well as breaking\n by platform\n \"\"\"\n\n def _count_r30_users(txn):\n thirty_days_in_secs = 86400 * 30\n now = int(self._clock.time())\n thirty_days_ago_in_secs = now - thirty_days_in_secs\n\n sql = \"\"\"\n SELECT platform, COALESCE(count(*), 0) FROM (\n SELECT\n users.name, platform, users.creation_ts * 1000,\n MAX(uip.last_seen)\n FROM users\n INNER JOIN (\n SELECT\n user_id,\n last_seen,\n CASE\n WHEN user_agent LIKE '%%Android%%' THEN 'android'\n WHEN user_agent LIKE '%%iOS%%' THEN 'ios'\n WHEN user_agent LIKE '%%Electron%%' THEN 'electron'\n WHEN user_agent LIKE '%%Mozilla%%' THEN 'web'\n WHEN user_agent LIKE '%%Gecko%%' THEN 'web'\n ELSE 'unknown'\n END\n AS platform\n FROM user_ips\n ) uip\n ON users.name = uip.user_id\n AND users.appservice_id is NULL\n AND users.creation_ts < ?\n AND uip.last_seen/1000 > ?\n AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30\n GROUP BY users.name, platform, users.creation_ts\n ) u GROUP BY platform\n \"\"\"\n\n results = {}\n txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))\n\n for row in txn:\n if row[0] == \"unknown\":\n pass\n results[row[0]] = row[1]\n\n sql = \"\"\"\n SELECT COALESCE(count(*), 0) FROM (\n SELECT users.name, users.creation_ts * 1000,\n MAX(uip.last_seen)\n FROM users\n INNER JOIN (\n SELECT\n user_id,\n last_seen\n FROM user_ips\n ) 
uip\n ON users.name = uip.user_id\n AND appservice_id is NULL\n AND users.creation_ts < ?\n AND uip.last_seen/1000 > ?\n AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30\n GROUP BY users.name, users.creation_ts\n ) u\n \"\"\"\n\n txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))\n\n (count,) = txn.fetchone()\n results[\"all\"] = count\n\n return results\n\n return self.db.runInteraction(\"count_r30_users\", _count_r30_users)\n\n def _get_start_of_day(self):\n \"\"\"\n Returns millisecond unixtime for start of UTC day.\n \"\"\"\n now = time.gmtime()\n today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0))\n return today_start * 1000\n\n def generate_user_daily_visits(self):\n \"\"\"\n Generates daily visit data for use in cohort/ retention analysis\n \"\"\"\n\n def _generate_user_daily_visits(txn):\n logger.info(\"Calling _generate_user_daily_visits\")\n today_start = self._get_start_of_day()\n a_day_in_milliseconds = 24 * 60 * 60 * 1000\n now = self.clock.time_msec()\n\n sql = \"\"\"\n INSERT INTO user_daily_visits (user_id, device_id, timestamp)\n SELECT u.user_id, u.device_id, ?\n FROM user_ips AS u\n LEFT JOIN (\n SELECT user_id, device_id, timestamp FROM user_daily_visits\n WHERE timestamp = ?\n ) udv\n ON u.user_id = udv.user_id AND u.device_id=udv.device_id\n INNER JOIN users ON users.name=u.user_id\n WHERE last_seen > ? AND last_seen <= ?\n AND udv.timestamp IS NULL AND users.is_guest=0\n AND users.appservice_id IS NULL\n GROUP BY u.user_id, u.device_id\n \"\"\"\n\n # This means that the day has rolled over but there could still\n # be entries from the previous day. There is an edge case\n # where if the user logs in at 23:59 and overwrites their\n # last_seen at 00:01 then they will not be counted in the\n # previous day's stats - it is important that the query is run\n # often to minimise this case.\n if today_start > self._last_user_visit_update:\n yesterday_start = today_start - a_day_in_milliseconds\n txn.execute(\n sql,\n (\n yesterday_start,\n yesterday_start,\n self._last_user_visit_update,\n today_start,\n ),\n )\n self._last_user_visit_update = today_start\n\n txn.execute(\n sql, (today_start, today_start, self._last_user_visit_update, now)\n )\n # Update _last_user_visit_update to now. The reason to do this\n # rather just clamping to the beginning of the day is to limit\n # the size of the join - meaning that the query can be run more\n # frequently\n self._last_user_visit_update = now\n\n return self.db.runInteraction(\n \"generate_user_daily_visits\", _generate_user_daily_visits\n )\n\n def get_users(self):\n \"\"\"Function to retrieve a list of users in users table.\n\n Args:\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n return self.db.simple_select_list(\n table=\"users\",\n keyvalues={},\n retcols=[\n \"name\",\n \"password_hash\",\n \"is_guest\",\n \"admin\",\n \"user_type\",\n \"deactivated\",\n ],\n desc=\"get_users\",\n )\n\n def get_users_paginate(\n self, start, limit, name=None, guests=True, deactivated=False\n ):\n \"\"\"Function to retrieve a paginated list of users from\n users list. 
This will return a json list of users.\n\n Args:\n start (int): start number to begin the query from\n limit (int): number of rows to retrieve\n name (string): filter for user names\n guests (bool): whether to in include guest users\n deactivated (bool): whether to include deactivated users\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n name_filter = {}\n if name:\n name_filter[\"name\"] = \"%\" + name + \"%\"\n\n attr_filter = {}\n if not guests:\n attr_filter[\"is_guest\"] = 0\n if not deactivated:\n attr_filter[\"deactivated\"] = 0\n\n return self.db.simple_select_list_paginate(\n desc=\"get_users_paginate\",\n table=\"users\",\n orderby=\"name\",\n start=start,\n limit=limit,\n filters=name_filter,\n keyvalues=attr_filter,\n retcols=[\n \"name\",\n \"password_hash\",\n \"is_guest\",\n \"admin\",\n \"user_type\",\n \"deactivated\",\n ],\n )\n\n def search_users(self, term):\n \"\"\"Function to search users list for one or more users with\n the matched term.\n\n Args:\n term (str): search term\n col (str): column to query term should be matched to\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n return self.db.simple_search_list(\n table=\"users\",\n term=term,\n col=\"name\",\n retcols=[\"name\", \"password_hash\", \"is_guest\", \"admin\", \"user_type\"],\n desc=\"search_users\",\n )\n\n\ndef are_all_users_on_domain(txn, database_engine, domain):\n sql = database_engine.convert_param_style(\n \"SELECT COUNT(*) FROM users WHERE name NOT LIKE ?\"\n )\n pat = \"%:\" + domain\n txn.execute(sql, (pat,))\n num_not_matching = txn.fetchall()[0][0]\n if num_not_matching == 0:\n return True\n return False\n", "path": "synapse/storage/data_stores/main/__init__.py"}]} |
gh_patches_debug_1405 | rasdani/github-patches | git_diff | keras-team__keras-core-348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
keras.layers.CenterCrop raises AttributeError when passed a list of images
With tf.keras, the following code works, but not in Keras-Core:
```python
import keras_core as keras
import numpy as np
images = [
np.random.rand(100, 100, 3),
np.random.rand(100, 100, 3),
]
keras.layers.CenterCrop(height=60, width=50)(images) #!!! AttributeError
```
Full stacktrace below:
<details>
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Cell In[21], line 8
2 import numpy as np
4 images = [
5 np.random.rand(100, 100, 3),
6 np.random.rand(100, 100, 3),
7 ]
----> 8 keras.layers.CenterCrop(height=60, width=50)(images)
File ~/opt/miniconda3/envs/kerascore/lib/python3.8/site-packages/keras_core/src/layers/preprocessing/tf_data_layer.py:36, in TFDataLayer.__call__(self, inputs, **kwargs)
34 self._convert_input_args = True
35 return outputs
---> 36 return super().__call__(inputs, **kwargs)
File ~/opt/miniconda3/envs/kerascore/lib/python3.8/site-packages/keras_core/src/utils/traceback_utils.py:122, in filter_traceback.<locals>.error_handler(*args, **kwargs)
119 filtered_tb = _process_traceback_frames(e.__traceback__)
120 # To get the full stack trace, call:
121 # `keras_core.config.disable_traceback_filtering()`
--> 122 raise e.with_traceback(filtered_tb) from None
123 finally:
124 del filtered_tb
File ~/opt/miniconda3/envs/kerascore/lib/python3.8/site-packages/keras_core/src/layers/preprocessing/center_crop.py:59, in CenterCrop.call(self, inputs)
57 init_width = inputs.shape[-1]
58 else:
---> 59 init_height = inputs.shape[-3]
60 init_width = inputs.shape[-2]
62 if init_height is None or init_width is None:
63 # Dynamic size case. TODO.
AttributeError: Exception encountered when calling CenterCrop.call().
'list' object has no attribute 'shape'
Arguments received by CenterCrop.call():
• inputs=['jnp.ndarray(shape=(100, 100, 3), dtype=float32)', 'jnp.ndarray(shape=(100, 100, 3), dtype=float32)']
```
</details>
A simple workaround is to stack the images:
```python
keras.layers.CenterCrop(height=60, width=50)(np.stack(images))
```
Not sure this can be considered a bug, but it's one of those little differences that may make porting code from Keras 2.x to 3.0 a bit harder.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras_core/layers/preprocessing/center_crop.py`
Content:
```
1 from keras_core import backend
2 from keras_core.api_export import keras_core_export
3 from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer
4 from keras_core.utils import image_utils
5
6
7 @keras_core_export("keras_core.layers.CenterCrop")
8 class CenterCrop(TFDataLayer):
9 """A preprocessing layer which crops images.
10
11 This layers crops the central portion of the images to a target size. If an
12 image is smaller than the target size, it will be resized and cropped
13 so as to return the largest possible window in the image that matches
14 the target aspect ratio.
15
16 Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).
17
18 Input shape:
19 3D (unbatched) or 4D (batched) tensor with shape:
20 `(..., height, width, channels)`, in `"channels_last"` format,
21 or `(..., channels, height, width)`, in `"channels_first"` format.
22
23 Output shape:
24 3D (unbatched) or 4D (batched) tensor with shape:
25 `(..., target_height, target_width, channels)`,
26 or `(..., channels, target_height, target_width)`,
27 in `"channels_first"` format.
28
29 If the input height/width is even and the target height/width is odd (or
30 inversely), the input image is left-padded by 1 pixel.
31
32 **Note:** This layer is safe to use inside a `tf.data` pipeline
33 (independently of which backend you're using).
34
35 Args:
36 height: Integer, the height of the output shape.
37 width: Integer, the width of the output shape.
38 data_format: string, either `"channels_last"` or `"channels_first"`.
39 The ordering of the dimensions in the inputs. `"channels_last"`
40 corresponds to inputs with shape `(batch, height, width, channels)`
41 while `"channels_first"` corresponds to inputs with shape
42 `(batch, channels, height, width)`. It defaults to the
43 `image_data_format` value found in your Keras config file at
44 `~/.keras/keras.json`. If you never set it, then it will be
45 `"channels_last"`.
46 """
47
48 def __init__(self, height, width, data_format=None, **kwargs):
49 super().__init__(**kwargs)
50 self.height = height
51 self.width = width
52 self.data_format = backend.standardize_data_format(data_format)
53
54 def call(self, inputs):
55 if self.data_format == "channels_first":
56 init_height = inputs.shape[-2]
57 init_width = inputs.shape[-1]
58 else:
59 init_height = inputs.shape[-3]
60 init_width = inputs.shape[-2]
61
62 if init_height is None or init_width is None:
63 # Dynamic size case. TODO.
64 raise ValueError(
65 "At this time, CenterCrop can only "
66 "process images with a static spatial "
67 f"shape. Received: inputs.shape={inputs.shape}"
68 )
69
70 h_diff = init_height - self.height
71 w_diff = init_width - self.width
72
73 h_start = int(h_diff / 2)
74 w_start = int(w_diff / 2)
75
76 if h_diff >= 0 and w_diff >= 0:
77 if len(inputs.shape) == 4:
78 if self.data_format == "channels_first":
79 return inputs[
80 :,
81 :,
82 h_start : h_start + self.height,
83 w_start : w_start + self.width,
84 ]
85 return inputs[
86 :,
87 h_start : h_start + self.height,
88 w_start : w_start + self.width,
89 :,
90 ]
91 elif len(inputs.shape) == 3:
92 if self.data_format == "channels_first":
93 return inputs[
94 :,
95 h_start : h_start + self.height,
96 w_start : w_start + self.width,
97 ]
98 return inputs[
99 h_start : h_start + self.height,
100 w_start : w_start + self.width,
101 :,
102 ]
103
104 return image_utils.smart_resize(
105 inputs,
106 [self.height, self.width],
107 data_format=self.data_format,
108 backend_module=self.backend,
109 )
110
111 def compute_output_shape(self, input_shape):
112 input_shape = list(input_shape)
113 if len(input_shape) == 4:
114 if self.data_format == "channels_last":
115 input_shape[1] = self.height
116 input_shape[2] = self.width
117 else:
118 input_shape[2] = self.height
119 input_shape[3] = self.width
120 else:
121 if self.data_format == "channels_last":
122 input_shape[0] = self.height
123 input_shape[1] = self.width
124 else:
125 input_shape[1] = self.height
126 input_shape[2] = self.width
127 return tuple(input_shape)
128
129 def get_config(self):
130 base_config = super().get_config()
131 config = {
132 "height": self.height,
133 "width": self.width,
134 "data_format": self.data_format,
135 }
136 return {**base_config, **config}
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras_core/layers/preprocessing/center_crop.py b/keras_core/layers/preprocessing/center_crop.py
--- a/keras_core/layers/preprocessing/center_crop.py
+++ b/keras_core/layers/preprocessing/center_crop.py
@@ -52,6 +52,7 @@
self.data_format = backend.standardize_data_format(data_format)
def call(self, inputs):
+ inputs = self.backend.cast(inputs, self.compute_dtype)
if self.data_format == "channels_first":
init_height = inputs.shape[-2]
init_width = inputs.shape[-1]
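The one-line fix above casts the incoming value with `self.backend.cast(...)`, which turns a plain Python list of arrays into a single batched tensor before `.shape` is read. A minimal sketch of how the change can be exercised, assuming a keras-core build that already includes this patch, is essentially the reproduction snippet from the issue:
```python
import numpy as np
import keras_core as keras

images = [
    np.random.rand(100, 100, 3),
    np.random.rand(100, 100, 3),
]

# With the cast in place, the list is converted to one (2, 100, 100, 3) tensor,
# so CenterCrop no longer fails with AttributeError when reading .shape.
cropped = keras.layers.CenterCrop(height=60, width=50)(images)
print(cropped.shape)  # expected: (2, 60, 50, 3)
```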
| {"golden_diff": "diff --git a/keras_core/layers/preprocessing/center_crop.py b/keras_core/layers/preprocessing/center_crop.py\n--- a/keras_core/layers/preprocessing/center_crop.py\n+++ b/keras_core/layers/preprocessing/center_crop.py\n@@ -52,6 +52,7 @@\n self.data_format = backend.standardize_data_format(data_format)\n \n def call(self, inputs):\n+ inputs = self.backend.cast(inputs, self.compute_dtype)\n if self.data_format == \"channels_first\":\n init_height = inputs.shape[-2]\n init_width = inputs.shape[-1]\n", "issue": "keras.layers.CenterCrop raises AttributeError when passed a list of images\nWith tf.keras, the following code works, but not in Keras-Core:\r\n\r\n```python\r\nimport keras_core as keras\r\nimport numpy as np\r\n\r\nimages = [\r\n np.random.rand(100, 100, 3),\r\n np.random.rand(100, 100, 3),\r\n]\r\nkeras.layers.CenterCrop(height=60, width=50)(images) #!!! AttributeError\r\n```\r\n\r\nFull stacktrace below:\r\n\r\n<details>\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\nCell In[21], line 8\r\n 2 import numpy as np\r\n 4 images = [\r\n 5 np.random.rand(100, 100, 3),\r\n 6 np.random.rand(100, 100, 3),\r\n 7 ]\r\n----> 8 keras.layers.CenterCrop(height=60, width=50)(images)\r\n\r\nFile ~/opt/miniconda3/envs/kerascore/lib/python3.8/site-packages/keras_core/src/layers/preprocessing/tf_data_layer.py:36, in TFDataLayer.__call__(self, inputs, **kwargs)\r\n 34 self._convert_input_args = True\r\n 35 return outputs\r\n---> 36 return super().__call__(inputs, **kwargs)\r\n\r\nFile ~/opt/miniconda3/envs/kerascore/lib/python3.8/site-packages/keras_core/src/utils/traceback_utils.py:122, in filter_traceback.<locals>.error_handler(*args, **kwargs)\r\n 119 filtered_tb = _process_traceback_frames(e.__traceback__)\r\n 120 # To get the full stack trace, call:\r\n 121 # `keras_core.config.disable_traceback_filtering()`\r\n--> 122 raise e.with_traceback(filtered_tb) from None\r\n 123 finally:\r\n 124 del filtered_tb\r\n\r\nFile ~/opt/miniconda3/envs/kerascore/lib/python3.8/site-packages/keras_core/src/layers/preprocessing/center_crop.py:59, in CenterCrop.call(self, inputs)\r\n 57 init_width = inputs.shape[-1]\r\n 58 else:\r\n---> 59 init_height = inputs.shape[-3]\r\n 60 init_width = inputs.shape[-2]\r\n 62 if init_height is None or init_width is None:\r\n 63 # Dynamic size case. TODO.\r\n\r\nAttributeError: Exception encountered when calling CenterCrop.call().\r\n\r\n'list' object has no attribute 'shape'\r\n\r\nArguments received by CenterCrop.call():\r\n \u2022 inputs=['jnp.ndarray(shape=(100, 100, 3), dtype=float32)', 'jnp.ndarray(shape=(100, 100, 3), dtype=float32)']\r\n```\r\n\r\n</details>\r\n\r\nA simple workaround is to stack the images:\r\n\r\n```python\r\nkeras.layers.CenterCrop(height=60, width=50)(np.stack(images))\r\n```\r\n\r\nNot sure this can be considered a bug, but it's one of those little differences that may porting code from Keras 2.x to 3.0 a bit harder.\n", "before_files": [{"content": "from keras_core import backend\nfrom keras_core.api_export import keras_core_export\nfrom keras_core.layers.preprocessing.tf_data_layer import TFDataLayer\nfrom keras_core.utils import image_utils\n\n\n@keras_core_export(\"keras_core.layers.CenterCrop\")\nclass CenterCrop(TFDataLayer):\n \"\"\"A preprocessing layer which crops images.\n\n This layers crops the central portion of the images to a target size. 
If an\n image is smaller than the target size, it will be resized and cropped\n so as to return the largest possible window in the image that matches\n the target aspect ratio.\n\n Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format,\n or `(..., channels, height, width)`, in `\"channels_first\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., target_height, target_width, channels)`,\n or `(..., channels, target_height, target_width)`,\n in `\"channels_first\"` format.\n\n If the input height/width is even and the target height/width is odd (or\n inversely), the input image is left-padded by 1 pixel.\n\n **Note:** This layer is safe to use inside a `tf.data` pipeline\n (independently of which backend you're using).\n\n Args:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.\n \"\"\"\n\n def __init__(self, height, width, data_format=None, **kwargs):\n super().__init__(**kwargs)\n self.height = height\n self.width = width\n self.data_format = backend.standardize_data_format(data_format)\n\n def call(self, inputs):\n if self.data_format == \"channels_first\":\n init_height = inputs.shape[-2]\n init_width = inputs.shape[-1]\n else:\n init_height = inputs.shape[-3]\n init_width = inputs.shape[-2]\n\n if init_height is None or init_width is None:\n # Dynamic size case. TODO.\n raise ValueError(\n \"At this time, CenterCrop can only \"\n \"process images with a static spatial \"\n f\"shape. 
Received: inputs.shape={inputs.shape}\"\n )\n\n h_diff = init_height - self.height\n w_diff = init_width - self.width\n\n h_start = int(h_diff / 2)\n w_start = int(w_diff / 2)\n\n if h_diff >= 0 and w_diff >= 0:\n if len(inputs.shape) == 4:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n elif len(inputs.shape) == 3:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n\n return image_utils.smart_resize(\n inputs,\n [self.height, self.width],\n data_format=self.data_format,\n backend_module=self.backend,\n )\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n if len(input_shape) == 4:\n if self.data_format == \"channels_last\":\n input_shape[1] = self.height\n input_shape[2] = self.width\n else:\n input_shape[2] = self.height\n input_shape[3] = self.width\n else:\n if self.data_format == \"channels_last\":\n input_shape[0] = self.height\n input_shape[1] = self.width\n else:\n input_shape[1] = self.height\n input_shape[2] = self.width\n return tuple(input_shape)\n\n def get_config(self):\n base_config = super().get_config()\n config = {\n \"height\": self.height,\n \"width\": self.width,\n \"data_format\": self.data_format,\n }\n return {**base_config, **config}\n", "path": "keras_core/layers/preprocessing/center_crop.py"}], "after_files": [{"content": "from keras_core import backend\nfrom keras_core.api_export import keras_core_export\nfrom keras_core.layers.preprocessing.tf_data_layer import TFDataLayer\nfrom keras_core.utils import image_utils\n\n\n@keras_core_export(\"keras_core.layers.CenterCrop\")\nclass CenterCrop(TFDataLayer):\n \"\"\"A preprocessing layer which crops images.\n\n This layers crops the central portion of the images to a target size. If an\n image is smaller than the target size, it will be resized and cropped\n so as to return the largest possible window in the image that matches\n the target aspect ratio.\n\n Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format,\n or `(..., channels, height, width)`, in `\"channels_first\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., target_height, target_width, channels)`,\n or `(..., channels, target_height, target_width)`,\n in `\"channels_first\"` format.\n\n If the input height/width is even and the target height/width is odd (or\n inversely), the input image is left-padded by 1 pixel.\n\n **Note:** This layer is safe to use inside a `tf.data` pipeline\n (independently of which backend you're using).\n\n Args:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. 
If you never set it, then it will be\n `\"channels_last\"`.\n \"\"\"\n\n def __init__(self, height, width, data_format=None, **kwargs):\n super().__init__(**kwargs)\n self.height = height\n self.width = width\n self.data_format = backend.standardize_data_format(data_format)\n\n def call(self, inputs):\n inputs = self.backend.cast(inputs, self.compute_dtype)\n if self.data_format == \"channels_first\":\n init_height = inputs.shape[-2]\n init_width = inputs.shape[-1]\n else:\n init_height = inputs.shape[-3]\n init_width = inputs.shape[-2]\n\n if init_height is None or init_width is None:\n # Dynamic size case. TODO.\n raise ValueError(\n \"At this time, CenterCrop can only \"\n \"process images with a static spatial \"\n f\"shape. Received: inputs.shape={inputs.shape}\"\n )\n\n h_diff = init_height - self.height\n w_diff = init_width - self.width\n\n h_start = int(h_diff / 2)\n w_start = int(w_diff / 2)\n\n if h_diff >= 0 and w_diff >= 0:\n if len(inputs.shape) == 4:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n elif len(inputs.shape) == 3:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n\n return image_utils.smart_resize(\n inputs,\n [self.height, self.width],\n data_format=self.data_format,\n backend_module=self.backend,\n )\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n if len(input_shape) == 4:\n if self.data_format == \"channels_last\":\n input_shape[1] = self.height\n input_shape[2] = self.width\n else:\n input_shape[2] = self.height\n input_shape[3] = self.width\n else:\n if self.data_format == \"channels_last\":\n input_shape[0] = self.height\n input_shape[1] = self.width\n else:\n input_shape[1] = self.height\n input_shape[2] = self.width\n return tuple(input_shape)\n\n def get_config(self):\n base_config = super().get_config()\n config = {\n \"height\": self.height,\n \"width\": self.width,\n \"data_format\": self.data_format,\n }\n return {**base_config, **config}\n", "path": "keras_core/layers/preprocessing/center_crop.py"}]} |
gh_patches_debug_1406 | rasdani/github-patches | git_diff | streamlit__streamlit-6828 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Right-side label of `st.slider` and `st.select_slider` overflows when inside `st.expander`
### Checklist
- [x] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
Right-side label of `st.slider` and `st.select_slider` overflows when inside `st.expander`.
In the past I submitted a similar issue for the left-side label (see https://github.com/streamlit/streamlit/issues/5898); now it is the right-side label that is misbehaving.
### Reproducible Code Example
[](https://issues.streamlitapp.com/?issue=gh-6297)
```Python
import streamlit as st
st.title("Right-side label of slider and select_slider overflows when inside expander")
with st.expander('Example st.expander'):
single_value = st.slider(
label='Example st.slider',
min_value=9_500_000,
max_value=10_000_000,
value=10_000_000
)
first_value,last_value = st.slider(
label='Example st.slider (range mode)',
min_value=9_500_000,
max_value=10_000_000,
value=(9_500_000,10_000_000)
)
single_value = st.select_slider(
label='Example st.select_slider',
options=['Maradona','Ronaldo','Pele','This is a very, very long label'],
value='This is a very, very long label'
)
first_value,last_value = st.select_slider(
label='Example st.select_slider (range mode)',
options=['Maradona','Ronaldo','Pele','This is a very, very long label'],
value=['Maradona','This is a very, very long label']
)
```
### Steps To Reproduce
1. Run the reproducible code example
2. Open the expander and see that all the right side labels are overflowing
### Expected Behavior
Labels should not overflow beyond the widget's width.
### Current Behavior
_No response_
### Is this a regression?
- [X] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.20.0
- Python version: 3.11
- Operating System: macOS
- Browser: Brave
- Virtual environment: conda
### Additional Information
_No response_
### Are you willing to submit a PR?
- [ ] Yes, I am willing to submit a PR!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `e2e/scripts/st_select_slider.py`
Content:
```
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import numpy as np
16 import pandas as pd
17
18 import streamlit as st
19 from streamlit import runtime
20
21 w1 = st.select_slider(
22 "Label 1",
23 value=("orange", "blue"),
24 options=["red", "orange", "yellow", "green", "blue", "indigo", "violet"],
25 )
26 st.write("Value 1:", w1)
27
28 w2 = st.select_slider(
29 "Label 2",
30 options=np.array([1, 2, 3, 4, 5]),
31 )
32 st.write("Value 2:", w2)
33
34 w3 = st.select_slider(
35 "Label 3",
36 value=[2, 5],
37 options=pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]),
38 )
39 st.write("Value 3:", w3)
40
41 w4 = st.select_slider(
42 "Label 4",
43 value=5,
44 options=pd.DataFrame(
45 {
46 "first column": [1, 2, 3, 4, 5],
47 "second column": [10, 20, 30, 40, 50],
48 }
49 ),
50 )
51 st.write("Value 4:", w4)
52
53 w5 = st.select_slider(
54 "Label 5",
55 value=("orange", "blue"),
56 options=["red", "orange", "yellow", "green", "blue", "indigo", "violet"],
57 disabled=True,
58 )
59 st.write("Value 5:", w5)
60
61 w6 = st.select_slider(
62 "Label 6",
63 options=["red", "orange", "yellow", "green", "blue", "indigo", "violet"],
64 label_visibility="hidden",
65 )
66
67 st.write("Value 6:", w6)
68
69
70 w7 = st.select_slider(
71 "Label 7",
72 options=["red", "orange", "yellow", "green", "blue", "indigo", "violet"],
73 label_visibility="collapsed",
74 )
75
76 st.write("Value 7:", w7)
77
78 if runtime.exists():
79
80 def on_change():
81 st.session_state.select_slider_changed = True
82
83 st.select_slider(
84 "Label 8",
85 options=np.array([1, 2, 3, 4, 5]),
86 key="select_slider8",
87 on_change=on_change,
88 )
89 st.write("Value 8:", st.session_state.select_slider8)
90 st.write("Select slider changed:", "select_slider_changed" in st.session_state)
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/e2e/scripts/st_select_slider.py b/e2e/scripts/st_select_slider.py
--- a/e2e/scripts/st_select_slider.py
+++ b/e2e/scripts/st_select_slider.py
@@ -88,3 +88,12 @@
)
st.write("Value 8:", st.session_state.select_slider8)
st.write("Select slider changed:", "select_slider_changed" in st.session_state)
+
+with st.expander("Expander", expanded=True):
+ w9 = st.select_slider(
+ label="Label 9",
+ options=["foo", "bar", "baz", "This is a very, very long option"],
+ value="This is a very, very long option",
+ )
+
+ st.write("Value 9:", w9)
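The patch above extends the e2e script with a regression case: a `st.select_slider` carrying a very long option, rendered inside an expander, mirroring the overflow scenario from the issue. A rough manual check, assuming the snippet is saved as a standalone app (the file name below is arbitrary) and launched with `streamlit run`, might look like this:
```python
# check_label_overflow.py (hypothetical file name, illustrative only)
import streamlit as st

with st.expander("Expander", expanded=True):
    w9 = st.select_slider(
        label="Label 9",
        options=["foo", "bar", "baz", "This is a very, very long option"],
        value="This is a very, very long option",
    )
    # The right-side value label should stay inside the widget's width.
    st.write("Value 9:", w9)
```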
| {"golden_diff": "diff --git a/e2e/scripts/st_select_slider.py b/e2e/scripts/st_select_slider.py\n--- a/e2e/scripts/st_select_slider.py\n+++ b/e2e/scripts/st_select_slider.py\n@@ -88,3 +88,12 @@\n )\n st.write(\"Value 8:\", st.session_state.select_slider8)\n st.write(\"Select slider changed:\", \"select_slider_changed\" in st.session_state)\n+\n+with st.expander(\"Expander\", expanded=True):\n+ w9 = st.select_slider(\n+ label=\"Label 9\",\n+ options=[\"foo\", \"bar\", \"baz\", \"This is a very, very long option\"],\n+ value=\"This is a very, very long option\",\n+ )\n+\n+ st.write(\"Value 9:\", w9)\n", "issue": "Right-side label of `st.slider` and `st.select_slider` overflows when inside `st.expander`\n### Checklist\r\n\r\n- [x] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\r\n- [X] I added a very descriptive title to this issue.\r\n- [X] I have provided sufficient information below to help reproduce this issue.\r\n\r\n### Summary\r\n\r\nRight-side label of `st.slider` and `st.select_slider` overflows when inside `st.expander`.\r\n\r\nIn the past I submitted a similar issue for the left-side label (see https://github.com/streamlit/streamlit/issues/5898); now it is the right-side label that is misbehaving.\r\n\r\n### Reproducible Code Example\r\n\r\n[](https://issues.streamlitapp.com/?issue=gh-6297)\r\n\r\n```Python\r\nimport streamlit as st\r\n\r\nst.title(\"Right-side label of slider and select_slider overflows when inside expander\")\r\n\r\nwith st.expander('Example st.expander'):\r\n\r\n single_value = st.slider(\r\n label='Example st.slider',\r\n min_value=9_500_000,\r\n max_value=10_000_000,\r\n value=10_000_000\r\n )\r\n\r\n first_value,last_value = st.slider(\r\n label='Example st.slider (range mode)',\r\n min_value=9_500_000,\r\n max_value=10_000_000,\r\n value=(9_500_000,10_000_000)\r\n )\r\n\r\n single_value = st.select_slider(\r\n label='Example st.select_slider',\r\n options=['Maradona','Ronaldo','Pele','This is a very, very long label'],\r\n value='This is a very, very long label'\r\n )\r\n\r\n first_value,last_value = st.select_slider(\r\n label='Example st.select_slider (range mode)',\r\n options=['Maradona','Ronaldo','Pele','This is a very, very long label'],\r\n value=['Maradona','This is a very, very long label']\r\n )\r\n```\r\n\r\n\r\n### Steps To Reproduce\r\n\r\n1. Run the reproducible code example\r\n2. Open the expander and see that all the right side labels are overflowing\r\n\r\n### Expected Behavior\r\n\r\nLabels should not overflow beyond the widgets width.\r\n\r\n### Current Behavior\r\n\r\n_No response_\r\n\r\n### Is this a regression?\r\n\r\n- [X] Yes, this used to work in a previous version.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.20.0\r\n- Python version: 3.11\r\n- Operating System: macOS\r\n- Browser: Brave\r\n- Virtual environment: conda\r\n\r\n### Additional Information\r\n\r\n_No response_\r\n\r\n### Are you willing to submit a PR?\r\n\r\n- [ ] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\n\nimport streamlit as st\nfrom streamlit import runtime\n\nw1 = st.select_slider(\n \"Label 1\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n)\nst.write(\"Value 1:\", w1)\n\nw2 = st.select_slider(\n \"Label 2\",\n options=np.array([1, 2, 3, 4, 5]),\n)\nst.write(\"Value 2:\", w2)\n\nw3 = st.select_slider(\n \"Label 3\",\n value=[2, 5],\n options=pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]),\n)\nst.write(\"Value 3:\", w3)\n\nw4 = st.select_slider(\n \"Label 4\",\n value=5,\n options=pd.DataFrame(\n {\n \"first column\": [1, 2, 3, 4, 5],\n \"second column\": [10, 20, 30, 40, 50],\n }\n ),\n)\nst.write(\"Value 4:\", w4)\n\nw5 = st.select_slider(\n \"Label 5\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n disabled=True,\n)\nst.write(\"Value 5:\", w5)\n\nw6 = st.select_slider(\n \"Label 6\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"hidden\",\n)\n\nst.write(\"Value 6:\", w6)\n\n\nw7 = st.select_slider(\n \"Label 7\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"collapsed\",\n)\n\nst.write(\"Value 7:\", w7)\n\nif runtime.exists():\n\n def on_change():\n st.session_state.select_slider_changed = True\n\n st.select_slider(\n \"Label 8\",\n options=np.array([1, 2, 3, 4, 5]),\n key=\"select_slider8\",\n on_change=on_change,\n )\n st.write(\"Value 8:\", st.session_state.select_slider8)\n st.write(\"Select slider changed:\", \"select_slider_changed\" in st.session_state)\n", "path": "e2e/scripts/st_select_slider.py"}], "after_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\n\nimport streamlit as st\nfrom streamlit import runtime\n\nw1 = st.select_slider(\n \"Label 1\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n)\nst.write(\"Value 1:\", w1)\n\nw2 = st.select_slider(\n \"Label 2\",\n options=np.array([1, 2, 3, 4, 5]),\n)\nst.write(\"Value 2:\", w2)\n\nw3 = st.select_slider(\n \"Label 3\",\n value=[2, 5],\n options=pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]),\n)\nst.write(\"Value 3:\", w3)\n\nw4 = st.select_slider(\n \"Label 4\",\n value=5,\n options=pd.DataFrame(\n {\n \"first column\": [1, 2, 3, 4, 5],\n \"second column\": [10, 20, 30, 40, 50],\n }\n ),\n)\nst.write(\"Value 4:\", w4)\n\nw5 = st.select_slider(\n \"Label 5\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n disabled=True,\n)\nst.write(\"Value 5:\", w5)\n\nw6 = st.select_slider(\n \"Label 6\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"hidden\",\n)\n\nst.write(\"Value 6:\", w6)\n\n\nw7 = st.select_slider(\n \"Label 7\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"collapsed\",\n)\n\nst.write(\"Value 7:\", w7)\n\nif runtime.exists():\n\n def on_change():\n st.session_state.select_slider_changed = True\n\n st.select_slider(\n \"Label 8\",\n options=np.array([1, 2, 3, 4, 5]),\n key=\"select_slider8\",\n on_change=on_change,\n )\n st.write(\"Value 8:\", st.session_state.select_slider8)\n st.write(\"Select slider changed:\", \"select_slider_changed\" in st.session_state)\n\nwith st.expander(\"Expander\", expanded=True):\n w9 = st.select_slider(\n label=\"Label 9\",\n options=[\"foo\", \"bar\", \"baz\", \"This is a very, very long option\"],\n value=\"This is a very, very long option\",\n )\n\n st.write(\"Value 9:\", w9)\n", "path": "e2e/scripts/st_select_slider.py"}]} |
gh_patches_debug_1407 | rasdani/github-patches | git_diff | apluslms__a-plus-1310 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Users should not be able to edit content hierarchy
Typically the A+ course content structure is configured from a JSON file generated by gitmanager. However, teachers also have the possibility to edit the attributes in the Edit course / Content view, including the parent learning object. By editing the parent selection it is possible to create a circular reference loop between two learning objects that point to each other as parents, which leads to an excessive number of database operations and, as a result, makes the system unusable due to heavy database load.
An easy approach would be to just disable the possibility to modify the parent selection. Later, it might be useful to think more thoroughly about how important it is to allow editing the content structure in this view, while the main form of course configuration should be through the JSON configuration.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `edit_course/exercise_forms.py`
Content:
```
1 import logging
2 from typing import Any, Dict, List
3
4 from django import forms
5 from django.utils.translation import gettext_lazy as _
6
7 from course.models import CourseModule, LearningObjectCategory
8 from exercise.models import LearningObject, CourseChapter, BaseExercise, \
9 LTIExercise, StaticExercise, ExerciseWithAttachment, RevealRule, \
10 LTI1p3Exercise
11 from lib.widgets import DateTimeLocalInput
12 from .course_forms import FieldsetModelForm
13
14 from exercise.exercisecollection_models import ExerciseCollection
15
16 logger = logging.getLogger("aplus.exercise")
17
18 COMMON_FIELDS = [
19 'status',
20 'audience',
21 'category',
22 'course_module',
23 'parent',
24 'order',
25 'url',
26 ]
27 SERVICE_FIELDS = [
28 'service_url',
29 'name',
30 'description',
31 ]
32 EXERCISE_FIELDS = [
33 'max_submissions',
34 'max_points',
35 'difficulty',
36 'points_to_pass',
37 'allow_assistant_viewing',
38 'allow_assistant_grading',
39 'min_group_size',
40 'max_group_size',
41 'model_answers',
42 'templates',
43 'grading_mode',
44 ]
45
46
47 class LearningObjectMixin:
48
49 def init_fields(self, **kwargs):
50 self.lobject = kwargs.get('instance')
51 self.fields["category"].queryset = LearningObjectCategory.objects.filter(
52 course_instance=self.lobject.course_instance)
53 self.fields["course_module"].queryset = CourseModule.objects.filter(
54 course_instance=self.lobject.course_instance)
55 self.fields["parent"].queryset = LearningObject.objects\
56 .exclude(id=self.lobject.id)\
57 .filter(course_module=self.lobject.course_module)
58
59 @property
60 def remote_service_head(self):
61 return True
62
63 def get_hierarchy_fieldset(self):
64 return { 'legend':_('HIERARCHY'), 'fields':self.get_fields('status',
65 'audience', 'category','course_module','parent','order','url') }
66
67 def get_content_fieldset(self, *add):
68 return { 'legend':_('CONTENT'), 'fields':self.get_fields('name',
69 'description', *add) }
70
71
72 class CourseChapterForm(LearningObjectMixin, FieldsetModelForm):
73
74 class Meta:
75 model = CourseChapter
76 fields = COMMON_FIELDS + SERVICE_FIELDS + [
77 'use_wide_column',
78 'generate_table_of_contents'
79 ]
80
81 def __init__(self, *args, **kwargs):
82 super().__init__(*args, **kwargs)
83 self.init_fields(**kwargs)
84
85 def get_fieldsets(self):
86 return [
87 self.get_hierarchy_fieldset(),
88 self.get_content_fieldset(
89 'use_wide_column', 'generate_table_of_contents'),
90 ]
91
92
93 class RevealRuleForm(FieldsetModelForm):
94 # This form is only used internally by BaseExerciseForm.
95
96 class Meta:
97 model = RevealRule
98 fields = ['trigger', 'delay_minutes', 'time', 'currently_revealed']
99 widgets = {'time': DateTimeLocalInput}
100
101 def __init__(self, *args: Any, **kwargs: Any) -> None:
102 super().__init__(*args, **kwargs)
103 self.fields['trigger'].widget.attrs['data-trigger'] = True
104 # Visibility rules for the form fields. Each of the following fields is
105 # only visible when one of their specified values is selected from the
106 # trigger dropdown. See edit_model.html.
107 self.fields['currently_revealed'].widget.attrs['data-visible-triggers'] = [
108 RevealRule.TRIGGER.MANUAL.value,
109 ]
110 self.fields['time'].widget.attrs['data-visible-triggers'] = [
111 RevealRule.TRIGGER.TIME.value,
112 ]
113 self.fields['delay_minutes'].widget.attrs['data-visible-triggers'] = [
114 RevealRule.TRIGGER.DEADLINE.value,
115 RevealRule.TRIGGER.DEADLINE_ALL.value,
116 RevealRule.TRIGGER.DEADLINE_OR_FULL_POINTS.value,
117 ]
118
119 def clean(self) -> Dict[str, Any]:
120 result = super().clean()
121 errors = {}
122 trigger = self.cleaned_data.get('trigger')
123 if trigger == RevealRule.TRIGGER.TIME:
124 time = self.cleaned_data.get('time')
125 if time is None:
126 errors['time'] = _(
127 'ERROR_REQUIRED_WITH_SELECTED_TRIGGER'
128 )
129 if errors:
130 raise forms.ValidationError(errors)
131 return result
132
133
134 class BaseExerciseForm(LearningObjectMixin, FieldsetModelForm):
135
136 class Meta:
137 model = BaseExercise
138 fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS
139
140 def __init__(self, *args: Any, **kwargs: Any) -> None:
141 super().__init__(*args, **kwargs)
142 self.init_fields(**kwargs)
143
144 # This form contains two embedded RevealRuleForms.
145 self.submission_feedback_form = RevealRuleForm(
146 data=kwargs.get('data'),
147 instance=self.instance.active_submission_feedback_reveal_rule,
148 prefix='submission_feedback',
149 )
150 self.model_solutions_form = RevealRuleForm(
151 data=kwargs.get('data'),
152 instance=self.instance.active_model_solutions_reveal_rule,
153 prefix='model_solutions',
154 )
155
156 def get_fieldsets(self) -> List[Dict[str, Any]]:
157 return [
158 self.get_hierarchy_fieldset(),
159 self.get_content_fieldset('model_answers', 'templates'),
160 { 'legend':_('GRADING'), 'fields':self.get_fields('max_submissions',
161 'max_points','points_to_pass', 'difficulty',
162 'allow_assistant_viewing','allow_assistant_grading','grading_mode') },
163 { 'legend':_('GROUPS'), 'fields':self.get_fields('min_group_size',
164 'max_group_size') },
165 { 'legend':_('REVEAL_SUBMISSION_FEEDBACK'), 'fields':self.submission_feedback_form },
166 { 'legend':_('REVEAL_MODEL_SOLUTIONS'), 'fields':self.model_solutions_form },
167 ]
168
169 def is_valid(self) -> bool:
170 return (
171 super().is_valid()
172 and self.submission_feedback_form.is_valid()
173 and self.model_solutions_form.is_valid()
174 )
175
176 def save(self, *args: Any, **kwargs: Any) -> Any:
177 # Save the reveal rules only if they have been changed.
178 # If they were not changed, we can keep using the default rule and
179 # there's no need to save a new RevealRule.
180 if self.submission_feedback_form.has_changed():
181 self.instance.submission_feedback_reveal_rule = (
182 self.submission_feedback_form.save(*args, **kwargs)
183 )
184 if self.model_solutions_form.has_changed():
185 self.instance.model_solutions_reveal_rule = (
186 self.model_solutions_form.save(*args, **kwargs)
187 )
188 return super().save(*args, **kwargs)
189
190
191 class LTIExerciseForm(BaseExerciseForm):
192
193 class Meta:
194 model = LTIExercise
195 fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [
196 'lti_service',
197 'context_id',
198 'resource_link_id',
199 'resource_link_title',
200 'aplus_get_and_post',
201 'open_in_iframe',
202 ]
203
204 @property
205 def remote_service_head(self):
206 return False
207
208 def get_content_fieldset(self, *add):
209 return super().get_content_fieldset('lti_service','context_id',
210 'resource_link_id','resource_link_title',
211 'aplus_get_and_post','open_in_iframe','service_url')
212
213
214 class LTI1p3ExerciseForm(BaseExerciseForm):
215
216 class Meta:
217 model = LTI1p3Exercise
218 fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [
219 'lti_service',
220 'custom',
221 'open_in_iframe',
222 ]
223
224 @property
225 def remote_service_head(self) -> bool:
226 return False
227
228 def get_content_fieldset(self, *add) -> Dict[str, Any]:
229 return super().get_content_fieldset('lti_service', 'custom', 'open_in_iframe')
230
231
232 class ExerciseWithAttachmentForm(BaseExerciseForm):
233 multipart = True
234
235 class Meta:
236 model = ExerciseWithAttachment
237 fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [
238 'content',
239 'files_to_submit',
240 'attachment',
241 ]
242
243 def get_content_fieldset(self, *add):
244 return super().get_content_fieldset(
245 'content', 'files_to_submit', 'attachment')
246
247
248 class StaticExerciseForm(BaseExerciseForm):
249
250 class Meta:
251 model = StaticExercise
252 fields = COMMON_FIELDS + EXERCISE_FIELDS + [
253 'name',
254 'description',
255 'exercise_page_content',
256 'submission_page_content',
257 ]
258
259 @property
260 def remote_service_head(self):
261 return False
262
263 def get_content_fieldset(self, *add):
264 return super().get_content_fieldset(
265 'exercise_page_content', 'submission_page_content')
266
267 class ExerciseCollectionExerciseForm(BaseExerciseForm):
268
269 class Meta:
270 model = ExerciseCollection
271 fields = COMMON_FIELDS + EXERCISE_FIELDS + SERVICE_FIELDS + \
272 ['target_category']
273
274 def get_content_fieldset(self, *add):
275 return super().get_content_fieldset('target_category')
276
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/edit_course/exercise_forms.py b/edit_course/exercise_forms.py
--- a/edit_course/exercise_forms.py
+++ b/edit_course/exercise_forms.py
@@ -55,6 +55,8 @@
self.fields["parent"].queryset = LearningObject.objects\
.exclude(id=self.lobject.id)\
.filter(course_module=self.lobject.course_module)
+ self.fields['parent'].widget.attrs.update(
+ {'readonly': True, 'disabled': True})
@property
def remote_service_head(self):
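The patch above keeps the parent selector visible but marks its widget as read-only and disabled, so the hierarchy can no longer be rewired from this form. The underlying Django pattern, shown in isolation with a hypothetical form and choices that are not taken from the A+ code base, looks roughly like this:
```python
from django import forms


class ExampleHierarchyForm(forms.Form):
    # Hypothetical stand-in for the learning-object parent selector.
    parent = forms.ChoiceField(choices=[("a", "Chapter A"), ("b", "Chapter B")])

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Same idea as the patch: render the field, but refuse edits so a
        # circular parent reference cannot be submitted from this view.
        self.fields["parent"].widget.attrs.update(
            {"readonly": True, "disabled": True}
        )
```
As the issue notes, the JSON-based course configuration remains the intended way to change the content structure.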
| {"golden_diff": "diff --git a/edit_course/exercise_forms.py b/edit_course/exercise_forms.py\n--- a/edit_course/exercise_forms.py\n+++ b/edit_course/exercise_forms.py\n@@ -55,6 +55,8 @@\n self.fields[\"parent\"].queryset = LearningObject.objects\\\n .exclude(id=self.lobject.id)\\\n .filter(course_module=self.lobject.course_module)\n+ self.fields['parent'].widget.attrs.update(\n+ {'readonly': True, 'disabled': True})\n \n @property\n def remote_service_head(self):\n", "issue": "Users should not be able to edit content hierarchy\nTypically the A+ course content structure is configured from a JSON file generated by gitmanager. However, the teachers have possibility to edit the attributes also in the Edit course / Content view, including the parent learning object. By editing the parent selection it is possible to create a circular reference loop between two learning objects pointing to each other as a parent, that leads to excessive number of database operations, and as a result, leading the system to become unusable due to heavy database load.\r\n\r\nEasy approach would be to just disable the possibility to modify the parent selection. Later, it might be useful to think more thoroughly how important it is allow editing the content structure in this view, while the main form of course configuration should be through the JSON configuration.\n", "before_files": [{"content": "import logging\nfrom typing import Any, Dict, List\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom course.models import CourseModule, LearningObjectCategory\nfrom exercise.models import LearningObject, CourseChapter, BaseExercise, \\\n LTIExercise, StaticExercise, ExerciseWithAttachment, RevealRule, \\\n LTI1p3Exercise\nfrom lib.widgets import DateTimeLocalInput\nfrom .course_forms import FieldsetModelForm\n\nfrom exercise.exercisecollection_models import ExerciseCollection\n\nlogger = logging.getLogger(\"aplus.exercise\")\n\nCOMMON_FIELDS = [\n 'status',\n 'audience',\n 'category',\n 'course_module',\n 'parent',\n 'order',\n 'url',\n]\nSERVICE_FIELDS = [\n 'service_url',\n 'name',\n 'description',\n]\nEXERCISE_FIELDS = [\n 'max_submissions',\n 'max_points',\n 'difficulty',\n 'points_to_pass',\n 'allow_assistant_viewing',\n 'allow_assistant_grading',\n 'min_group_size',\n 'max_group_size',\n 'model_answers',\n 'templates',\n 'grading_mode',\n]\n\n\nclass LearningObjectMixin:\n\n def init_fields(self, **kwargs):\n self.lobject = kwargs.get('instance')\n self.fields[\"category\"].queryset = LearningObjectCategory.objects.filter(\n course_instance=self.lobject.course_instance)\n self.fields[\"course_module\"].queryset = CourseModule.objects.filter(\n course_instance=self.lobject.course_instance)\n self.fields[\"parent\"].queryset = LearningObject.objects\\\n .exclude(id=self.lobject.id)\\\n .filter(course_module=self.lobject.course_module)\n\n @property\n def remote_service_head(self):\n return True\n\n def get_hierarchy_fieldset(self):\n return { 'legend':_('HIERARCHY'), 'fields':self.get_fields('status',\n 'audience', 'category','course_module','parent','order','url') }\n\n def get_content_fieldset(self, *add):\n return { 'legend':_('CONTENT'), 'fields':self.get_fields('name',\n 'description', *add) }\n\n\nclass CourseChapterForm(LearningObjectMixin, FieldsetModelForm):\n\n class Meta:\n model = CourseChapter\n fields = COMMON_FIELDS + SERVICE_FIELDS + [\n 'use_wide_column',\n 'generate_table_of_contents'\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, 
**kwargs)\n self.init_fields(**kwargs)\n\n def get_fieldsets(self):\n return [\n self.get_hierarchy_fieldset(),\n self.get_content_fieldset(\n 'use_wide_column', 'generate_table_of_contents'),\n ]\n\n\nclass RevealRuleForm(FieldsetModelForm):\n # This form is only used internally by BaseExerciseForm.\n\n class Meta:\n model = RevealRule\n fields = ['trigger', 'delay_minutes', 'time', 'currently_revealed']\n widgets = {'time': DateTimeLocalInput}\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.fields['trigger'].widget.attrs['data-trigger'] = True\n # Visibility rules for the form fields. Each of the following fields is\n # only visible when one of their specified values is selected from the\n # trigger dropdown. See edit_model.html.\n self.fields['currently_revealed'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.MANUAL.value,\n ]\n self.fields['time'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.TIME.value,\n ]\n self.fields['delay_minutes'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.DEADLINE.value,\n RevealRule.TRIGGER.DEADLINE_ALL.value,\n RevealRule.TRIGGER.DEADLINE_OR_FULL_POINTS.value,\n ]\n\n def clean(self) -> Dict[str, Any]:\n result = super().clean()\n errors = {}\n trigger = self.cleaned_data.get('trigger')\n if trigger == RevealRule.TRIGGER.TIME:\n time = self.cleaned_data.get('time')\n if time is None:\n errors['time'] = _(\n 'ERROR_REQUIRED_WITH_SELECTED_TRIGGER'\n )\n if errors:\n raise forms.ValidationError(errors)\n return result\n\n\nclass BaseExerciseForm(LearningObjectMixin, FieldsetModelForm):\n\n class Meta:\n model = BaseExercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.init_fields(**kwargs)\n\n # This form contains two embedded RevealRuleForms.\n self.submission_feedback_form = RevealRuleForm(\n data=kwargs.get('data'),\n instance=self.instance.active_submission_feedback_reveal_rule,\n prefix='submission_feedback',\n )\n self.model_solutions_form = RevealRuleForm(\n data=kwargs.get('data'),\n instance=self.instance.active_model_solutions_reveal_rule,\n prefix='model_solutions',\n )\n\n def get_fieldsets(self) -> List[Dict[str, Any]]:\n return [\n self.get_hierarchy_fieldset(),\n self.get_content_fieldset('model_answers', 'templates'),\n { 'legend':_('GRADING'), 'fields':self.get_fields('max_submissions',\n 'max_points','points_to_pass', 'difficulty',\n 'allow_assistant_viewing','allow_assistant_grading','grading_mode') },\n { 'legend':_('GROUPS'), 'fields':self.get_fields('min_group_size',\n 'max_group_size') },\n { 'legend':_('REVEAL_SUBMISSION_FEEDBACK'), 'fields':self.submission_feedback_form },\n { 'legend':_('REVEAL_MODEL_SOLUTIONS'), 'fields':self.model_solutions_form },\n ]\n\n def is_valid(self) -> bool:\n return (\n super().is_valid()\n and self.submission_feedback_form.is_valid()\n and self.model_solutions_form.is_valid()\n )\n\n def save(self, *args: Any, **kwargs: Any) -> Any:\n # Save the reveal rules only if they have been changed.\n # If they were not changed, we can keep using the default rule and\n # there's no need to save a new RevealRule.\n if self.submission_feedback_form.has_changed():\n self.instance.submission_feedback_reveal_rule = (\n self.submission_feedback_form.save(*args, **kwargs)\n )\n if self.model_solutions_form.has_changed():\n self.instance.model_solutions_reveal_rule = (\n 
self.model_solutions_form.save(*args, **kwargs)\n )\n return super().save(*args, **kwargs)\n\n\nclass LTIExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = LTIExercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 'lti_service',\n 'context_id',\n 'resource_link_id',\n 'resource_link_title',\n 'aplus_get_and_post',\n 'open_in_iframe',\n ]\n\n @property\n def remote_service_head(self):\n return False\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset('lti_service','context_id',\n 'resource_link_id','resource_link_title',\n 'aplus_get_and_post','open_in_iframe','service_url')\n\n\nclass LTI1p3ExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = LTI1p3Exercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 'lti_service',\n 'custom',\n 'open_in_iframe',\n ]\n\n @property\n def remote_service_head(self) -> bool:\n return False\n\n def get_content_fieldset(self, *add) -> Dict[str, Any]:\n return super().get_content_fieldset('lti_service', 'custom', 'open_in_iframe')\n\n\nclass ExerciseWithAttachmentForm(BaseExerciseForm):\n multipart = True\n\n class Meta:\n model = ExerciseWithAttachment\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 'content',\n 'files_to_submit',\n 'attachment',\n ]\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset(\n 'content', 'files_to_submit', 'attachment')\n\n\nclass StaticExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = StaticExercise\n fields = COMMON_FIELDS + EXERCISE_FIELDS + [\n 'name',\n 'description',\n 'exercise_page_content',\n 'submission_page_content',\n ]\n\n @property\n def remote_service_head(self):\n return False\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset(\n 'exercise_page_content', 'submission_page_content')\n\nclass ExerciseCollectionExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = ExerciseCollection\n fields = COMMON_FIELDS + EXERCISE_FIELDS + SERVICE_FIELDS + \\\n ['target_category']\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset('target_category')\n", "path": "edit_course/exercise_forms.py"}], "after_files": [{"content": "import logging\nfrom typing import Any, Dict, List\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom course.models import CourseModule, LearningObjectCategory\nfrom exercise.models import LearningObject, CourseChapter, BaseExercise, \\\n LTIExercise, StaticExercise, ExerciseWithAttachment, RevealRule, \\\n LTI1p3Exercise\nfrom lib.widgets import DateTimeLocalInput\nfrom .course_forms import FieldsetModelForm\n\nfrom exercise.exercisecollection_models import ExerciseCollection\n\nlogger = logging.getLogger(\"aplus.exercise\")\n\nCOMMON_FIELDS = [\n 'status',\n 'audience',\n 'category',\n 'course_module',\n 'parent',\n 'order',\n 'url',\n]\nSERVICE_FIELDS = [\n 'service_url',\n 'name',\n 'description',\n]\nEXERCISE_FIELDS = [\n 'max_submissions',\n 'max_points',\n 'difficulty',\n 'points_to_pass',\n 'allow_assistant_viewing',\n 'allow_assistant_grading',\n 'min_group_size',\n 'max_group_size',\n 'model_answers',\n 'templates',\n 'grading_mode',\n]\n\n\nclass LearningObjectMixin:\n\n def init_fields(self, **kwargs):\n self.lobject = kwargs.get('instance')\n self.fields[\"category\"].queryset = LearningObjectCategory.objects.filter(\n course_instance=self.lobject.course_instance)\n self.fields[\"course_module\"].queryset = CourseModule.objects.filter(\n 
course_instance=self.lobject.course_instance)\n self.fields[\"parent\"].queryset = LearningObject.objects\\\n .exclude(id=self.lobject.id)\\\n .filter(course_module=self.lobject.course_module)\n self.fields['parent'].widget.attrs.update(\n {'readonly': True, 'disabled': True})\n\n @property\n def remote_service_head(self):\n return True\n\n def get_hierarchy_fieldset(self):\n return { 'legend':_('HIERARCHY'), 'fields':self.get_fields('status',\n 'audience', 'category','course_module','parent','order','url') }\n\n def get_content_fieldset(self, *add):\n return { 'legend':_('CONTENT'), 'fields':self.get_fields('name',\n 'description', *add) }\n\n\nclass CourseChapterForm(LearningObjectMixin, FieldsetModelForm):\n\n class Meta:\n model = CourseChapter\n fields = COMMON_FIELDS + SERVICE_FIELDS + [\n 'use_wide_column',\n 'generate_table_of_contents'\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.init_fields(**kwargs)\n\n def get_fieldsets(self):\n return [\n self.get_hierarchy_fieldset(),\n self.get_content_fieldset(\n 'use_wide_column', 'generate_table_of_contents'),\n ]\n\n\nclass RevealRuleForm(FieldsetModelForm):\n # This form is only used internally by BaseExerciseForm.\n\n class Meta:\n model = RevealRule\n fields = ['trigger', 'delay_minutes', 'time', 'currently_revealed']\n widgets = {'time': DateTimeLocalInput}\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.fields['trigger'].widget.attrs['data-trigger'] = True\n # Visibility rules for the form fields. Each of the following fields is\n # only visible when one of their specified values is selected from the\n # trigger dropdown. See edit_model.html.\n self.fields['currently_revealed'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.MANUAL.value,\n ]\n self.fields['time'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.TIME.value,\n ]\n self.fields['delay_minutes'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.DEADLINE.value,\n RevealRule.TRIGGER.DEADLINE_ALL.value,\n RevealRule.TRIGGER.DEADLINE_OR_FULL_POINTS.value,\n ]\n\n def clean(self) -> Dict[str, Any]:\n result = super().clean()\n errors = {}\n trigger = self.cleaned_data.get('trigger')\n if trigger == RevealRule.TRIGGER.TIME:\n time = self.cleaned_data.get('time')\n if time is None:\n errors['time'] = _(\n 'ERROR_REQUIRED_WITH_SELECTED_TRIGGER'\n )\n if errors:\n raise forms.ValidationError(errors)\n return result\n\n\nclass BaseExerciseForm(LearningObjectMixin, FieldsetModelForm):\n\n class Meta:\n model = BaseExercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.init_fields(**kwargs)\n\n # This form contains two embedded RevealRuleForms.\n self.submission_feedback_form = RevealRuleForm(\n data=kwargs.get('data'),\n instance=self.instance.active_submission_feedback_reveal_rule,\n prefix='submission_feedback',\n )\n self.model_solutions_form = RevealRuleForm(\n data=kwargs.get('data'),\n instance=self.instance.active_model_solutions_reveal_rule,\n prefix='model_solutions',\n )\n\n def get_fieldsets(self) -> List[Dict[str, Any]]:\n return [\n self.get_hierarchy_fieldset(),\n self.get_content_fieldset('model_answers', 'templates'),\n { 'legend':_('GRADING'), 'fields':self.get_fields('max_submissions',\n 'max_points','points_to_pass', 'difficulty',\n 'allow_assistant_viewing','allow_assistant_grading','grading_mode') },\n { 
'legend':_('GROUPS'), 'fields':self.get_fields('min_group_size',\n 'max_group_size') },\n { 'legend':_('REVEAL_SUBMISSION_FEEDBACK'), 'fields':self.submission_feedback_form },\n { 'legend':_('REVEAL_MODEL_SOLUTIONS'), 'fields':self.model_solutions_form },\n ]\n\n def is_valid(self) -> bool:\n return (\n super().is_valid()\n and self.submission_feedback_form.is_valid()\n and self.model_solutions_form.is_valid()\n )\n\n def save(self, *args: Any, **kwargs: Any) -> Any:\n # Save the reveal rules only if they have been changed.\n # If they were not changed, we can keep using the default rule and\n # there's no need to save a new RevealRule.\n if self.submission_feedback_form.has_changed():\n self.instance.submission_feedback_reveal_rule = (\n self.submission_feedback_form.save(*args, **kwargs)\n )\n if self.model_solutions_form.has_changed():\n self.instance.model_solutions_reveal_rule = (\n self.model_solutions_form.save(*args, **kwargs)\n )\n return super().save(*args, **kwargs)\n\n\nclass LTIExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = LTIExercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 'lti_service',\n 'context_id',\n 'resource_link_id',\n 'resource_link_title',\n 'aplus_get_and_post',\n 'open_in_iframe',\n ]\n\n @property\n def remote_service_head(self):\n return False\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset('lti_service','context_id',\n 'resource_link_id','resource_link_title',\n 'aplus_get_and_post','open_in_iframe','service_url')\n\n\nclass LTI1p3ExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = LTI1p3Exercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 'lti_service',\n 'custom',\n 'open_in_iframe',\n ]\n\n @property\n def remote_service_head(self) -> bool:\n return False\n\n def get_content_fieldset(self, *add) -> Dict[str, Any]:\n return super().get_content_fieldset('lti_service', 'custom', 'open_in_iframe')\n\n\nclass ExerciseWithAttachmentForm(BaseExerciseForm):\n multipart = True\n\n class Meta:\n model = ExerciseWithAttachment\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 'content',\n 'files_to_submit',\n 'attachment',\n ]\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset(\n 'content', 'files_to_submit', 'attachment')\n\n\nclass StaticExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = StaticExercise\n fields = COMMON_FIELDS + EXERCISE_FIELDS + [\n 'name',\n 'description',\n 'exercise_page_content',\n 'submission_page_content',\n ]\n\n @property\n def remote_service_head(self):\n return False\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset(\n 'exercise_page_content', 'submission_page_content')\n\nclass ExerciseCollectionExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = ExerciseCollection\n fields = COMMON_FIELDS + EXERCISE_FIELDS + SERVICE_FIELDS + \\\n ['target_category']\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset('target_category')\n", "path": "edit_course/exercise_forms.py"}]} |
gh_patches_debug_1408 | rasdani/github-patches | git_diff | conan-io__conan-center-index-7774 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[request] sigslot/1.2.1
### Package Details
* Package Name/Version: **sigslot/1.2.1**
* Changelog: **https://github.com/palacaze/sigslot/releases/tag/v1.2.1**
The above-mentioned version has been newly released by the upstream project and is not yet available as a recipe. Please add this version.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/sigslot/all/conanfile.py`
Content:
```
1 import os
2
3 from conans import ConanFile, tools
4 from conans.errors import ConanInvalidConfiguration
5
6
7 class SigslotConan(ConanFile):
8 name = "sigslot"
9 description = "Sigslot is a header-only, thread safe implementation of signal-slots for C++."
10 topics = ("signal", "slot", "c++14", "header-only")
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://github.com/palacaze/sigslot"
13 license = "MIT"
14 settings = "compiler", "os"
15 no_copy_source = True
16
17 @property
18 def _source_subfolder(self):
19 return "source_subfolder"
20
21 def configure(self):
22 minimal_cpp_standard = "14"
23 if self.settings.compiler.cppstd:
24 tools.check_min_cppstd(self, minimal_cpp_standard)
25 minimal_version = {
26 "gcc": "5",
27 "clang": "3.4",
28 "apple-clang": "10",
29 "Visual Studio": "15" # 14 is not supported by the library
30 }
31 compiler = str(self.settings.compiler)
32 if compiler not in minimal_version:
33 self.output.warn(
34 "%s recipe lacks information about the %s compiler standard version support" % (self.name, compiler))
35 self.output.warn(
36 "%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard))
37 return
38 version = tools.Version(self.settings.compiler.version)
39 if version < minimal_version[compiler]:
40 raise ConanInvalidConfiguration("%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard))
41
42 def source(self):
43 tools.get(**self.conan_data["sources"][self.version])
44 extracted_dir = "sigslot-" + self.version
45 os.rename(extracted_dir, self._source_subfolder)
46
47 def package(self):
48 self.copy(pattern="LICENSE", src=self._source_subfolder, dst="licenses")
49 self.copy(pattern="signal.hpp", src=os.path.join(self._source_subfolder, "include", "sigslot"), dst=os.path.join("include", "sigslot"))
50
51 def package_id(self):
52 self.info.header_only()
53
54 def package_info(self):
55 self.cpp_info.filenames["cmake_find_package"] = "PalSigslot"
56 self.cpp_info.filenames["cmake_find_package_multi"] = "PalSigslot"
57 self.cpp_info.names["cmake_find_package"] = "Pal"
58 self.cpp_info.names["cmake_find_package_multi"] = "Pal"
59
60 self.cpp_info.components["_sigslot"].libs = []
61 self.cpp_info.components["_sigslot"].names["cmake_find_package"] = "Sigslot"
62 self.cpp_info.components["_sigslot"].names["cmake_find_package_multi"] = "Sigslot"
63
64 if self.settings.os == "Linux":
65 self.cpp_info.components["_sigslot"].system_libs.append("pthread")
66 if self.settings.os == "Windows":
67 if self.settings.compiler in ("Visual Studio", "clang"):
68 self.cpp_info.components["_sigslot"].exelinkflags.append('/OPT:NOICF')
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/sigslot/all/conanfile.py b/recipes/sigslot/all/conanfile.py
--- a/recipes/sigslot/all/conanfile.py
+++ b/recipes/sigslot/all/conanfile.py
@@ -65,4 +65,4 @@
self.cpp_info.components["_sigslot"].system_libs.append("pthread")
if self.settings.os == "Windows":
if self.settings.compiler in ("Visual Studio", "clang"):
- self.cpp_info.components["_sigslot"].exelinkflags.append('/OPT:NOICF')
+ self.cpp_info.components["_sigslot"].exelinkflags.append('-OPT:NOICF')
| {"golden_diff": "diff --git a/recipes/sigslot/all/conanfile.py b/recipes/sigslot/all/conanfile.py\n--- a/recipes/sigslot/all/conanfile.py\n+++ b/recipes/sigslot/all/conanfile.py\n@@ -65,4 +65,4 @@\n self.cpp_info.components[\"_sigslot\"].system_libs.append(\"pthread\")\n if self.settings.os == \"Windows\":\n if self.settings.compiler in (\"Visual Studio\", \"clang\"):\n- self.cpp_info.components[\"_sigslot\"].exelinkflags.append('/OPT:NOICF')\n+ self.cpp_info.components[\"_sigslot\"].exelinkflags.append('-OPT:NOICF')\n", "issue": "[request] sigslot/1.2.1\n### Package Details\r\n * Package Name/Version: **sigslot/1.2.1**\r\n * Changelog: **https://github.com/palacaze/sigslot/releases/tag/v1.2.1**\r\n\r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version.\r\n\n", "before_files": [{"content": "import os\n\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass SigslotConan(ConanFile):\n name = \"sigslot\"\n description = \"Sigslot is a header-only, thread safe implementation of signal-slots for C++.\"\n topics = (\"signal\", \"slot\", \"c++14\", \"header-only\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/palacaze/sigslot\"\n license = \"MIT\"\n settings = \"compiler\", \"os\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n minimal_cpp_standard = \"14\"\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, minimal_cpp_standard)\n minimal_version = {\n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"10\",\n \"Visual Studio\": \"15\" # 14 is not supported by the library\n }\n compiler = str(self.settings.compiler)\n if compiler not in minimal_version:\n self.output.warn(\n \"%s recipe lacks information about the %s compiler standard version support\" % (self.name, compiler))\n self.output.warn(\n \"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n return\n version = tools.Version(self.settings.compiler.version)\n if version < minimal_version[compiler]:\n raise ConanInvalidConfiguration(\"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"sigslot-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def package(self):\n self.copy(pattern=\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(pattern=\"signal.hpp\", src=os.path.join(self._source_subfolder, \"include\", \"sigslot\"), dst=os.path.join(\"include\", \"sigslot\"))\n\n def package_id(self):\n self.info.header_only()\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"PalSigslot\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"PalSigslot\"\n self.cpp_info.names[\"cmake_find_package\"] = \"Pal\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Pal\"\n\n self.cpp_info.components[\"_sigslot\"].libs = []\n self.cpp_info.components[\"_sigslot\"].names[\"cmake_find_package\"] = \"Sigslot\"\n self.cpp_info.components[\"_sigslot\"].names[\"cmake_find_package_multi\"] = \"Sigslot\"\n\n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"_sigslot\"].system_libs.append(\"pthread\")\n if self.settings.os == \"Windows\":\n if self.settings.compiler in (\"Visual Studio\", \"clang\"):\n 
self.cpp_info.components[\"_sigslot\"].exelinkflags.append('/OPT:NOICF')\n", "path": "recipes/sigslot/all/conanfile.py"}], "after_files": [{"content": "import os\n\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass SigslotConan(ConanFile):\n name = \"sigslot\"\n description = \"Sigslot is a header-only, thread safe implementation of signal-slots for C++.\"\n topics = (\"signal\", \"slot\", \"c++14\", \"header-only\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/palacaze/sigslot\"\n license = \"MIT\"\n settings = \"compiler\", \"os\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n minimal_cpp_standard = \"14\"\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, minimal_cpp_standard)\n minimal_version = {\n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"10\",\n \"Visual Studio\": \"15\" # 14 is not supported by the library\n }\n compiler = str(self.settings.compiler)\n if compiler not in minimal_version:\n self.output.warn(\n \"%s recipe lacks information about the %s compiler standard version support\" % (self.name, compiler))\n self.output.warn(\n \"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n return\n version = tools.Version(self.settings.compiler.version)\n if version < minimal_version[compiler]:\n raise ConanInvalidConfiguration(\"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"sigslot-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def package(self):\n self.copy(pattern=\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(pattern=\"signal.hpp\", src=os.path.join(self._source_subfolder, \"include\", \"sigslot\"), dst=os.path.join(\"include\", \"sigslot\"))\n\n def package_id(self):\n self.info.header_only()\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"PalSigslot\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"PalSigslot\"\n self.cpp_info.names[\"cmake_find_package\"] = \"Pal\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Pal\"\n\n self.cpp_info.components[\"_sigslot\"].libs = []\n self.cpp_info.components[\"_sigslot\"].names[\"cmake_find_package\"] = \"Sigslot\"\n self.cpp_info.components[\"_sigslot\"].names[\"cmake_find_package_multi\"] = \"Sigslot\"\n\n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"_sigslot\"].system_libs.append(\"pthread\")\n if self.settings.os == \"Windows\":\n if self.settings.compiler in (\"Visual Studio\", \"clang\"):\n self.cpp_info.components[\"_sigslot\"].exelinkflags.append('-OPT:NOICF')\n", "path": "recipes/sigslot/all/conanfile.py"}]} |
gh_patches_debug_1409 | rasdani/github-patches | git_diff | qutip__qutip-1390 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation of qutip shows various warnings on ubuntu 20.04.1
**Describe the bug**
Installing qutip on Ubuntu 20.04.1 shows the following warnings:
```
$ sudo apt install python3-qutip
…
Entpacken von python3-qutip (4.4.1-6build1) ...
python3-qutip (4.4.1-6build1) wird eingerichtet ...
/usr/lib/python3/dist-packages/qutip/_mkl/spmv.py:53: SyntaxWarning: "is"
with a literal. Did you mean "=="?
if x.ndim is 1:
/usr/lib/python3/dist-packages/qutip/qobjevo.py:776: SyntaxWarning: "is no
t" with a literal. Did you mean "!="?
if self.compiled and self.compiled.split()[2] is not "cte":
/usr/lib/python3/dist-packages/qutip/qobjevo.py:1045: SyntaxWarning: "is"
with a literal. Did you mean "=="?
elif op1.type is "array":
/usr/lib/python3/dist-packages/qutip/qobjevo.py:1070: SyntaxWarning: "is"
with a literal. Did you mean "=="?
elif self.ops[_set[0]].type is "string":
/usr/lib/python3/dist-packages/qutip/qobjevo.py:1079: SyntaxWarning: "is"
with a literal. Did you mean "=="?
elif self.ops[_set[0]].type is "array":
/usr/lib/python3/dist-packages/qutip/qobjevo.py:1534: SyntaxWarning: "is n
ot" with a literal. Did you mean "!="?
for key in self.__dict__ if key is not "compiled_qobjevo"}
```
**To Reproduce**
Install qutip on ubuntu 20.04.1 via `sudo apt install python3-qutip`.
**Expected behavior**
No warnings during the installation of qutip.
**Your Environment**
```
>>> qutip.about()
QuTiP: Quantum Toolbox in Python
================================
Copyright (c) QuTiP team 2011 and later.
Original developers: R. J. Johansson & P. D. Nation.
Current admin team: Alexander Pitchford, Paul D. Nation, Nathan Shammah, Shahnawaz Ahmed, Neill Lambert, and Eric Giguère.
Project Manager: Franco Nori.
Currently developed through wide collaboration. See https://github.com/qutip for details.
QuTiP Version: 4.4.1
Numpy Version: 1.17.4
Scipy Version: 1.4.1
Cython Version: 0.29.14
Matplotlib Version: 3.1.2
Python Version: 3.8.5
Number of CPUs: 2
BLAS Info: OPENBLAS
OPENMP Installed: True
INTEL MKL Ext: False
Platform Info: Linux (x86_64)
Installation path: /usr/lib/python3/dist-packages/qutip
```
--- END ISSUE ---
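A minimal sketch of the pattern behind these warnings (illustrative only; the snippet below is not taken from the qutip sources): `is` compares object identity, so testing a value against a literal with `is`/`is not` relies on interpreter-level object caching, and CPython 3.8+ flags it with a SyntaxWarning. Comparing with `==`/`!=` expresses the intended value check.

```python
# Hypothetical illustration of the warning and its fix (not qutip code).
import numpy as np

x = np.zeros(3)

# Identity test against a literal -- CPython 3.8+ emits
# 'SyntaxWarning: "is" with a literal' because the outcome depends on
# object caching rather than on the value itself:
#     if x.ndim is 1: ...

# Value comparison -- the intended test:
if x.ndim == 1:
    print("1D input")
```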
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutip/_mkl/spmv.py`
Content:
```
1 # This file is part of QuTiP: Quantum Toolbox in Python.
2 #
3 # Copyright (c) 2011 and later, Paul D. Nation.
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are
8 # met:
9 #
10 # 1. Redistributions of source code must retain the above copyright notice,
11 # this list of conditions and the following disclaimer.
12 #
13 # 2. Redistributions in binary form must reproduce the above copyright
14 # notice, this list of conditions and the following disclaimer in the
15 # documentation and/or other materials provided with the distribution.
16 #
17 # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
18 # of its contributors may be used to endorse or promote products derived
19 # from this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
24 # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 ###############################################################################
33 import numpy as np
34 import scipy.sparse as sp
35 import ctypes
36 from ctypes import POINTER,c_int,c_char,c_double, byref
37 from numpy import ctypeslib
38 import qutip.settings as qset
39 zcsrgemv = qset.mkl_lib.mkl_cspblas_zcsrgemv
40
41 def mkl_spmv(A, x):
42 """
43 sparse csr_spmv using MKL
44 """
45 (m,n) = A.shape
46
47 # Pointers to data of the matrix
48 data = A.data.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))
49 indptr = A.indptr.ctypes.data_as(POINTER(c_int))
50 indices = A.indices.ctypes.data_as(POINTER(c_int))
51
52 # Allocate output, using same conventions as input
53 if x.ndim is 1:
54 y = np.empty(m,dtype=np.complex,order='C')
55 elif x.ndim==2 and x.shape[1]==1:
56 y = np.empty((m,1),dtype=np.complex,order='C')
57 else:
58 raise Exception('Input vector must be 1D row or 2D column vector')
59
60 np_x = x.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))
61 np_y = y.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))
62 # now call MKL. This returns the answer in np_y, which points to y
63 zcsrgemv(byref(c_char(bytes(b'N'))), byref(c_int(m)), data ,indptr, indices, np_x, np_y )
64 return y
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutip/_mkl/spmv.py b/qutip/_mkl/spmv.py
--- a/qutip/_mkl/spmv.py
+++ b/qutip/_mkl/spmv.py
@@ -50,7 +50,7 @@
indices = A.indices.ctypes.data_as(POINTER(c_int))
# Allocate output, using same conventions as input
- if x.ndim is 1:
+ if x.ndim == 1:
y = np.empty(m,dtype=np.complex,order='C')
elif x.ndim==2 and x.shape[1]==1:
y = np.empty((m,1),dtype=np.complex,order='C')
| {"golden_diff": "diff --git a/qutip/_mkl/spmv.py b/qutip/_mkl/spmv.py\n--- a/qutip/_mkl/spmv.py\n+++ b/qutip/_mkl/spmv.py\n@@ -50,7 +50,7 @@\n indices = A.indices.ctypes.data_as(POINTER(c_int))\n \n # Allocate output, using same conventions as input\n- if x.ndim is 1:\n+ if x.ndim == 1:\n y = np.empty(m,dtype=np.complex,order='C')\n elif x.ndim==2 and x.shape[1]==1:\n y = np.empty((m,1),dtype=np.complex,order='C')\n", "issue": "Installation of qutip shows various warnings on ubuntu 20.04.1\n**Describe the bug**\r\nInstalling the qutip on ubuntu 20.04.1 shows the following warnings:\r\n```\r\n$ sudo apt install python3-qutip \r\n\u2026\r\nEntpacken von python3-qutip (4.4.1-6build1) ...\r\npython3-qutip (4.4.1-6build1) wird eingerichtet ...\r\n/usr/lib/python3/dist-packages/qutip/_mkl/spmv.py:53: SyntaxWarning: \"is\" \r\nwith a literal. Did you mean \"==\"?\r\n if x.ndim is 1:\r\n/usr/lib/python3/dist-packages/qutip/qobjevo.py:776: SyntaxWarning: \"is no\r\nt\" with a literal. Did you mean \"!=\"?\r\n if self.compiled and self.compiled.split()[2] is not \"cte\":\r\n/usr/lib/python3/dist-packages/qutip/qobjevo.py:1045: SyntaxWarning: \"is\" \r\nwith a literal. Did you mean \"==\"?\r\n elif op1.type is \"array\":\r\n/usr/lib/python3/dist-packages/qutip/qobjevo.py:1070: SyntaxWarning: \"is\" \r\nwith a literal. Did you mean \"==\"?\r\n elif self.ops[_set[0]].type is \"string\":\r\n/usr/lib/python3/dist-packages/qutip/qobjevo.py:1079: SyntaxWarning: \"is\" \r\nwith a literal. Did you mean \"==\"?\r\n elif self.ops[_set[0]].type is \"array\":\r\n/usr/lib/python3/dist-packages/qutip/qobjevo.py:1534: SyntaxWarning: \"is n\r\not\" with a literal. Did you mean \"!=\"?\r\n for key in self.__dict__ if key is not \"compiled_qobjevo\"}\r\n```\r\n\r\n\r\n**To Reproduce**\r\nInstall qutip on ubuntu 20.04.1 via `sudo apt install python3-qutip`.\r\n\r\n**Expected behavior**\r\nNo warnings during the installation of qutip.\r\n\r\n**Your Environment**\r\n```\r\n>>> qutip.about()\r\n\r\nQuTiP: Quantum Toolbox in Python\r\n================================\r\nCopyright (c) QuTiP team 2011 and later.\r\nOriginal developers: R. J. Johansson & P. D. Nation.\r\nCurrent admin team: Alexander Pitchford, Paul D. Nation, Nathan Shammah, Shahnawaz Ahmed, Neill Lambert, and Eric Gigu\u00e8re.\r\nProject Manager: Franco Nori.\r\nCurrently developed through wide collaboration. See https://github.com/qutip for details.\r\n\r\nQuTiP Version: 4.4.1\r\nNumpy Version: 1.17.4\r\nScipy Version: 1.4.1\r\nCython Version: 0.29.14\r\nMatplotlib Version: 3.1.2\r\nPython Version: 3.8.5\r\nNumber of CPUs: 2\r\nBLAS Info: OPENBLAS\r\nOPENMP Installed: True\r\nINTEL MKL Ext: False\r\nPlatform Info: Linux (x86_64)\r\nInstallation path: /usr/lib/python3/dist-packages/qutip\r\n```\n", "before_files": [{"content": "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. 
Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\nimport numpy as np\nimport scipy.sparse as sp\nimport ctypes\nfrom ctypes import POINTER,c_int,c_char,c_double, byref\nfrom numpy import ctypeslib\nimport qutip.settings as qset\nzcsrgemv = qset.mkl_lib.mkl_cspblas_zcsrgemv\n\ndef mkl_spmv(A, x):\n \"\"\"\n sparse csr_spmv using MKL\n \"\"\"\n (m,n) = A.shape\n\n # Pointers to data of the matrix\n data = A.data.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n indptr = A.indptr.ctypes.data_as(POINTER(c_int))\n indices = A.indices.ctypes.data_as(POINTER(c_int))\n\n # Allocate output, using same conventions as input\n if x.ndim is 1:\n y = np.empty(m,dtype=np.complex,order='C')\n elif x.ndim==2 and x.shape[1]==1:\n y = np.empty((m,1),dtype=np.complex,order='C')\n else:\n raise Exception('Input vector must be 1D row or 2D column vector')\n \n np_x = x.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n np_y = y.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n # now call MKL. This returns the answer in np_y, which points to y\n zcsrgemv(byref(c_char(bytes(b'N'))), byref(c_int(m)), data ,indptr, indices, np_x, np_y ) \n return y\n", "path": "qutip/_mkl/spmv.py"}], "after_files": [{"content": "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\nimport numpy as np\nimport scipy.sparse as sp\nimport ctypes\nfrom ctypes import POINTER,c_int,c_char,c_double, byref\nfrom numpy import ctypeslib\nimport qutip.settings as qset\nzcsrgemv = qset.mkl_lib.mkl_cspblas_zcsrgemv\n\ndef mkl_spmv(A, x):\n \"\"\"\n sparse csr_spmv using MKL\n \"\"\"\n (m,n) = A.shape\n\n # Pointers to data of the matrix\n data = A.data.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n indptr = A.indptr.ctypes.data_as(POINTER(c_int))\n indices = A.indices.ctypes.data_as(POINTER(c_int))\n\n # Allocate output, using same conventions as input\n if x.ndim == 1:\n y = np.empty(m,dtype=np.complex,order='C')\n elif x.ndim==2 and x.shape[1]==1:\n y = np.empty((m,1),dtype=np.complex,order='C')\n else:\n raise Exception('Input vector must be 1D row or 2D column vector')\n \n np_x = x.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n np_y = y.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n # now call MKL. This returns the answer in np_y, which points to y\n zcsrgemv(byref(c_char(bytes(b'N'))), byref(c_int(m)), data ,indptr, indices, np_x, np_y ) \n return y\n", "path": "qutip/_mkl/spmv.py"}]} |
gh_patches_debug_1410 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-3848 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lint takes a long time
Fix that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from os import getpid
16 from socket import gethostname
17 from time import time
18
19 # pylint: disable=wrong-import-position
20 from google.protobuf.timestamp_pb2 import Timestamp
21 from opencensus.proto.agent.common.v1 import common_pb2
22 from opencensus.proto.trace.v1 import trace_pb2
23
24 from opentelemetry.exporter.opencensus.version import (
25 __version__ as opencensusexporter_exporter_version,
26 )
27 from opentelemetry.trace import SpanKind
28 from opentelemetry.util._importlib_metadata import version
29
30 OPENTELEMETRY_VERSION = version("opentelemetry-api")
31
32
33 def proto_timestamp_from_time_ns(time_ns):
34 """Converts datetime to protobuf timestamp.
35
36 Args:
37 time_ns: Time in nanoseconds
38
39 Returns:
40 Returns protobuf timestamp.
41 """
42 ts = Timestamp()
43 if time_ns is not None:
44 # pylint: disable=no-member
45 ts.FromNanoseconds(time_ns)
46 return ts
47
48
49 # pylint: disable=no-member
50 def get_collector_span_kind(kind: SpanKind):
51 if kind is SpanKind.SERVER:
52 return trace_pb2.Span.SpanKind.SERVER
53 if kind is SpanKind.CLIENT:
54 return trace_pb2.Span.SpanKind.CLIENT
55 return trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED
56
57
58 def add_proto_attribute_value(pb_attributes, key, value):
59 """Sets string, int, boolean or float value on protobuf
60 span, link or annotation attributes.
61
62 Args:
63 pb_attributes: protobuf Span's attributes property.
64 key: attribute key to set.
65 value: attribute value
66 """
67
68 if isinstance(value, bool):
69 pb_attributes.attribute_map[key].bool_value = value
70 elif isinstance(value, int):
71 pb_attributes.attribute_map[key].int_value = value
72 elif isinstance(value, str):
73 pb_attributes.attribute_map[key].string_value.value = value
74 elif isinstance(value, float):
75 pb_attributes.attribute_map[key].double_value = value
76 else:
77 pb_attributes.attribute_map[key].string_value.value = str(value)
78
79
80 # pylint: disable=no-member
81 def get_node(service_name, host_name):
82 """Generates Node message from params and system information.
83
84 Args:
85 service_name: Name of Collector service.
86 host_name: Host name.
87 """
88 return common_pb2.Node(
89 identifier=common_pb2.ProcessIdentifier(
90 host_name=gethostname() if host_name is None else host_name,
91 pid=getpid(),
92 start_timestamp=proto_timestamp_from_time_ns(int(time() * 1e9)),
93 ),
94 library_info=common_pb2.LibraryInfo(
95 language=common_pb2.LibraryInfo.Language.Value("PYTHON"),
96 exporter_version=opencensusexporter_exporter_version,
97 core_library_version=OPENTELEMETRY_VERSION,
98 ),
99 service_info=common_pb2.ServiceInfo(name=service_name),
100 )
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py
--- a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py
+++ b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py
@@ -17,7 +17,9 @@
from time import time
# pylint: disable=wrong-import-position
-from google.protobuf.timestamp_pb2 import Timestamp
+from google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module
+ Timestamp,
+)
from opencensus.proto.agent.common.v1 import common_pb2
from opencensus.proto.trace.v1 import trace_pb2
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py\n--- a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py\n+++ b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py\n@@ -17,7 +17,9 @@\n from time import time\n \n # pylint: disable=wrong-import-position\n-from google.protobuf.timestamp_pb2 import Timestamp\n+from google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module\n+ Timestamp,\n+)\n from opencensus.proto.agent.common.v1 import common_pb2\n from opencensus.proto.trace.v1 import trace_pb2\n", "issue": "lint takes a long time\nFix that.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import getpid\nfrom socket import gethostname\nfrom time import time\n\n# pylint: disable=wrong-import-position\nfrom google.protobuf.timestamp_pb2 import Timestamp\nfrom opencensus.proto.agent.common.v1 import common_pb2\nfrom opencensus.proto.trace.v1 import trace_pb2\n\nfrom opentelemetry.exporter.opencensus.version import (\n __version__ as opencensusexporter_exporter_version,\n)\nfrom opentelemetry.trace import SpanKind\nfrom opentelemetry.util._importlib_metadata import version\n\nOPENTELEMETRY_VERSION = version(\"opentelemetry-api\")\n\n\ndef proto_timestamp_from_time_ns(time_ns):\n \"\"\"Converts datetime to protobuf timestamp.\n\n Args:\n time_ns: Time in nanoseconds\n\n Returns:\n Returns protobuf timestamp.\n \"\"\"\n ts = Timestamp()\n if time_ns is not None:\n # pylint: disable=no-member\n ts.FromNanoseconds(time_ns)\n return ts\n\n\n# pylint: disable=no-member\ndef get_collector_span_kind(kind: SpanKind):\n if kind is SpanKind.SERVER:\n return trace_pb2.Span.SpanKind.SERVER\n if kind is SpanKind.CLIENT:\n return trace_pb2.Span.SpanKind.CLIENT\n return trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED\n\n\ndef add_proto_attribute_value(pb_attributes, key, value):\n \"\"\"Sets string, int, boolean or float value on protobuf\n span, link or annotation attributes.\n\n Args:\n pb_attributes: protobuf Span's attributes property.\n key: attribute key to set.\n value: attribute value\n \"\"\"\n\n if isinstance(value, bool):\n pb_attributes.attribute_map[key].bool_value = value\n elif isinstance(value, int):\n pb_attributes.attribute_map[key].int_value = value\n elif isinstance(value, str):\n pb_attributes.attribute_map[key].string_value.value = value\n elif isinstance(value, float):\n pb_attributes.attribute_map[key].double_value = value\n else:\n pb_attributes.attribute_map[key].string_value.value = str(value)\n\n\n# pylint: disable=no-member\ndef get_node(service_name, host_name):\n \"\"\"Generates Node message from params and system information.\n\n Args:\n service_name: Name of Collector service.\n host_name: Host name.\n \"\"\"\n return common_pb2.Node(\n 
identifier=common_pb2.ProcessIdentifier(\n host_name=gethostname() if host_name is None else host_name,\n pid=getpid(),\n start_timestamp=proto_timestamp_from_time_ns(int(time() * 1e9)),\n ),\n library_info=common_pb2.LibraryInfo(\n language=common_pb2.LibraryInfo.Language.Value(\"PYTHON\"),\n exporter_version=opencensusexporter_exporter_version,\n core_library_version=OPENTELEMETRY_VERSION,\n ),\n service_info=common_pb2.ServiceInfo(name=service_name),\n )\n", "path": "exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import getpid\nfrom socket import gethostname\nfrom time import time\n\n# pylint: disable=wrong-import-position\nfrom google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module\n Timestamp,\n)\nfrom opencensus.proto.agent.common.v1 import common_pb2\nfrom opencensus.proto.trace.v1 import trace_pb2\n\nfrom opentelemetry.exporter.opencensus.version import (\n __version__ as opencensusexporter_exporter_version,\n)\nfrom opentelemetry.trace import SpanKind\nfrom opentelemetry.util._importlib_metadata import version\n\nOPENTELEMETRY_VERSION = version(\"opentelemetry-api\")\n\n\ndef proto_timestamp_from_time_ns(time_ns):\n \"\"\"Converts datetime to protobuf timestamp.\n\n Args:\n time_ns: Time in nanoseconds\n\n Returns:\n Returns protobuf timestamp.\n \"\"\"\n ts = Timestamp()\n if time_ns is not None:\n # pylint: disable=no-member\n ts.FromNanoseconds(time_ns)\n return ts\n\n\n# pylint: disable=no-member\ndef get_collector_span_kind(kind: SpanKind):\n if kind is SpanKind.SERVER:\n return trace_pb2.Span.SpanKind.SERVER\n if kind is SpanKind.CLIENT:\n return trace_pb2.Span.SpanKind.CLIENT\n return trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED\n\n\ndef add_proto_attribute_value(pb_attributes, key, value):\n \"\"\"Sets string, int, boolean or float value on protobuf\n span, link or annotation attributes.\n\n Args:\n pb_attributes: protobuf Span's attributes property.\n key: attribute key to set.\n value: attribute value\n \"\"\"\n\n if isinstance(value, bool):\n pb_attributes.attribute_map[key].bool_value = value\n elif isinstance(value, int):\n pb_attributes.attribute_map[key].int_value = value\n elif isinstance(value, str):\n pb_attributes.attribute_map[key].string_value.value = value\n elif isinstance(value, float):\n pb_attributes.attribute_map[key].double_value = value\n else:\n pb_attributes.attribute_map[key].string_value.value = str(value)\n\n\n# pylint: disable=no-member\ndef get_node(service_name, host_name):\n \"\"\"Generates Node message from params and system information.\n\n Args:\n service_name: Name of Collector service.\n host_name: Host name.\n \"\"\"\n return common_pb2.Node(\n identifier=common_pb2.ProcessIdentifier(\n host_name=gethostname() if host_name is None else host_name,\n pid=getpid(),\n start_timestamp=proto_timestamp_from_time_ns(int(time() * 1e9)),\n ),\n 
library_info=common_pb2.LibraryInfo(\n language=common_pb2.LibraryInfo.Language.Value(\"PYTHON\"),\n exporter_version=opencensusexporter_exporter_version,\n core_library_version=OPENTELEMETRY_VERSION,\n ),\n service_info=common_pb2.ServiceInfo(name=service_name),\n )\n", "path": "exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py"}]} |
gh_patches_debug_1411 | rasdani/github-patches | git_diff | kivy__kivy-4728 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error: fromstring() in core/image/img_pil.py
Platform: Linux (OpenSuse, Ubuntu)
[INFO ] [Kivy ] v1.9.1
[INFO ] [Python ] v2.7.12 (default, Jul 01 2016, 15:36:53) [GCC]
Error:
File "/usr/lib64/python2.7/site-packages/kivy/core/image/img_pil.py", line 105, in save
image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
File "/usr/lib64/python2.7/site-packages/PIL/Image.py", line 2063, in fromstring
"Please call frombytes() instead.")
Exception: fromstring() has been removed. Please call frombytes() instead.
In File "/usr/lib64/python2.7/site-packages/kivy/core/image/img_pil.py"
Line 105:
image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
use...
image = PILImage.frombytes(fmt.upper(), (width, height), pixels)
--- END ISSUE ---
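A minimal sketch of the API change the traceback describes (hedged: the mode, size, and pixel buffer below are made-up placeholders, not values from kivy): Pillow removed `Image.fromstring()`, and `Image.frombytes()` accepts the same mode/size/data arguments.

```python
# Hypothetical stand-alone example; placeholder size and pixel data.
from PIL import Image

width, height = 2, 2
pixels = b"\xff\x00\x00\xff" * (width * height)  # four opaque red RGBA pixels

# Removed spelling:
#     image = Image.fromstring("RGBA", (width, height), pixels)

# Current spelling -- same arguments, bytes-based name:
image = Image.frombytes("RGBA", (width, height), pixels)
image.save("out.png")
```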
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/image/img_pil.py`
Content:
```
1 '''
2 PIL: PIL image loader
3 '''
4
5 __all__ = ('ImageLoaderPIL', )
6
7 try:
8 from PIL import Image as PILImage
9 except:
10 import Image as PILImage
11
12 from kivy.logger import Logger
13 from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
14
15
16 class ImageLoaderPIL(ImageLoaderBase):
17 '''Image loader based on the PIL library.
18
19 .. versionadded:: 1.0.8
20
21 Support for GIF animation added.
22
23 Gif animation has a lot of issues(transparency/color depths... etc).
24 In order to keep it simple, what is implemented here is what is
25 natively supported by the PIL library.
26
27 As a general rule, try to use gifs that have no transparency.
28 Gif's with transparency will work but be prepared for some
29 artifacts until transparency support is improved.
30
31 '''
32
33 @staticmethod
34 def can_save():
35 return True
36
37 @staticmethod
38 def can_load_memory():
39 return True
40
41 @staticmethod
42 def extensions():
43 '''Return accepted extensions for this loader'''
44 # See http://www.pythonware.com/library/pil/handbook/index.htm
45 return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',
46 'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',
47 'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',
48 'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',
49 'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',
50 'xv')
51
52 def _img_correct(self, _img_tmp):
53 '''Convert image to the correct format and orientation.
54 '''
55 # image loader work only with rgb/rgba image
56 if _img_tmp.mode.lower() not in ('rgb', 'rgba'):
57 try:
58 imc = _img_tmp.convert('RGBA')
59 except:
60 Logger.warning(
61 'Image: Unable to convert image to rgba (was %s)' %
62 (_img_tmp.mode.lower()))
63 raise
64 _img_tmp = imc
65
66 return _img_tmp
67
68 def _img_read(self, im):
69 '''Read images from an animated file.
70 '''
71 im.seek(0)
72
73 # Read all images inside
74 try:
75 img_ol = None
76 while True:
77 img_tmp = im
78 img_tmp = self._img_correct(img_tmp)
79 if img_ol and (hasattr(im, 'dispose') and not im.dispose):
80 # paste new frame over old so as to handle
81 # transparency properly
82 img_ol.paste(img_tmp, (0, 0), img_tmp)
83 img_tmp = img_ol
84 img_ol = img_tmp
85 yield ImageData(img_tmp.size[0], img_tmp.size[1],
86 img_tmp.mode.lower(), img_tmp.tobytes())
87 im.seek(im.tell() + 1)
88 except EOFError:
89 pass
90
91 def load(self, filename):
92 try:
93 im = PILImage.open(filename)
94 except:
95 Logger.warning('Image: Unable to load image <%s>' % filename)
96 raise
97 # update internals
98 if not self._inline:
99 self.filename = filename
100 # returns an array of type ImageData len 1 if not a sequence image
101 return list(self._img_read(im))
102
103 @staticmethod
104 def save(filename, width, height, fmt, pixels, flipped=False):
105 image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
106 if flipped:
107 image = image.transpose(PILImage.FLIP_TOP_BOTTOM)
108 image.save(filename)
109 return True
110
111
112 # register
113 ImageLoader.register(ImageLoaderPIL)
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/core/image/img_pil.py b/kivy/core/image/img_pil.py
--- a/kivy/core/image/img_pil.py
+++ b/kivy/core/image/img_pil.py
@@ -102,7 +102,8 @@
@staticmethod
def save(filename, width, height, fmt, pixels, flipped=False):
- image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
+ image = PILImage.frombytes(fmt.upper(), (width, height), pixels)
+
if flipped:
image = image.transpose(PILImage.FLIP_TOP_BOTTOM)
image.save(filename)
| {"golden_diff": "diff --git a/kivy/core/image/img_pil.py b/kivy/core/image/img_pil.py\n--- a/kivy/core/image/img_pil.py\n+++ b/kivy/core/image/img_pil.py\n@@ -102,7 +102,8 @@\n \n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped=False):\n- image = PILImage.fromstring(fmt.upper(), (width, height), pixels)\n+ image = PILImage.frombytes(fmt.upper(), (width, height), pixels)\n+\n if flipped:\n image = image.transpose(PILImage.FLIP_TOP_BOTTOM)\n image.save(filename)\n", "issue": "Error: fromstring() in core/image/img_pil.py\nPlatform: Linux (OpenSuse, Ubuntu)\r\n\r\n[INFO ] [Kivy ] v1.9.1\r\n[INFO ] [Python ] v2.7.12 (default, Jul 01 2016, 15:36:53) [GCC]\r\n\r\nError:\r\n File \"/usr/lib64/python2.7/site-packages/kivy/core/image/img_pil.py\", line 105, in save\r\n image = PILImage.fromstring(fmt.upper(), (width, height), pixels)\r\n File \"/usr/lib64/python2.7/site-packages/PIL/Image.py\", line 2063, in fromstring\r\n \"Please call frombytes() instead.\")\r\nException: fromstring() has been removed. Please call frombytes() instead.\r\n\r\n\r\nIn File \"/usr/lib64/python2.7/site-packages/kivy/core/image/img_pil.py\"\r\nLine 105:\r\nimage = PILImage.fromstring(fmt.upper(), (width, height), pixels)\r\n\r\nuse...\r\n\r\nimage = PILImage.frombytes(fmt.upper(), (width, height), pixels)\n", "before_files": [{"content": "'''\nPIL: PIL image loader\n'''\n\n__all__ = ('ImageLoaderPIL', )\n\ntry:\n from PIL import Image as PILImage\nexcept:\n import Image as PILImage\n\nfrom kivy.logger import Logger\nfrom kivy.core.image import ImageLoaderBase, ImageData, ImageLoader\n\n\nclass ImageLoaderPIL(ImageLoaderBase):\n '''Image loader based on the PIL library.\n\n .. versionadded:: 1.0.8\n\n Support for GIF animation added.\n\n Gif animation has a lot of issues(transparency/color depths... 
etc).\n In order to keep it simple, what is implemented here is what is\n natively supported by the PIL library.\n\n As a general rule, try to use gifs that have no transparency.\n Gif's with transparency will work but be prepared for some\n artifacts until transparency support is improved.\n\n '''\n\n @staticmethod\n def can_save():\n return True\n\n @staticmethod\n def can_load_memory():\n return True\n\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n # See http://www.pythonware.com/library/pil/handbook/index.htm\n return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',\n 'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',\n 'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',\n 'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',\n 'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',\n 'xv')\n\n def _img_correct(self, _img_tmp):\n '''Convert image to the correct format and orientation.\n '''\n # image loader work only with rgb/rgba image\n if _img_tmp.mode.lower() not in ('rgb', 'rgba'):\n try:\n imc = _img_tmp.convert('RGBA')\n except:\n Logger.warning(\n 'Image: Unable to convert image to rgba (was %s)' %\n (_img_tmp.mode.lower()))\n raise\n _img_tmp = imc\n\n return _img_tmp\n\n def _img_read(self, im):\n '''Read images from an animated file.\n '''\n im.seek(0)\n\n # Read all images inside\n try:\n img_ol = None\n while True:\n img_tmp = im\n img_tmp = self._img_correct(img_tmp)\n if img_ol and (hasattr(im, 'dispose') and not im.dispose):\n # paste new frame over old so as to handle\n # transparency properly\n img_ol.paste(img_tmp, (0, 0), img_tmp)\n img_tmp = img_ol\n img_ol = img_tmp\n yield ImageData(img_tmp.size[0], img_tmp.size[1],\n img_tmp.mode.lower(), img_tmp.tobytes())\n im.seek(im.tell() + 1)\n except EOFError:\n pass\n\n def load(self, filename):\n try:\n im = PILImage.open(filename)\n except:\n Logger.warning('Image: Unable to load image <%s>' % filename)\n raise\n # update internals\n if not self._inline:\n self.filename = filename\n # returns an array of type ImageData len 1 if not a sequence image\n return list(self._img_read(im))\n\n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped=False):\n image = PILImage.fromstring(fmt.upper(), (width, height), pixels)\n if flipped:\n image = image.transpose(PILImage.FLIP_TOP_BOTTOM)\n image.save(filename)\n return True\n\n\n# register\nImageLoader.register(ImageLoaderPIL)\n", "path": "kivy/core/image/img_pil.py"}], "after_files": [{"content": "'''\nPIL: PIL image loader\n'''\n\n__all__ = ('ImageLoaderPIL', )\n\ntry:\n from PIL import Image as PILImage\nexcept:\n import Image as PILImage\n\nfrom kivy.logger import Logger\nfrom kivy.core.image import ImageLoaderBase, ImageData, ImageLoader\n\n\nclass ImageLoaderPIL(ImageLoaderBase):\n '''Image loader based on the PIL library.\n\n .. versionadded:: 1.0.8\n\n Support for GIF animation added.\n\n Gif animation has a lot of issues(transparency/color depths... 
etc).\n In order to keep it simple, what is implemented here is what is\n natively supported by the PIL library.\n\n As a general rule, try to use gifs that have no transparency.\n Gif's with transparency will work but be prepared for some\n artifacts until transparency support is improved.\n\n '''\n\n @staticmethod\n def can_save():\n return True\n\n @staticmethod\n def can_load_memory():\n return True\n\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n # See http://www.pythonware.com/library/pil/handbook/index.htm\n return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',\n 'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',\n 'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',\n 'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',\n 'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',\n 'xv')\n\n def _img_correct(self, _img_tmp):\n '''Convert image to the correct format and orientation.\n '''\n # image loader work only with rgb/rgba image\n if _img_tmp.mode.lower() not in ('rgb', 'rgba'):\n try:\n imc = _img_tmp.convert('RGBA')\n except:\n Logger.warning(\n 'Image: Unable to convert image to rgba (was %s)' %\n (_img_tmp.mode.lower()))\n raise\n _img_tmp = imc\n\n return _img_tmp\n\n def _img_read(self, im):\n '''Read images from an animated file.\n '''\n im.seek(0)\n\n # Read all images inside\n try:\n img_ol = None\n while True:\n img_tmp = im\n img_tmp = self._img_correct(img_tmp)\n if img_ol and (hasattr(im, 'dispose') and not im.dispose):\n # paste new frame over old so as to handle\n # transparency properly\n img_ol.paste(img_tmp, (0, 0), img_tmp)\n img_tmp = img_ol\n img_ol = img_tmp\n yield ImageData(img_tmp.size[0], img_tmp.size[1],\n img_tmp.mode.lower(), img_tmp.tobytes())\n im.seek(im.tell() + 1)\n except EOFError:\n pass\n\n def load(self, filename):\n try:\n im = PILImage.open(filename)\n except:\n Logger.warning('Image: Unable to load image <%s>' % filename)\n raise\n # update internals\n if not self._inline:\n self.filename = filename\n # returns an array of type ImageData len 1 if not a sequence image\n return list(self._img_read(im))\n\n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped=False):\n image = PILImage.frombytes(fmt.upper(), (width, height), pixels)\n\n if flipped:\n image = image.transpose(PILImage.FLIP_TOP_BOTTOM)\n image.save(filename)\n return True\n\n\n# register\nImageLoader.register(ImageLoaderPIL)\n", "path": "kivy/core/image/img_pil.py"}]} |
gh_patches_debug_1412 | rasdani/github-patches | git_diff | ethereum__consensus-specs-1131 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BLS and testing
Decided I wanted to get this out to explain the current state of testing, and **collect feedback** (implementers please comment) on what you need from testing, and your feelings about BLS usage in tests.
# BLS and testing
The two pain-points to get a pretty (and large) set of test-vectors out for clients are:
- BLS Signature creation
- BLS Signature verification
And side-issue, but easily resolved:
*efficient creation of a genesis state*:
Genesis creation becomes slow once BLS functionality is implemented in the test code (creation of signed deposits, and their verification).
The solution would be either to cache the genesis state, or to create it directly without going through the spec functions (the current temporary solution on the experiment branch).
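As a rough illustration of the caching option (a sketch only — the builder passed in would be whatever test helper constructs the state; it is not a spec function):

```python
import copy
import functools


def cached(builder):
    """Memoize an expensive, deterministic state builder and hand out deep copies."""
    @functools.lru_cache(maxsize=None)
    def _build(num_validators):
        return builder(num_validators)

    def get(num_validators):
        # Each test gets its own copy, so mutations don't leak back into the cache.
        return copy.deepcopy(_build(num_validators))

    return get

# e.g. genesis_state = cached(create_genesis_state), where `create_genesis_state`
# stands for the (hypothetical here) test helper that signs deposits and builds the state.
```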
## Status
Talking about the status on [`spectest-deco` PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052) here, based on the `v06x` branch, where we are developing 0.6 improvements. (to be merged back into dev later)
### The testing pipeline currently looks like:
- py-spec, calls BLS stub
- test-helpers, don't create self-signed objects with valid signatures
- py-test code, unified with test-vector-creation (see [PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052))
- py-test runner to run spec-tests, purely for assertions
- test-generator running the spec-tests, passing `generator_mode=true` to each of them, making them output a test-vector.
### Pytests status:
- move from `tests/` to `eth2spec/test`, i.e. part of package
- removed use of `pytest`
- annotated with `@spec_test` or similar (see PR 1052)
- as part of test-generation effort, yay for shared effort:
- expanded in block-operation testing: [coverage checklist here](https://github.com/ethereum/eth2.0-specs/issues/927)
  - slightly faster, fewer deep-copies
- stuck on BLS stub (no sig creation/verification)
### Test-generation status:
- BLS, SSZ-generic, SSZ-static, shuffling test generators still all in place and up to date (`v06x` branch)
- `operations` test-gen uses test-package ability to output test-vectors for each test-case
- but no valid signatures
- lack of a definition how to handle this signature problem as a test-consumer
- there are no signature-related testcases
- turning BLS off would effectively let you check conformance, but it's hacky, and not remotely a good practice to have even an option for...
  - it's approx. ~140MB worth (iirc) of yaml-encoded state-transitions, covering many edge-cases. Worth getting into the hands of implementers quickly.
- `sanity` tests updated and can be cleanly used for test-generation, but require more work to define the format of the test-vectors, as there is more variety.
- `epoch` processing tests also updated, also can be used, not as complete as block-processing, lower priority.
## Possible ways forward:
- Simple but hacky: "turn BLS off for testing"
- No "BLS off", BLS ON on client side, but only partially on spec side. Rely on signature verification not being hit before anything else during testing
- valid test cases generated with valid signatures
- invalid test cases marked: does it error because of BLS? And runners should check the reason for aborting processing: if it doesn't match, the test should fail. Now these pytests don't need full BLS update work, and can be released somewhat quicker
- "BLS on", more work (~1 week)
- slower on test-generation, but we get the best kind of test-vectors: correct, BLS verification ON.
- blocker: what if a test case fails because of a signature error (test setup not creating the sig correctly), instead of a real assertion case. Spec will look correct, passes tests, but things are not right. We need to mark Sig-verification errors distinctly, so we can catch these problems when we turn BLS on in the pyspec. How: instead of `assert verify_...`, just `verify_...`, and make it raise a special `BLSVerificationError` (or something like that)
- We likely still want to mark tests as "signature related" or not, so implementers can catch it easily if their code is not aborting properly before signature verification, to assure invalid inputs are not costly.
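A minimal sketch of the `BLSVerificationError` idea above (illustrative only; the four-argument `bls_verify` call shape is assumed from the current spec, and the verify function is passed in so the sketch stays self-contained):

```python
class BLSVerificationError(Exception):
    """Raised when a signature check fails, so runners can tell sig failures from real assertion failures."""


def verify_or_raise(verify_fn, pubkey, message_hash, signature, domain):
    # Drop-in for the `assert bls_verify(...)` pattern: same check, distinct error type.
    if not verify_fn(pubkey, message_hash, signature, domain):
        raise BLSVerificationError("BLS signature verification failed")
```

A spec helper would then call `verify_or_raise(bls_verify, ...)` instead of asserting, and the py-test runner can treat `BLSVerificationError` differently from an `AssertionError`.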
A work-in-progress introduction of actual full BLS usage in the pytests is started here: [`tests-with-sigs` branch](https://github.com/ethereum/eth2.0-specs/tree/tests-with-sigs)
Suggestions welcome.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/phase0/build_spec.py`
Content:
```
1 import sys
2 import function_puller
3
4
5 def build_phase0_spec(sourcefile, outfile):
6 code_lines = []
7 code_lines.append("""
8
9 from typing import (
10 Any,
11 Dict,
12 List,
13 NewType,
14 Tuple,
15 )
16 from eth2spec.utils.minimal_ssz import (
17 SSZType,
18 hash_tree_root,
19 signing_root,
20 )
21 from eth2spec.utils.bls_stub import (
22 bls_aggregate_pubkeys,
23 bls_verify,
24 bls_verify_multiple,
25 )
26 from eth2spec.utils.hash_function import hash
27
28
29 # stub, will get overwritten by real var
30 SLOTS_PER_EPOCH = 64
31
32 Slot = NewType('Slot', int) # uint64
33 Epoch = NewType('Epoch', int) # uint64
34 Shard = NewType('Shard', int) # uint64
35 ValidatorIndex = NewType('ValidatorIndex', int) # uint64
36 Gwei = NewType('Gwei', int) # uint64
37 Bytes32 = NewType('Bytes32', bytes) # bytes32
38 BLSPubkey = NewType('BLSPubkey', bytes) # bytes48
39 BLSSignature = NewType('BLSSignature', bytes) # bytes96
40 Store = None
41 """)
42
43 code_lines += function_puller.get_spec(sourcefile)
44
45 code_lines.append("""
46 # Monkey patch validator compute committee code
47 _compute_committee = compute_committee
48 committee_cache = {}
49
50
51 def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:
52 param_hash = (hash_tree_root(indices), seed, index, count)
53
54 if param_hash in committee_cache:
55 return committee_cache[param_hash]
56 else:
57 ret = _compute_committee(indices, seed, index, count)
58 committee_cache[param_hash] = ret
59 return ret
60
61
62 # Monkey patch hash cache
63 _hash = hash
64 hash_cache = {}
65
66
67 def hash(x):
68 if x in hash_cache:
69 return hash_cache[x]
70 else:
71 ret = _hash(x)
72 hash_cache[x] = ret
73 return ret
74
75
76 # Access to overwrite spec constants based on configuration
77 def apply_constants_preset(preset: Dict[str, Any]):
78 global_vars = globals()
79 for k, v in preset.items():
80 global_vars[k] = v
81
82 # Deal with derived constants
83 global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)
84
85 # Initialize SSZ types again, to account for changed lengths
86 init_SSZ_types()
87 """)
88
89 with open(outfile, 'w') as out:
90 out.write("\n".join(code_lines))
91
92
93 if __name__ == '__main__':
94 if len(sys.argv) < 3:
95 print("Usage: <source phase0> <output phase0 pyspec>")
96 build_phase0_spec(sys.argv[1], sys.argv[2])
97
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py
--- a/scripts/phase0/build_spec.py
+++ b/scripts/phase0/build_spec.py
@@ -13,17 +13,9 @@
NewType,
Tuple,
)
-from eth2spec.utils.minimal_ssz import (
- SSZType,
- hash_tree_root,
- signing_root,
-)
-from eth2spec.utils.bls_stub import (
- bls_aggregate_pubkeys,
- bls_verify,
- bls_verify_multiple,
-)
+from eth2spec.utils.minimal_ssz import *
from eth2spec.utils.hash_function import hash
+from eth2spec.utils.bls import *
# stub, will get overwritten by real var
| {"golden_diff": "diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py\n--- a/scripts/phase0/build_spec.py\n+++ b/scripts/phase0/build_spec.py\n@@ -13,17 +13,9 @@\n NewType,\n Tuple,\n )\n-from eth2spec.utils.minimal_ssz import (\n- SSZType,\n- hash_tree_root,\n- signing_root,\n-)\n-from eth2spec.utils.bls_stub import (\n- bls_aggregate_pubkeys,\n- bls_verify,\n- bls_verify_multiple,\n-)\n+from eth2spec.utils.minimal_ssz import *\n from eth2spec.utils.hash_function import hash\n+from eth2spec.utils.bls import *\n \n \n # stub, will get overwritten by real var\n", "issue": "BLS and testing\nDecided I wanted to get this out to explain the current state of testing, and **collect feedback** (implementers please comment) on what you need from testing, and your feelings about BLS usage in tests.\r\n\r\n# BLS and testing\r\n\r\nThe two pain-points to get a pretty (and large) set of test-vectors out for clients are:\r\n- BLS Signature creation\r\n- BLS Signature verification\r\n\r\nAnd side-issue, but easily resolved:\r\n*efficient creation of a genesis state*:\r\nWhen BLS functionality is implemented in test-code (creation of signed deposits, and verification).\r\nSolution would be to either cache it, or create it directly, without going through the spec functions (current temporary solution on experiment branch).\r\n\r\n## Status\r\n\r\nTalking about the status on [`spectest-deco` PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052) here, based on the `v06x` branch, where we are developing 0.6 improvements. (to be merged back into dev later)\r\n\r\n### The testing pipeline currently looks like:\r\n\r\n- py-spec, calls BLS stub\r\n- test-helpers, don't create self-signed objects with valid signatures\r\n- py-test code, unified with test-vector-creation (see [PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052))\r\n- py-test runner to run spec-tests, purely for assertions\r\n- test-generator running the spec-tests, passing `generator_mode=true` to each of them, making them output a test-vector.\r\n\r\n### Pytests status:\r\n\r\n- move from `tests/` to `eth2spec/test`, i.e. part of package\r\n - removed use of `pytest`\r\n - annotated with `@spec_test` or similar (see PR 1052)\r\n- as part of test-generation effort, yay for shared effort:\r\n - expanded in block-operation testing: [coverage checklist here](https://github.com/ethereum/eth2.0-specs/issues/927)\r\n - slightly faster, less deep-copies\r\n- stuck on BLS stub (no sig creation/verification)\r\n\r\n### Test-generation status:\r\n\r\n- BLS, SSZ-generic, SSZ-static, shuffling test generators still all in place and up to date (`v06x` branch)\r\n- `operations` test-gen uses test-package ability to output test-vectors for each test-case\r\n - but no valid signatures\r\n - lack of a definition how to handle this signature problem as a test-consumer\r\n - there are no signature-related testcases\r\n - turning BLS off would effectively let you check conformance, but it's hacky, and not remotely a good practice to have even an option for...\r\n - it's approx. ~140MB worth (iirc) of yaml encoded state-transitions, covering many edge-cases. 
Worth to get in the hands of implementers quick.\r\n- `sanity` tests updated and can be cleanly used for test-generation, but requires more work to define the format of the test-vectors, as they is more variety.\r\n- `epoch` processing tests also updated, also can be used, not as complete as block-processing, lower priority.\r\n\r\n## Possible ways forward:\r\n\r\n- Simple but hacky: \"turn BLS off for testing\"\r\n- No \"BLS off\", BLS ON on client side, but only partially on spec side. Rely on signature verification not being hit before anything else during testing\r\n - valid test cases generated with valid signatures\r\n - invalid test cases marked: does it error because of BLS? And runners should check the reason for aborting processing: if it doesn't match, the test should fail. Now these pytests don't need full BLS update work, and can be released somewhat quicker\r\n- \"BLS on\", more work (~1 week)\r\n - slower on test-generation, but we get the best kind of test-vectors: correct, BLS verification ON.\r\n - blocker: what if a test case fails because of a signature error (test setup not creating the sig correctly), instead of a real assertion case. Spec will look correct, passes tests, but things are not right. We need to mark Sig-verification errors distinctly, so we can catch these problems when we turn BLS on in the pyspec. How: instead of `assert verify_...`, just `verify_...`, and make it raise a special `BLSVerificationError` (or something like that)\r\n - We likely still want to mark tests as \"signature related\" or not, so implementers can catch it easily if their code is not aborting properly before signature verification, to assure invalid inputs are not costly.\r\n\r\nA work-in-progress introduction of actual full BLS usage in the pytests is started here: [`tests-with-sigs` branch](https://github.com/ethereum/eth2.0-specs/tree/tests-with-sigs)\r\n\r\nSuggestions welcome.\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import sys\nimport function_puller\n\n\ndef build_phase0_spec(sourcefile, outfile):\n code_lines = []\n code_lines.append(\"\"\"\n\nfrom typing import (\n Any,\n Dict,\n List,\n NewType,\n Tuple,\n)\nfrom eth2spec.utils.minimal_ssz import (\n SSZType,\n hash_tree_root,\n signing_root,\n)\nfrom eth2spec.utils.bls_stub import (\n bls_aggregate_pubkeys,\n bls_verify,\n bls_verify_multiple,\n)\nfrom eth2spec.utils.hash_function import hash\n\n\n# stub, will get overwritten by real var\nSLOTS_PER_EPOCH = 64\n\nSlot = NewType('Slot', int) # uint64\nEpoch = NewType('Epoch', int) # uint64\nShard = NewType('Shard', int) # uint64\nValidatorIndex = NewType('ValidatorIndex', int) # uint64\nGwei = NewType('Gwei', int) # uint64\nBytes32 = NewType('Bytes32', bytes) # bytes32\nBLSPubkey = NewType('BLSPubkey', bytes) # bytes48\nBLSSignature = NewType('BLSSignature', bytes) # bytes96\nStore = None\n\"\"\")\n\n code_lines += function_puller.get_spec(sourcefile)\n\n code_lines.append(\"\"\"\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache = {}\n\n\ndef compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:\n param_hash = (hash_tree_root(indices), seed, index, count)\n\n if param_hash in committee_cache:\n return committee_cache[param_hash]\n else:\n ret = _compute_committee(indices, seed, index, count)\n committee_cache[param_hash] = ret\n return ret\n\n\n# Monkey patch hash cache\n_hash = hash\nhash_cache = {}\n\n\ndef hash(x):\n if x in hash_cache:\n 
return hash_cache[x]\n else:\n ret = _hash(x)\n hash_cache[x] = ret\n return ret\n\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\n for k, v in preset.items():\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n\"\"\")\n\n with open(outfile, 'w') as out:\n out.write(\"\\n\".join(code_lines))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Usage: <source phase0> <output phase0 pyspec>\")\n build_phase0_spec(sys.argv[1], sys.argv[2])\n\n", "path": "scripts/phase0/build_spec.py"}], "after_files": [{"content": "import sys\nimport function_puller\n\n\ndef build_phase0_spec(sourcefile, outfile):\n code_lines = []\n code_lines.append(\"\"\"\n\nfrom typing import (\n Any,\n Dict,\n List,\n NewType,\n Tuple,\n)\nfrom eth2spec.utils.minimal_ssz import *\nfrom eth2spec.utils.hash_function import hash\nfrom eth2spec.utils.bls import *\n\n\n# stub, will get overwritten by real var\nSLOTS_PER_EPOCH = 64\n\nSlot = NewType('Slot', int) # uint64\nEpoch = NewType('Epoch', int) # uint64\nShard = NewType('Shard', int) # uint64\nValidatorIndex = NewType('ValidatorIndex', int) # uint64\nGwei = NewType('Gwei', int) # uint64\nBytes32 = NewType('Bytes32', bytes) # bytes32\nBLSPubkey = NewType('BLSPubkey', bytes) # bytes48\nBLSSignature = NewType('BLSSignature', bytes) # bytes96\nStore = None\n\"\"\")\n\n code_lines += function_puller.get_spec(sourcefile)\n\n code_lines.append(\"\"\"\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache = {}\n\n\ndef compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:\n param_hash = (hash_tree_root(indices), seed, index, count)\n\n if param_hash in committee_cache:\n return committee_cache[param_hash]\n else:\n ret = _compute_committee(indices, seed, index, count)\n committee_cache[param_hash] = ret\n return ret\n\n\n# Monkey patch hash cache\n_hash = hash\nhash_cache = {}\n\n\ndef hash(x):\n if x in hash_cache:\n return hash_cache[x]\n else:\n ret = _hash(x)\n hash_cache[x] = ret\n return ret\n\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\n for k, v in preset.items():\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n\"\"\")\n\n with open(outfile, 'w') as out:\n out.write(\"\\n\".join(code_lines))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Usage: <source phase0> <output phase0 pyspec>\")\n build_phase0_spec(sys.argv[1], sys.argv[2])\n\n", "path": "scripts/phase0/build_spec.py"}]} |
gh_patches_debug_1413 | rasdani/github-patches | git_diff | googleapis__python-spanner-django-124 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dbapi: properly parse and ensure WHERE clause
PR #111's task was to add a WHERE clause to statements that are missing one. However, the code in there assumes a single statement that is not terminated by the SQL terminator `;`, so we blindly append ` WHERE 1=1` to such statements. For example, given
```sql
DELETE FROM basic_article;
```
we make it
```sql
DELETE FROM basic_article; WHERE 1=1
```
but really we should be making it
```sql
DELETE FROM basic_article WHERE 1=1;
```
by parsing out tokens of all the statements and affixing ` WHERE 1=1` per statement.
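For illustration only (not the library's actual implementation, and assuming the third-party `sqlparse` package is available), the per-statement fix could look like:

```python
import sqlparse  # third-party; used here just to split multi-statement SQL


def ensure_dummy_where(sql):
    """Append ` WHERE 1=1` to each DELETE/UPDATE statement that lacks a WHERE clause."""
    fixed = []
    for stmt in sqlparse.split(sql):
        stmt = stmt.strip()
        terminated = stmt.endswith(';')
        body = stmt[:-1].rstrip() if terminated else stmt
        if body.upper().startswith(('DELETE', 'UPDATE')) and ' WHERE ' not in body.upper():
            body += ' WHERE 1=1'
        fixed.append(body + (';' if terminated else ''))
    return ' '.join(fixed)


print(ensure_dummy_where('DELETE FROM basic_article;'))
# -> DELETE FROM basic_article WHERE 1=1;
```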
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spanner/django/operations.py`
Content:
```
1 from datetime import datetime
2
3 from django.conf import settings
4 from django.db.backends.base.operations import BaseDatabaseOperations
5 from django.utils import timezone
6 from spanner.dbapi.parse_utils import TimestampStr
7
8
9 class DatabaseOperations(BaseDatabaseOperations):
10 # Django's lookup names that require a different name in Spanner's
11 # EXTRACT() function.
12 # https://cloud.google.com/spanner/docs/functions-and-operators#extract
13 extract_names = {
14 'week_day': 'dayofweek',
15 'iso_week': 'isoweek',
16 'iso_year': 'isoyear',
17 }
18
19 def quote_name(self, name):
20 if '-' in name:
21 return '`' + name + '`'
22 return name
23
24 def bulk_insert_sql(self, fields, placeholder_rows):
25 placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
26 values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
27 return "VALUES " + values_sql
28
29 def sql_flush(self, style, tables, sequences, allow_cascade=False):
30 # Cloud Spanner doesn't support TRUNCATE so DELETE instead.
31 # A dummy WHERE clause is required.
32 if tables:
33 delete_sql = '%s %s %%s;' % (
34 style.SQL_KEYWORD('DELETE'),
35 style.SQL_KEYWORD('FROM'),
36 )
37 return [
38 delete_sql % style.SQL_FIELD(self.quote_name(table))
39 for table in tables
40 ]
41 else:
42 return []
43
44 def adapt_datetimefield_value(self, value):
45 if value is None:
46 return None
47 # Expression values are adapted by the database.
48 if hasattr(value, 'resolve_expression'):
49 return value
50 # Cloud Spanner doesn't support tz-aware datetimes
51 if timezone.is_aware(value):
52 if settings.USE_TZ:
53 value = timezone.make_naive(value, self.connection.timezone)
54 else:
55 raise ValueError("Cloud Spanner does not support timezone-aware datetimes when USE_TZ is False.")
56 return TimestampStr(value.isoformat(timespec='microseconds') + 'Z')
57
58 def get_db_converters(self, expression):
59 converters = super().get_db_converters(expression)
60 internal_type = expression.output_field.get_internal_type()
61 if internal_type == 'DateTimeField':
62 converters.append(self.convert_datetimefield_value)
63 return converters
64
65 def convert_datetimefield_value(self, value, expression, connection):
66 if value is None:
67 return value
68 # Cloud Spanner returns the
69 # google.api_core.datetime_helpers.DatetimeWithNanoseconds subclass
70 # of datetime with tzinfo=UTC (which should be replaced with the
71 # connection's timezone). Django doesn't support nanoseconds so that
72 # part is ignored.
73 return datetime(
74 value.year, value.month, value.day,
75 value.hour, value.minute, value.second, value.microsecond,
76 self.connection.timezone,
77 )
78
79 def date_extract_sql(self, lookup_type, field_name):
80 lookup_type = self.extract_names.get(lookup_type, lookup_type)
81 return 'EXTRACT(%s FROM %s)' % (lookup_type, field_name)
82
83 def datetime_extract_sql(self, lookup_type, field_name, tzname):
84 tzname = self.connection.timezone if settings.USE_TZ else 'UTC'
85 lookup_type = self.extract_names.get(lookup_type, lookup_type)
86 return 'EXTRACT(%s FROM %s AT TIME ZONE "%s")' % (lookup_type, field_name, tzname)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spanner/django/operations.py b/spanner/django/operations.py
--- a/spanner/django/operations.py
+++ b/spanner/django/operations.py
@@ -30,7 +30,7 @@
# Cloud Spanner doesn't support TRUNCATE so DELETE instead.
# A dummy WHERE clause is required.
if tables:
- delete_sql = '%s %s %%s;' % (
+ delete_sql = '%s %s %%s' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
)
| {"golden_diff": "diff --git a/spanner/django/operations.py b/spanner/django/operations.py\n--- a/spanner/django/operations.py\n+++ b/spanner/django/operations.py\n@@ -30,7 +30,7 @@\n # Cloud Spanner doesn't support TRUNCATE so DELETE instead.\n # A dummy WHERE clause is required.\n if tables:\n- delete_sql = '%s %s %%s;' % (\n+ delete_sql = '%s %s %%s' % (\n style.SQL_KEYWORD('DELETE'),\n style.SQL_KEYWORD('FROM'),\n )\n", "issue": "dbapi: properly parse and ensure WHERE clause\nPR #111's task was to add a WHERE clause to missing ones. However, the code in there assumes a single statement not terminated by a SQL terminator `;` and we blindly add ` WHERE 1=1` for missing statements e.g. given\r\n```sql\r\nDELETE FROM basic_article;\r\n``` \r\nwe make it\r\n```sql\r\nDELETE FROM basic_article; WHERE 1=1\r\n```\r\n\r\nbut really we should be making it\r\n```sql\r\nDELETE FROM basic_article WHERE 1=1;\r\n```\r\nby parsing out tokens of all the statements and affixing ` WHERE 1=1` per statement.\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.conf import settings\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.utils import timezone\nfrom spanner.dbapi.parse_utils import TimestampStr\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n # Django's lookup names that require a different name in Spanner's\n # EXTRACT() function.\n # https://cloud.google.com/spanner/docs/functions-and-operators#extract\n extract_names = {\n 'week_day': 'dayofweek',\n 'iso_week': 'isoweek',\n 'iso_year': 'isoyear',\n }\n\n def quote_name(self, name):\n if '-' in name:\n return '`' + name + '`'\n return name\n\n def bulk_insert_sql(self, fields, placeholder_rows):\n placeholder_rows_sql = (\", \".join(row) for row in placeholder_rows)\n values_sql = \", \".join(\"(%s)\" % sql for sql in placeholder_rows_sql)\n return \"VALUES \" + values_sql\n\n def sql_flush(self, style, tables, sequences, allow_cascade=False):\n # Cloud Spanner doesn't support TRUNCATE so DELETE instead.\n # A dummy WHERE clause is required.\n if tables:\n delete_sql = '%s %s %%s;' % (\n style.SQL_KEYWORD('DELETE'),\n style.SQL_KEYWORD('FROM'),\n )\n return [\n delete_sql % style.SQL_FIELD(self.quote_name(table))\n for table in tables\n ]\n else:\n return []\n\n def adapt_datetimefield_value(self, value):\n if value is None:\n return None\n # Expression values are adapted by the database.\n if hasattr(value, 'resolve_expression'):\n return value\n # Cloud Spanner doesn't support tz-aware datetimes\n if timezone.is_aware(value):\n if settings.USE_TZ:\n value = timezone.make_naive(value, self.connection.timezone)\n else:\n raise ValueError(\"Cloud Spanner does not support timezone-aware datetimes when USE_TZ is False.\")\n return TimestampStr(value.isoformat(timespec='microseconds') + 'Z')\n\n def get_db_converters(self, expression):\n converters = super().get_db_converters(expression)\n internal_type = expression.output_field.get_internal_type()\n if internal_type == 'DateTimeField':\n converters.append(self.convert_datetimefield_value)\n return converters\n\n def convert_datetimefield_value(self, value, expression, connection):\n if value is None:\n return value\n # Cloud Spanner returns the\n # google.api_core.datetime_helpers.DatetimeWithNanoseconds subclass\n # of datetime with tzinfo=UTC (which should be replaced with the\n # connection's timezone). 
Django doesn't support nanoseconds so that\n # part is ignored.\n return datetime(\n value.year, value.month, value.day,\n value.hour, value.minute, value.second, value.microsecond,\n self.connection.timezone,\n )\n\n def date_extract_sql(self, lookup_type, field_name):\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s)' % (lookup_type, field_name)\n\n def datetime_extract_sql(self, lookup_type, field_name, tzname):\n tzname = self.connection.timezone if settings.USE_TZ else 'UTC'\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s AT TIME ZONE \"%s\")' % (lookup_type, field_name, tzname)\n", "path": "spanner/django/operations.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom django.conf import settings\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.utils import timezone\nfrom spanner.dbapi.parse_utils import TimestampStr\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n # Django's lookup names that require a different name in Spanner's\n # EXTRACT() function.\n # https://cloud.google.com/spanner/docs/functions-and-operators#extract\n extract_names = {\n 'week_day': 'dayofweek',\n 'iso_week': 'isoweek',\n 'iso_year': 'isoyear',\n }\n\n def quote_name(self, name):\n if '-' in name:\n return '`' + name + '`'\n return name\n\n def bulk_insert_sql(self, fields, placeholder_rows):\n placeholder_rows_sql = (\", \".join(row) for row in placeholder_rows)\n values_sql = \", \".join(\"(%s)\" % sql for sql in placeholder_rows_sql)\n return \"VALUES \" + values_sql\n\n def sql_flush(self, style, tables, sequences, allow_cascade=False):\n # Cloud Spanner doesn't support TRUNCATE so DELETE instead.\n # A dummy WHERE clause is required.\n if tables:\n delete_sql = '%s %s %%s' % (\n style.SQL_KEYWORD('DELETE'),\n style.SQL_KEYWORD('FROM'),\n )\n return [\n delete_sql % style.SQL_FIELD(self.quote_name(table))\n for table in tables\n ]\n else:\n return []\n\n def adapt_datetimefield_value(self, value):\n if value is None:\n return None\n # Expression values are adapted by the database.\n if hasattr(value, 'resolve_expression'):\n return value\n # Cloud Spanner doesn't support tz-aware datetimes\n if timezone.is_aware(value):\n if settings.USE_TZ:\n value = timezone.make_naive(value, self.connection.timezone)\n else:\n raise ValueError(\"Cloud Spanner does not support timezone-aware datetimes when USE_TZ is False.\")\n return TimestampStr(value.isoformat(timespec='microseconds') + 'Z')\n\n def get_db_converters(self, expression):\n converters = super().get_db_converters(expression)\n internal_type = expression.output_field.get_internal_type()\n if internal_type == 'DateTimeField':\n converters.append(self.convert_datetimefield_value)\n return converters\n\n def convert_datetimefield_value(self, value, expression, connection):\n if value is None:\n return value\n # Cloud Spanner returns the\n # google.api_core.datetime_helpers.DatetimeWithNanoseconds subclass\n # of datetime with tzinfo=UTC (which should be replaced with the\n # connection's timezone). 
Django doesn't support nanoseconds so that\n # part is ignored.\n return datetime(\n value.year, value.month, value.day,\n value.hour, value.minute, value.second, value.microsecond,\n self.connection.timezone,\n )\n\n def date_extract_sql(self, lookup_type, field_name):\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s)' % (lookup_type, field_name)\n\n def datetime_extract_sql(self, lookup_type, field_name, tzname):\n tzname = self.connection.timezone if settings.USE_TZ else 'UTC'\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s AT TIME ZONE \"%s\")' % (lookup_type, field_name, tzname)\n", "path": "spanner/django/operations.py"}]} |
gh_patches_debug_1414 | rasdani/github-patches | git_diff | tensorflow__tfx-2189 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Project can't be cloned correctly on macOS due to case insensitivity
Under the `tfx` folder there's a folder called `build` and a Bazel file called `BUILD`. Because the default macOS filesystem is case-insensitive, only the folder survives when `git clone` checks out the repository. This means that when trying to build locally, Bazel won't be able to find the `BUILD` file required to compile the protobuf schemas, and will fail.
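A throwaway probe (not part of TFX) that shows why the two names collide on such a filesystem:

```python
import os
import tempfile

# On a case-insensitive filesystem (the macOS default), 'build' and 'BUILD'
# refer to the same directory entry, so a file and a folder with these names
# cannot coexist after checkout.
with tempfile.TemporaryDirectory() as d:
    open(os.path.join(d, 'build'), 'w').close()
    print('case-insensitive filesystem:', os.path.exists(os.path.join(d, 'BUILD')))
```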
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Lint as: python2, python3
2 # Copyright 2019 Google LLC. All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Package Setup script for TFX."""
16
17 from __future__ import print_function
18
19 import os
20 import subprocess
21
22 import setuptools
23 from setuptools import find_packages
24 from setuptools import setup
25 from setuptools.command import develop
26 # pylint: disable=g-bad-import-order
27 # It is recommended to import setuptools prior to importing distutils to avoid
28 # using legacy behavior from distutils.
29 # https://setuptools.readthedocs.io/en/latest/history.html#v48-0-0
30 from distutils import spawn
31 from distutils.command import build
32 # pylint: enable=g-bad-import-order
33
34 from tfx import dependencies
35 from tfx import version
36 from tfx.tools import resolve_deps
37
38
39 class _BuildCommand(build.build):
40 """Build everything that is needed to install.
41
42 This overrides the original distutils "build" command to to run gen_proto
43 command before any sub_commands.
44
45 build command is also invoked from bdist_wheel and install command, therefore
46 this implementation covers the following commands:
47 - pip install . (which invokes bdist_wheel)
48 - python setup.py install (which invokes install command)
49 - python setup.py bdist_wheel (which invokes bdist_wheel command)
50 """
51
52 def _should_generate_proto(self):
53 """Predicate method for running GenProto command or not."""
54 return True
55
56 # Add "gen_proto" command as the first sub_command of "build". Each
57 # sub_command of "build" (e.g. "build_py", "build_ext", etc.) is executed
58 # sequentially when running a "build" command, if the second item in the tuple
59 # (predicate method) is evaluated to true.
60 sub_commands = [
61 ('gen_proto', _should_generate_proto),
62 ] + build.build.sub_commands
63
64
65 class _DevelopCommand(develop.develop):
66 """Developmental install.
67
68 https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode
69 Unlike normal package installation where distribution is copied to the
70 site-packages folder, developmental install creates a symbolic link to the
71 source code directory, so that your local code change is immediately visible
72 in runtime without re-installation.
73
74 This is a setuptools-only (i.e. not included in distutils) command that is
75 also used in pip's editable install (pip install -e). Originally it only
76 invokes build_py and install_lib command, but we override it to run gen_proto
77 command in advance.
78
79 This implementation covers the following commands:
80 - pip install -e . (developmental install)
81 - python setup.py develop (which is invoked from developmental install)
82 """
83
84 def run(self):
85 self.run_command('gen_proto')
86 # Run super().initialize_options. Command is an old-style class (i.e.
87 # doesn't inherit object) and super() fails in python 2.
88 develop.develop.run(self)
89
90
91 class _GenProtoCommand(setuptools.Command):
92 """Generate proto stub files in python.
93
94 Running this command will populate foo_pb2.py file next to your foo.proto
95 file.
96 """
97
98 def initialize_options(self):
99 pass
100
101 def finalize_options(self):
102 self._bazel_cmd = spawn.find_executable('bazel')
103 if not self._bazel_cmd:
104 raise RuntimeError(
105 'Could not find "bazel" binary. Please visit '
106 'https://docs.bazel.build/versions/master/install.html for '
107 'installation instruction.')
108
109 def run(self):
110 subprocess.check_call(
111 [self._bazel_cmd, 'run', '//tfx/build:gen_proto'],
112 # Bazel should be invoked in a directory containing bazel WORKSPACE
113 # file, which is the root directory.
114 cwd=os.path.dirname(os.path.realpath(__file__)),)
115
116
117 # Get the long description from the README file.
118 with open('README.md') as fp:
119 _LONG_DESCRIPTION = fp.read()
120
121
122 setup(
123 name='tfx',
124 version=version.__version__,
125 author='Google LLC',
126 author_email='[email protected]',
127 license='Apache 2.0',
128 classifiers=[
129 'Development Status :: 4 - Beta',
130 'Intended Audience :: Developers',
131 'Intended Audience :: Education',
132 'Intended Audience :: Science/Research',
133 'License :: OSI Approved :: Apache Software License',
134 'Operating System :: OS Independent',
135 'Programming Language :: Python',
136 'Programming Language :: Python :: 3',
137 'Programming Language :: Python :: 3.5',
138 'Programming Language :: Python :: 3.6',
139 'Programming Language :: Python :: 3.7',
140 'Programming Language :: Python :: 3 :: Only',
141 'Topic :: Scientific/Engineering',
142 'Topic :: Scientific/Engineering :: Artificial Intelligence',
143 'Topic :: Scientific/Engineering :: Mathematics',
144 'Topic :: Software Development',
145 'Topic :: Software Development :: Libraries',
146 'Topic :: Software Development :: Libraries :: Python Modules',
147 ],
148 namespace_packages=[],
149 install_requires=dependencies.make_required_install_packages(),
150 extras_require={
151 # In order to use 'docker-image' or 'all', system libraries specified
152 # under 'tfx/tools/docker/Dockerfile' are required
153 'docker-image': dependencies.make_extra_packages_docker_image(),
154 'tfjs': dependencies.make_extra_packages_tfjs(),
155 'all': dependencies.make_all_dependency_packages(),
156 },
157 # TODO(b/158761800): Move to [build-system] requires in pyproject.toml.
158 setup_requires=[
159 'pytest-runner',
160 'poetry==1.0.9', # Required for ResolveDeps command.
161 # Poetry API is not officially documented and subject
162 # to change in the future. Thus fix the version.
163 'clikit>=0.4.3,<0.5', # Required for ResolveDeps command.
164 ],
165 cmdclass={
166 'build': _BuildCommand,
167 'develop': _DevelopCommand,
168 'gen_proto': _GenProtoCommand,
169 'resolve_deps': resolve_deps.ResolveDepsCommand,
170 },
171 python_requires='>=3.5,<4',
172 packages=find_packages(),
173 include_package_data=True,
174 description='TensorFlow Extended (TFX) is a TensorFlow-based general-purpose machine learning platform implemented at Google',
175 long_description=_LONG_DESCRIPTION,
176 long_description_content_type='text/markdown',
177 keywords='tensorflow tfx',
178 url='https://www.tensorflow.org/tfx',
179 download_url='https://github.com/tensorflow/tfx/tags',
180 requires=[],
181 # Below console_scripts, each line identifies one console script. The first
182 # part before the equals sign (=) which is 'tfx', is the name of the script
183 # that should be generated, the second part is the import path followed by a
184 # colon (:) with the Click command group. After installation, the user can
185 # invoke the CLI using "tfx <command_group> <sub_command> <flags>"
186 entry_points="""
187 [console_scripts]
188 tfx=tfx.tools.cli.cli_main:cli_group
189 """)
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -108,7 +108,7 @@
def run(self):
subprocess.check_call(
- [self._bazel_cmd, 'run', '//tfx/build:gen_proto'],
+ [self._bazel_cmd, 'run', '//build:gen_proto'],
# Bazel should be invoked in a directory containing bazel WORKSPACE
# file, which is the root directory.
cwd=os.path.dirname(os.path.realpath(__file__)),)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -108,7 +108,7 @@\n \n def run(self):\n subprocess.check_call(\n- [self._bazel_cmd, 'run', '//tfx/build:gen_proto'],\n+ [self._bazel_cmd, 'run', '//build:gen_proto'],\n # Bazel should be invoked in a directory containing bazel WORKSPACE\n # file, which is the root directory.\n cwd=os.path.dirname(os.path.realpath(__file__)),)\n", "issue": "Project can't be cloned correctly on macOS due to case insensitivity\nUnder the `tfx` folder there's a folder called `build` and a bazel file called `BUILD`. Because macOS is by default case insensitive, only the folder is cloned when `git clone` is run. This means that when trying to build locally, bazel won't be able to find the `BUILD` file required to compile the protobuf schemas, and will fail.\n", "before_files": [{"content": "# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Package Setup script for TFX.\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport subprocess\n\nimport setuptools\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command import develop\n# pylint: disable=g-bad-import-order\n# It is recommended to import setuptools prior to importing distutils to avoid\n# using legacy behavior from distutils.\n# https://setuptools.readthedocs.io/en/latest/history.html#v48-0-0\nfrom distutils import spawn\nfrom distutils.command import build\n# pylint: enable=g-bad-import-order\n\nfrom tfx import dependencies\nfrom tfx import version\nfrom tfx.tools import resolve_deps\n\n\nclass _BuildCommand(build.build):\n \"\"\"Build everything that is needed to install.\n\n This overrides the original distutils \"build\" command to to run gen_proto\n command before any sub_commands.\n\n build command is also invoked from bdist_wheel and install command, therefore\n this implementation covers the following commands:\n - pip install . (which invokes bdist_wheel)\n - python setup.py install (which invokes install command)\n - python setup.py bdist_wheel (which invokes bdist_wheel command)\n \"\"\"\n\n def _should_generate_proto(self):\n \"\"\"Predicate method for running GenProto command or not.\"\"\"\n return True\n\n # Add \"gen_proto\" command as the first sub_command of \"build\". Each\n # sub_command of \"build\" (e.g. \"build_py\", \"build_ext\", etc.) 
is executed\n # sequentially when running a \"build\" command, if the second item in the tuple\n # (predicate method) is evaluated to true.\n sub_commands = [\n ('gen_proto', _should_generate_proto),\n ] + build.build.sub_commands\n\n\nclass _DevelopCommand(develop.develop):\n \"\"\"Developmental install.\n\n https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode\n Unlike normal package installation where distribution is copied to the\n site-packages folder, developmental install creates a symbolic link to the\n source code directory, so that your local code change is immediately visible\n in runtime without re-installation.\n\n This is a setuptools-only (i.e. not included in distutils) command that is\n also used in pip's editable install (pip install -e). Originally it only\n invokes build_py and install_lib command, but we override it to run gen_proto\n command in advance.\n\n This implementation covers the following commands:\n - pip install -e . (developmental install)\n - python setup.py develop (which is invoked from developmental install)\n \"\"\"\n\n def run(self):\n self.run_command('gen_proto')\n # Run super().initialize_options. Command is an old-style class (i.e.\n # doesn't inherit object) and super() fails in python 2.\n develop.develop.run(self)\n\n\nclass _GenProtoCommand(setuptools.Command):\n \"\"\"Generate proto stub files in python.\n\n Running this command will populate foo_pb2.py file next to your foo.proto\n file.\n \"\"\"\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n self._bazel_cmd = spawn.find_executable('bazel')\n if not self._bazel_cmd:\n raise RuntimeError(\n 'Could not find \"bazel\" binary. Please visit '\n 'https://docs.bazel.build/versions/master/install.html for '\n 'installation instruction.')\n\n def run(self):\n subprocess.check_call(\n [self._bazel_cmd, 'run', '//tfx/build:gen_proto'],\n # Bazel should be invoked in a directory containing bazel WORKSPACE\n # file, which is the root directory.\n cwd=os.path.dirname(os.path.realpath(__file__)),)\n\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\n\nsetup(\n name='tfx',\n version=version.__version__,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n namespace_packages=[],\n install_requires=dependencies.make_required_install_packages(),\n extras_require={\n # In order to use 'docker-image' or 'all', system libraries specified\n # under 'tfx/tools/docker/Dockerfile' are required\n 'docker-image': dependencies.make_extra_packages_docker_image(),\n 'tfjs': dependencies.make_extra_packages_tfjs(),\n 'all': 
dependencies.make_all_dependency_packages(),\n },\n # TODO(b/158761800): Move to [build-system] requires in pyproject.toml.\n setup_requires=[\n 'pytest-runner',\n 'poetry==1.0.9', # Required for ResolveDeps command.\n # Poetry API is not officially documented and subject\n # to change in the future. Thus fix the version.\n 'clikit>=0.4.3,<0.5', # Required for ResolveDeps command.\n ],\n cmdclass={\n 'build': _BuildCommand,\n 'develop': _DevelopCommand,\n 'gen_proto': _GenProtoCommand,\n 'resolve_deps': resolve_deps.ResolveDepsCommand,\n },\n python_requires='>=3.5,<4',\n packages=find_packages(),\n include_package_data=True,\n description='TensorFlow Extended (TFX) is a TensorFlow-based general-purpose machine learning platform implemented at Google',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n keywords='tensorflow tfx',\n url='https://www.tensorflow.org/tfx',\n download_url='https://github.com/tensorflow/tfx/tags',\n requires=[],\n # Below console_scripts, each line identifies one console script. The first\n # part before the equals sign (=) which is 'tfx', is the name of the script\n # that should be generated, the second part is the import path followed by a\n # colon (:) with the Click command group. After installation, the user can\n # invoke the CLI using \"tfx <command_group> <sub_command> <flags>\"\n entry_points=\"\"\"\n [console_scripts]\n tfx=tfx.tools.cli.cli_main:cli_group\n \"\"\")\n", "path": "setup.py"}], "after_files": [{"content": "# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Package Setup script for TFX.\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport subprocess\n\nimport setuptools\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command import develop\n# pylint: disable=g-bad-import-order\n# It is recommended to import setuptools prior to importing distutils to avoid\n# using legacy behavior from distutils.\n# https://setuptools.readthedocs.io/en/latest/history.html#v48-0-0\nfrom distutils import spawn\nfrom distutils.command import build\n# pylint: enable=g-bad-import-order\n\nfrom tfx import dependencies\nfrom tfx import version\nfrom tfx.tools import resolve_deps\n\n\nclass _BuildCommand(build.build):\n \"\"\"Build everything that is needed to install.\n\n This overrides the original distutils \"build\" command to to run gen_proto\n command before any sub_commands.\n\n build command is also invoked from bdist_wheel and install command, therefore\n this implementation covers the following commands:\n - pip install . (which invokes bdist_wheel)\n - python setup.py install (which invokes install command)\n - python setup.py bdist_wheel (which invokes bdist_wheel command)\n \"\"\"\n\n def _should_generate_proto(self):\n \"\"\"Predicate method for running GenProto command or not.\"\"\"\n return True\n\n # Add \"gen_proto\" command as the first sub_command of \"build\". 
Each\n # sub_command of \"build\" (e.g. \"build_py\", \"build_ext\", etc.) is executed\n # sequentially when running a \"build\" command, if the second item in the tuple\n # (predicate method) is evaluated to true.\n sub_commands = [\n ('gen_proto', _should_generate_proto),\n ] + build.build.sub_commands\n\n\nclass _DevelopCommand(develop.develop):\n \"\"\"Developmental install.\n\n https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode\n Unlike normal package installation where distribution is copied to the\n site-packages folder, developmental install creates a symbolic link to the\n source code directory, so that your local code change is immediately visible\n in runtime without re-installation.\n\n This is a setuptools-only (i.e. not included in distutils) command that is\n also used in pip's editable install (pip install -e). Originally it only\n invokes build_py and install_lib command, but we override it to run gen_proto\n command in advance.\n\n This implementation covers the following commands:\n - pip install -e . (developmental install)\n - python setup.py develop (which is invoked from developmental install)\n \"\"\"\n\n def run(self):\n self.run_command('gen_proto')\n # Run super().initialize_options. Command is an old-style class (i.e.\n # doesn't inherit object) and super() fails in python 2.\n develop.develop.run(self)\n\n\nclass _GenProtoCommand(setuptools.Command):\n \"\"\"Generate proto stub files in python.\n\n Running this command will populate foo_pb2.py file next to your foo.proto\n file.\n \"\"\"\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n self._bazel_cmd = spawn.find_executable('bazel')\n if not self._bazel_cmd:\n raise RuntimeError(\n 'Could not find \"bazel\" binary. Please visit '\n 'https://docs.bazel.build/versions/master/install.html for '\n 'installation instruction.')\n\n def run(self):\n subprocess.check_call(\n [self._bazel_cmd, 'run', '//build:gen_proto'],\n # Bazel should be invoked in a directory containing bazel WORKSPACE\n # file, which is the root directory.\n cwd=os.path.dirname(os.path.realpath(__file__)),)\n\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\n\nsetup(\n name='tfx',\n version=version.__version__,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n namespace_packages=[],\n install_requires=dependencies.make_required_install_packages(),\n extras_require={\n # In order to use 'docker-image' or 'all', system libraries specified\n # under 'tfx/tools/docker/Dockerfile' are required\n 'docker-image': dependencies.make_extra_packages_docker_image(),\n 'tfjs': 
dependencies.make_extra_packages_tfjs(),\n 'all': dependencies.make_all_dependency_packages(),\n },\n # TODO(b/158761800): Move to [build-system] requires in pyproject.toml.\n setup_requires=[\n 'pytest-runner',\n 'poetry==1.0.9', # Required for ResolveDeps command.\n # Poetry API is not officially documented and subject\n # to change in the future. Thus fix the version.\n 'clikit>=0.4.3,<0.5', # Required for ResolveDeps command.\n ],\n cmdclass={\n 'build': _BuildCommand,\n 'develop': _DevelopCommand,\n 'gen_proto': _GenProtoCommand,\n 'resolve_deps': resolve_deps.ResolveDepsCommand,\n },\n python_requires='>=3.5,<4',\n packages=find_packages(),\n include_package_data=True,\n description='TensorFlow Extended (TFX) is a TensorFlow-based general-purpose machine learning platform implemented at Google',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n keywords='tensorflow tfx',\n url='https://www.tensorflow.org/tfx',\n download_url='https://github.com/tensorflow/tfx/tags',\n requires=[],\n # Below console_scripts, each line identifies one console script. The first\n # part before the equals sign (=) which is 'tfx', is the name of the script\n # that should be generated, the second part is the import path followed by a\n # colon (:) with the Click command group. After installation, the user can\n # invoke the CLI using \"tfx <command_group> <sub_command> <flags>\"\n entry_points=\"\"\"\n [console_scripts]\n tfx=tfx.tools.cli.cli_main:cli_group\n \"\"\")\n", "path": "setup.py"}]} |
gh_patches_debug_1415 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3604 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Organisation report shown in project reports page
The "Project overview" report is displayed on the project report page, which is an organisation report and should not be displayed on the project report page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/report.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.db.models import Q
8 from django.shortcuts import get_object_or_404
9 from rest_framework import status
10 from rest_framework.decorators import api_view
11 from rest_framework.response import Response
12
13 from akvo.rsr.models import Report, ReportFormat, Project
14 from ..serializers import ReportSerializer, ReportFormatSerializer
15 from ..viewsets import BaseRSRViewSet
16
17
18 class ReportViewSet(BaseRSRViewSet):
19 """Viewset providing Result data."""
20
21 queryset = Report.objects.prefetch_related(
22 'organisations',
23 'formats',
24 )
25 serializer_class = ReportSerializer
26
27 def get_queryset(self):
28 """
29 Allow custom filter for sync_owner, since this field has been replaced by the
30 reporting org partnership.
31 """
32 reports = super(ReportViewSet, self).get_queryset()
33 user = self.request.user
34 is_admin = user.is_active and (user.is_superuser or user.is_admin)
35 if not is_admin:
36 # Show only those reports that the user is allowed to see
37 approved_orgs = user.approved_organisations() if not user.is_anonymous() else []
38 reports = reports.filter(
39 Q(organisations=None) | Q(organisations__in=approved_orgs)
40 ).distinct()
41 return reports
42
43
44 @api_view(['GET'])
45 def report_formats(request):
46 """
47 A view for displaying all report format information.
48 """
49 return Response({
50 'count': ReportFormat.objects.all().count(),
51 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],
52 })
53
54
55 @api_view(['GET'])
56 def project_reports(request, project_pk):
57 """A view for displaying project specific reports."""
58
59 project = get_object_or_404(Project, pk=project_pk)
60 reports = Report.objects.prefetch_related('formats', 'organisations')\
61 .filter(url__icontains='project')
62
63 user = request.user
64 if not user.has_perm('rsr.view_project', project):
65 return Response('Request not allowed', status=status.HTTP_403_FORBIDDEN)
66
67 is_admin = user.is_active and (user.is_superuser or user.is_admin)
68
69 if not is_admin:
70 partners_org = project.partner_organisation_pks()
71 reports = reports.filter(
72 Q(organisations=None) | Q(organisations__in=partners_org)
73 )
74
75 serializer = ReportSerializer(reports.distinct(), many=True)
76 return Response(serializer.data)
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py
--- a/akvo/rest/views/report.py
+++ b/akvo/rest/views/report.py
@@ -58,7 +58,7 @@
project = get_object_or_404(Project, pk=project_pk)
reports = Report.objects.prefetch_related('formats', 'organisations')\
- .filter(url__icontains='project')
+ .filter(url__icontains='{project}')
user = request.user
if not user.has_perm('rsr.view_project', project):
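
A quick note on why the patch above narrows the filter: matching the plain substring `'project'` also catches organisation-level reports whose URL merely contains the word (such as the "Project overview" report from the issue), whereas matching the literal `'{project}'` placeholder keeps only reports whose URL is templated on a project. That placeholder convention is inferred from the diff itself; the sketch below uses made-up URLs and plain Python string checks as a rough stand-in for `url__icontains`:

```python
# Illustrative only: hypothetical report URLs, not actual RSR endpoints.
reports = [
    {"title": "Results table", "url": "/en/reports/results/?project={project}"},
    {"title": "Project overview", "url": "/en/reports/project-overview/?organisation={organisation}"},
]

# Old filter: any URL containing the word "project" slips through.
loose = [r["title"] for r in reports if "project" in r["url"]]
# Patched filter: only URLs carrying the {project} placeholder match.
strict = [r["title"] for r in reports if "{project}" in r["url"]]

print(loose)   # ['Results table', 'Project overview']  <- organisation report leaks in
print(strict)  # ['Results table']
```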
| {"golden_diff": "diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py\n--- a/akvo/rest/views/report.py\n+++ b/akvo/rest/views/report.py\n@@ -58,7 +58,7 @@\n \n project = get_object_or_404(Project, pk=project_pk)\n reports = Report.objects.prefetch_related('formats', 'organisations')\\\n- .filter(url__icontains='project')\n+ .filter(url__icontains='{project}')\n \n user = request.user\n if not user.has_perm('rsr.view_project', project):\n", "issue": "Organisation report shown in project reports page\nThe \"Project overview\" report is displayed on the project report page, which is an organisation report and should not be displayed on the project report page.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.rsr.models import Report, ReportFormat, Project\nfrom ..serializers import ReportSerializer, ReportFormatSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ReportViewSet(BaseRSRViewSet):\n \"\"\"Viewset providing Result data.\"\"\"\n\n queryset = Report.objects.prefetch_related(\n 'organisations',\n 'formats',\n )\n serializer_class = ReportSerializer\n\n def get_queryset(self):\n \"\"\"\n Allow custom filter for sync_owner, since this field has been replaced by the\n reporting org partnership.\n \"\"\"\n reports = super(ReportViewSet, self).get_queryset()\n user = self.request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if not is_admin:\n # Show only those reports that the user is allowed to see\n approved_orgs = user.approved_organisations() if not user.is_anonymous() else []\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=approved_orgs)\n ).distinct()\n return reports\n\n\n@api_view(['GET'])\ndef report_formats(request):\n \"\"\"\n A view for displaying all report format information.\n \"\"\"\n return Response({\n 'count': ReportFormat.objects.all().count(),\n 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],\n })\n\n\n@api_view(['GET'])\ndef project_reports(request, project_pk):\n \"\"\"A view for displaying project specific reports.\"\"\"\n\n project = get_object_or_404(Project, pk=project_pk)\n reports = Report.objects.prefetch_related('formats', 'organisations')\\\n .filter(url__icontains='project')\n\n user = request.user\n if not user.has_perm('rsr.view_project', project):\n return Response('Request not allowed', status=status.HTTP_403_FORBIDDEN)\n\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n\n if not is_admin:\n partners_org = project.partner_organisation_pks()\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=partners_org)\n )\n\n serializer = ReportSerializer(reports.distinct(), many=True)\n return Response(serializer.data)\n", "path": "akvo/rest/views/report.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please 
see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.rsr.models import Report, ReportFormat, Project\nfrom ..serializers import ReportSerializer, ReportFormatSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ReportViewSet(BaseRSRViewSet):\n \"\"\"Viewset providing Result data.\"\"\"\n\n queryset = Report.objects.prefetch_related(\n 'organisations',\n 'formats',\n )\n serializer_class = ReportSerializer\n\n def get_queryset(self):\n \"\"\"\n Allow custom filter for sync_owner, since this field has been replaced by the\n reporting org partnership.\n \"\"\"\n reports = super(ReportViewSet, self).get_queryset()\n user = self.request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if not is_admin:\n # Show only those reports that the user is allowed to see\n approved_orgs = user.approved_organisations() if not user.is_anonymous() else []\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=approved_orgs)\n ).distinct()\n return reports\n\n\n@api_view(['GET'])\ndef report_formats(request):\n \"\"\"\n A view for displaying all report format information.\n \"\"\"\n return Response({\n 'count': ReportFormat.objects.all().count(),\n 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],\n })\n\n\n@api_view(['GET'])\ndef project_reports(request, project_pk):\n \"\"\"A view for displaying project specific reports.\"\"\"\n\n project = get_object_or_404(Project, pk=project_pk)\n reports = Report.objects.prefetch_related('formats', 'organisations')\\\n .filter(url__icontains='{project}')\n\n user = request.user\n if not user.has_perm('rsr.view_project', project):\n return Response('Request not allowed', status=status.HTTP_403_FORBIDDEN)\n\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n\n if not is_admin:\n partners_org = project.partner_organisation_pks()\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=partners_org)\n )\n\n serializer = ReportSerializer(reports.distinct(), many=True)\n return Response(serializer.data)\n", "path": "akvo/rest/views/report.py"}]} |
gh_patches_debug_1416 | rasdani/github-patches | git_diff | numba__numba-3241 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ellipsis indexing and assignment not working when the needed number of `:` is zero
I was trying to index using `...` on both sides of assignment so I could use the same function for inputs with 1, 2, or N dimensions.
Here is code for the MCVE (using `numba` 0.39.0):
```python
import numpy as np
import numba
def func(A, B, indices):
rv = A.copy()
for i in range(indices.size):
index = indices[i]
rv[..., index] = B[..., index]
return rv
jitted = numba.njit(func)
A = np.ones((3, 5))
B = 2 * np.ones((3, 5))
indices = np.array([0, 2])
jitted(A[0], B[0], indices) # <-- raises. traceback below
```
Let's compare results using `numpy` and `numba`:
```python
In [2]: func(A, B, indices)
Out[2]:
array([[2., 1., 2., 1., 1.],
[2., 1., 2., 1., 1.],
[2., 1., 2., 1., 1.]])
In [3]: func(A[0], B[0], indices)
Out[3]: array([2., 1., 2., 1., 1.])
In [4]: jitted(A, B, indices)
Out[4]:
array([[2., 1., 2., 1., 1.],
[2., 1., 2., 1., 1.],
[2., 1., 2., 1., 1.]])
In [5]: jitted(A[0], B[0], indices) # <-- raises. traceback below
```
Traceback:
<details>
```python-traceback
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in cast(self, builder, val, fromty, toty)
674 try:
--> 675 impl = self._casts.find((fromty, toty))
676 return impl(self, builder, fromty, toty, val)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in find(self, sig)
47 if out is None:
---> 48 out = self._find(sig)
49 self._cache[sig] = out
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in _find(self, sig)
56 else:
---> 57 raise NotImplementedError(self, sig)
58
NotImplementedError: (<numba.targets.base.OverloadSelector object at 0x1059269e8>, (array(float64, 0d, C), float64))
During handling of the above exception, another exception occurred:
NotImplementedError Traceback (most recent call last)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
576 try:
--> 577 yield
578 except NumbaError as e:
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_block(self, block)
253 loc=self.loc, errcls_=defaulterrcls):
--> 254 self.lower_inst(inst)
255
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_inst(self, inst)
357 assert signature is not None
--> 358 return self.lower_setitem(inst.target, inst.index, inst.value, signature)
359
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_setitem(self, target_var, index_var, value_var, signature)
429
--> 430 return impl(self.builder, (target, index, value))
431
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in __call__(self, builder, args)
1078 def __call__(self, builder, args):
-> 1079 return self._imp(self._context, builder, self._sig, args)
1080
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/arrayobj.py in setitem_array(context, builder, sig, args)
481 # Store source value the given location
--> 482 val = context.cast(builder, val, valty, aryty.dtype)
483 store_item(context, builder, aryty, val, dataptr)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in cast(self, builder, val, fromty, toty)
678 raise NotImplementedError(
--> 679 "Cannot cast %s to %s: %s" % (fromty, toty, val))
680
NotImplementedError: Cannot cast array(float64, 0d, C) to float64: %".417" = load {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}, {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}* %"$22.9"
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-6-e6ce0775290a> in <module>()
----> 1 jitted(A[0], B[0], indices)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
366 e.patch_message(''.join(e.args) + help_msg)
367 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 368 raise e
369
370 def inspect_llvm(self, signature=None):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
323 argtypes.append(self.typeof_pyval(a))
324 try:
--> 325 return self.compile(tuple(argtypes))
326 except errors.TypingError as e:
327 # Intercept typing error that may be due to an argument
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, sig)
651
652 self._cache_misses[sig] += 1
--> 653 cres = self._compiler.compile(args, return_type)
654 self.add_overload(cres)
655 self._cache.save_overload(sig, cres)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, args, return_type)
81 args=args, return_type=return_type,
82 flags=flags, locals=self.locals,
---> 83 pipeline_class=self.pipeline_class)
84 # Check typing error if object mode is used
85 if cres.typing_error is not None and not flags.enable_pyobject:
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
871 pipeline = pipeline_class(typingctx, targetctx, library,
872 args, return_type, flags, locals)
--> 873 return pipeline.compile_extra(func)
874
875
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(self, func)
365 self.lifted = ()
366 self.lifted_from = None
--> 367 return self._compile_bytecode()
368
369 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _compile_bytecode(self)
802 """
803 assert self.func_ir is None
--> 804 return self._compile_core()
805
806 def _compile_ir(self):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _compile_core(self)
789 self.define_pipelines(pm)
790 pm.finalize()
--> 791 res = pm.run(self.status)
792 if res is not None:
793 # Early pipeline completion
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
251 # No more fallback pipelines?
252 if is_final_pipeline:
--> 253 raise patched_exception
254 # Go to next fallback pipeline
255 else:
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
243 try:
244 event(stage_name)
--> 245 stage()
246 except _EarlyPipelineCompletion as e:
247 return e.result
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in stage_nopython_backend(self)
676 """
677 lowerfn = self.backend_nopython_mode
--> 678 self._backend(lowerfn, objectmode=False)
679
680 def stage_compile_interp_mode(self):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
626 self.library.enable_object_caching()
627
--> 628 lowered = lowerfn()
629 signature = typing.signature(self.return_type, *self.args)
630 self.cr = compile_result(
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in backend_nopython_mode(self)
613 self.return_type,
614 self.calltypes,
--> 615 self.flags)
616
617 def _backend(self, lowerfn, objectmode):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in native_lowering_stage(targetctx, library, interp, typemap, restype, calltypes, flags)
990
991 lower = lowering.Lower(targetctx, library, fndesc, interp)
--> 992 lower.lower()
993 if not flags.no_cpython_wrapper:
994 lower.create_cpython_wrapper(flags.release_gil)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower(self)
171 if self.generator_info is None:
172 self.genlower = None
--> 173 self.lower_normal_function(self.fndesc)
174 else:
175 self.genlower = self.GeneratorLower(self)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
212 # Init argument values
213 self.extract_function_arguments()
--> 214 entry_block_tail = self.lower_function_body()
215
216 # Close tail of entry block
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_function_body(self)
237 bb = self.blkmap[offset]
238 self.builder.position_at_end(bb)
--> 239 self.lower_block(block)
240
241 self.post_lower()
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_block(self, block)
252 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
253 loc=self.loc, errcls_=defaulterrcls):
--> 254 self.lower_inst(inst)
255
256 def create_cpython_wrapper(self, release_gil=False):
~/miniconda3/envs/numba3/lib/python3.7/contextlib.py in __exit__(self, type, value, traceback)
128 value = type()
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
132 # Suppress StopIteration *unless* it's the same exception that
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
583 from numba import config
584 tb = sys.exc_info()[2] if config.FULL_TRACEBACKS else None
--> 585 six.reraise(type(newerr), newerr, tb)
586
587
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/six.py in reraise(tp, value, tb)
657 if value.__traceback__ is not tb:
658 raise value.with_traceback(tb)
--> 659 raise value
660
661 else:
LoweringError: Failed at nopython (nopython mode backend)
Cannot cast array(float64, 0d, C) to float64: %".417" = load {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}, {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}* %"$22.9"
File "<ipython-input-1-f6cc8d5fb861>", line 8:
def func(A, B, indices):
<source elided>
index = indices[i]
rv[..., index] = B[..., index]
^
[1] During: lowering "rv[$22.13] = $22.9" at <ipython-input-1-f6cc8d5fb861> (8)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numba/typing/arraydecl.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 import numpy as np
4
5 from collections import namedtuple
6
7 from numba import types, utils
8 from numba.typing.templates import (AttributeTemplate, AbstractTemplate,
9 infer, infer_getattr, signature,
10 bound_function)
11 # import time side effect: array operations requires typing support of sequence
12 # defined in collections: e.g. array.shape[i]
13 from numba.typing import collections
14 from numba.errors import TypingError
15
16 Indexing = namedtuple("Indexing", ("index", "result", "advanced"))
17
18
19 def get_array_index_type(ary, idx):
20 """
21 Returns None or a tuple-3 for the types of the input array, index, and
22 resulting type of ``array[index]``.
23
24 Note: This is shared logic for ndarray getitem and setitem.
25 """
26 if not isinstance(ary, types.Buffer):
27 return
28
29 ndim = ary.ndim
30
31 left_indices = []
32 right_indices = []
33 ellipsis_met = False
34 advanced = False
35 has_integer = False
36
37 if not isinstance(idx, types.BaseTuple):
38 idx = [idx]
39
40 # Walk indices
41 for ty in idx:
42 if ty is types.ellipsis:
43 if ellipsis_met:
44 raise TypeError("only one ellipsis allowed in array index "
45 "(got %s)" % (idx,))
46 ellipsis_met = True
47 elif isinstance(ty, types.SliceType):
48 pass
49 elif isinstance(ty, types.Integer):
50 # Normalize integer index
51 ty = types.intp if ty.signed else types.uintp
52 # Integer indexing removes the given dimension
53 ndim -= 1
54 has_integer = True
55 elif (isinstance(ty, types.Array) and ty.ndim == 0
56 and isinstance(ty.dtype, types.Integer)):
57 # 0-d array used as integer index
58 ndim -= 1
59 has_integer = True
60 elif (isinstance(ty, types.Array)
61 and ty.ndim == 1
62 and isinstance(ty.dtype, (types.Integer, types.Boolean))):
63 if advanced or has_integer:
64 # We don't support the complicated combination of
65 # advanced indices (and integers are considered part
66 # of them by Numpy).
67 raise NotImplementedError("only one advanced index supported")
68 advanced = True
69 else:
70 raise TypeError("unsupported array index type %s in %s"
71 % (ty, idx))
72 (right_indices if ellipsis_met else left_indices).append(ty)
73
74 # Only Numpy arrays support advanced indexing
75 if advanced and not isinstance(ary, types.Array):
76 return
77
78 # Check indices and result dimensionality
79 all_indices = left_indices + right_indices
80 if ellipsis_met:
81 assert right_indices[0] is types.ellipsis
82 del right_indices[0]
83
84 n_indices = len(all_indices) - ellipsis_met
85 if n_indices > ary.ndim:
86 raise TypeError("cannot index %s with %d indices: %s"
87 % (ary, n_indices, idx))
88 if n_indices == ary.ndim and ndim == 0 and not ellipsis_met:
89 # Full integer indexing => scalar result
90 # (note if ellipsis is present, a 0-d view is returned instead)
91 res = ary.dtype
92
93 elif advanced:
94 # Result is a copy
95 res = ary.copy(ndim=ndim, layout='C', readonly=False)
96
97 else:
98 # Result is a view
99 if ary.slice_is_copy:
100 # Avoid view semantics when the original type creates a copy
101 # when slicing.
102 return
103
104 # Infer layout
105 layout = ary.layout
106
107 def keeps_contiguity(ty, is_innermost):
108 # A slice can only keep an array contiguous if it is the
109 # innermost index and it is not strided
110 return (ty is types.ellipsis or isinstance(ty, types.Integer)
111 or (is_innermost and isinstance(ty, types.SliceType)
112 and not ty.has_step))
113
114 def check_contiguity(outer_indices):
115 """
116 Whether indexing with the given indices (from outer to inner in
117 physical layout order) can keep an array contiguous.
118 """
119 for ty in outer_indices[:-1]:
120 if not keeps_contiguity(ty, False):
121 return False
122 if outer_indices and not keeps_contiguity(outer_indices[-1], True):
123 return False
124 return True
125
126 if layout == 'C':
127 # Integer indexing on the left keeps the array C-contiguous
128 if n_indices == ary.ndim:
129 # If all indices are there, ellipsis's place is indifferent
130 left_indices = left_indices + right_indices
131 right_indices = []
132 if right_indices:
133 layout = 'A'
134 elif not check_contiguity(left_indices):
135 layout = 'A'
136 elif layout == 'F':
137 # Integer indexing on the right keeps the array F-contiguous
138 if n_indices == ary.ndim:
139 # If all indices are there, ellipsis's place is indifferent
140 right_indices = left_indices + right_indices
141 left_indices = []
142 if left_indices:
143 layout = 'A'
144 elif not check_contiguity(right_indices[::-1]):
145 layout = 'A'
146
147 res = ary.copy(ndim=ndim, layout=layout)
148
149 # Re-wrap indices
150 if isinstance(idx, types.BaseTuple):
151 idx = types.BaseTuple.from_types(all_indices)
152 else:
153 idx, = all_indices
154
155 return Indexing(idx, res, advanced)
156
157
158 @infer
159 class GetItemBuffer(AbstractTemplate):
160 key = "getitem"
161
162 def generic(self, args, kws):
163 assert not kws
164 [ary, idx] = args
165 out = get_array_index_type(ary, idx)
166 if out is not None:
167 return signature(out.result, ary, out.index)
168
169 @infer
170 class SetItemBuffer(AbstractTemplate):
171 key = "setitem"
172
173 def generic(self, args, kws):
174 assert not kws
175 ary, idx, val = args
176 if not isinstance(ary, types.Buffer):
177 return
178 if not ary.mutable:
179 raise TypeError("Cannot modify value of type %s" %(ary,))
180 out = get_array_index_type(ary, idx)
181 if out is None:
182 return
183
184 idx = out.index
185 res = out.result
186 if isinstance(res, types.Array):
187 # Indexing produces an array
188 if isinstance(val, types.Array):
189 if not self.context.can_convert(val.dtype, res.dtype):
190 # DType conversion not possible
191 return
192 else:
193 res = val
194 elif isinstance(val, types.Sequence):
195 if (res.ndim == 1 and
196 self.context.can_convert(val.dtype, res.dtype)):
197 # Allow assignement of sequence to 1d array
198 res = val
199 else:
200 # NOTE: sequence-to-array broadcasting is unsupported
201 return
202 else:
203 # Allow scalar broadcasting
204 if self.context.can_convert(val, res.dtype):
205 res = res.dtype
206 else:
207 # Incompatible scalar type
208 return
209 elif not isinstance(val, types.Array):
210 # Single item assignment
211 if not self.context.can_convert(val, res):
212 # if the array dtype is not yet defined
213 if not res.is_precise():
214 # set the array type to use the dtype of value (RHS)
215 newary = ary.copy(dtype=val)
216 return signature(types.none, newary, idx, res)
217 else:
218 return
219 res = val
220 else:
221 return
222 return signature(types.none, ary, idx, res)
223
224
225 def normalize_shape(shape):
226 if isinstance(shape, types.UniTuple):
227 if isinstance(shape.dtype, types.Integer):
228 dimtype = types.intp if shape.dtype.signed else types.uintp
229 return types.UniTuple(dimtype, len(shape))
230
231 elif isinstance(shape, types.Tuple) and shape.count == 0:
232 # Force (0 x intp) for consistency with other shapes
233 return types.UniTuple(types.intp, 0)
234
235
236 @infer_getattr
237 class ArrayAttribute(AttributeTemplate):
238 key = types.Array
239
240 def resolve_dtype(self, ary):
241 return types.DType(ary.dtype)
242
243 def resolve_itemsize(self, ary):
244 return types.intp
245
246 def resolve_shape(self, ary):
247 return types.UniTuple(types.intp, ary.ndim)
248
249 def resolve_strides(self, ary):
250 return types.UniTuple(types.intp, ary.ndim)
251
252 def resolve_ndim(self, ary):
253 return types.intp
254
255 def resolve_size(self, ary):
256 return types.intp
257
258 def resolve_flat(self, ary):
259 return types.NumpyFlatType(ary)
260
261 def resolve_ctypes(self, ary):
262 return types.ArrayCTypes(ary)
263
264 def resolve_flags(self, ary):
265 return types.ArrayFlags(ary)
266
267 def resolve_T(self, ary):
268 if ary.ndim <= 1:
269 retty = ary
270 else:
271 layout = {"C": "F", "F": "C"}.get(ary.layout, "A")
272 retty = ary.copy(layout=layout)
273 return retty
274
275 def resolve_real(self, ary):
276 return self._resolve_real_imag(ary, attr='real')
277
278 def resolve_imag(self, ary):
279 return self._resolve_real_imag(ary, attr='imag')
280
281 def _resolve_real_imag(self, ary, attr):
282 if ary.dtype in types.complex_domain:
283 return ary.copy(dtype=ary.dtype.underlying_float, layout='A')
284 elif ary.dtype in types.number_domain:
285 res = ary.copy(dtype=ary.dtype)
286 if attr == 'imag':
287 res = res.copy(readonly=True)
288 return res
289 else:
290 msg = "cannot access .{} of array of {}"
291 raise TypingError(msg.format(attr, ary.dtype))
292
293 @bound_function("array.transpose")
294 def resolve_transpose(self, ary, args, kws):
295 def sentry_shape_scalar(ty):
296 if ty in types.number_domain:
297 # Guard against non integer type
298 if not isinstance(ty, types.Integer):
299 raise TypeError("transpose() arg cannot be {0}".format(ty))
300 return True
301 else:
302 return False
303
304 assert not kws
305 if len(args) == 0:
306 return signature(self.resolve_T(ary))
307
308 if len(args) == 1:
309 shape, = args
310
311 if sentry_shape_scalar(shape):
312 assert ary.ndim == 1
313 return signature(ary, *args)
314
315 shape = normalize_shape(shape)
316 if shape is None:
317 return
318
319 assert ary.ndim == shape.count
320 return signature(self.resolve_T(ary), shape)
321
322 else:
323 if any(not sentry_shape_scalar(a) for a in args):
324 raise TypeError("transpose({0}) is not supported".format(
325 ', '.join(args)))
326 assert ary.ndim == len(args)
327 return signature(self.resolve_T(ary), *args)
328
329 @bound_function("array.copy")
330 def resolve_copy(self, ary, args, kws):
331 assert not args
332 assert not kws
333 retty = ary.copy(layout="C", readonly=False)
334 return signature(retty)
335
336 @bound_function("array.item")
337 def resolve_item(self, ary, args, kws):
338 assert not kws
339 # We don't support explicit arguments as that's exactly equivalent
340 # to regular indexing. The no-argument form is interesting to
341 # allow some degree of genericity when writing functions.
342 if not args:
343 return signature(ary.dtype)
344
345 @bound_function("array.itemset")
346 def resolve_itemset(self, ary, args, kws):
347 assert not kws
348 # We don't support explicit arguments as that's exactly equivalent
349 # to regular indexing. The no-argument form is interesting to
350 # allow some degree of genericity when writing functions.
351 if len(args) == 1:
352 return signature(types.none, ary.dtype)
353
354 @bound_function("array.nonzero")
355 def resolve_nonzero(self, ary, args, kws):
356 assert not args
357 assert not kws
358 # 0-dim arrays return one result array
359 ndim = max(ary.ndim, 1)
360 retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim)
361 return signature(retty)
362
363 @bound_function("array.reshape")
364 def resolve_reshape(self, ary, args, kws):
365 def sentry_shape_scalar(ty):
366 if ty in types.number_domain:
367 # Guard against non integer type
368 if not isinstance(ty, types.Integer):
369 raise TypeError("reshape() arg cannot be {0}".format(ty))
370 return True
371 else:
372 return False
373
374 assert not kws
375 if ary.layout not in 'CF':
376 # only work for contiguous array
377 raise TypeError("reshape() supports contiguous array only")
378
379 if len(args) == 1:
380 # single arg
381 shape, = args
382
383 if sentry_shape_scalar(shape):
384 ndim = 1
385 else:
386 shape = normalize_shape(shape)
387 if shape is None:
388 return
389 ndim = shape.count
390 retty = ary.copy(ndim=ndim)
391 return signature(retty, shape)
392
393 elif len(args) == 0:
394 # no arg
395 raise TypeError("reshape() take at least one arg")
396
397 else:
398 # vararg case
399 if any(not sentry_shape_scalar(a) for a in args):
400 raise TypeError("reshape({0}) is not supported".format(
401 ', '.join(args)))
402
403 retty = ary.copy(ndim=len(args))
404 return signature(retty, *args)
405
406 @bound_function("array.sort")
407 def resolve_sort(self, ary, args, kws):
408 assert not args
409 assert not kws
410 if ary.ndim == 1:
411 return signature(types.none)
412
413 @bound_function("array.argsort")
414 def resolve_argsort(self, ary, args, kws):
415 assert not args
416 kwargs = dict(kws)
417 kind = kwargs.pop('kind', types.Const('quicksort'))
418 if kwargs:
419 msg = "Unsupported keywords: {!r}"
420 raise TypingError(msg.format([k for k in kwargs.keys()]))
421 if ary.ndim == 1:
422 def argsort_stub(kind='quicksort'):
423 pass
424 pysig = utils.pysignature(argsort_stub)
425 sig = signature(types.Array(types.intp, 1, 'C'), kind).replace(pysig=pysig)
426 return sig
427
428 @bound_function("array.view")
429 def resolve_view(self, ary, args, kws):
430 from .npydecl import _parse_dtype
431 assert not kws
432 dtype, = args
433 dtype = _parse_dtype(dtype)
434 if dtype is None:
435 return
436 retty = ary.copy(dtype=dtype)
437 return signature(retty, *args)
438
439 @bound_function("array.astype")
440 def resolve_astype(self, ary, args, kws):
441 from .npydecl import _parse_dtype
442 assert not kws
443 dtype, = args
444 dtype = _parse_dtype(dtype)
445 if dtype is None:
446 return
447 if not self.context.can_convert(ary.dtype, dtype):
448 raise TypeError("astype(%s) not supported on %s: "
449 "cannot convert from %s to %s"
450 % (dtype, ary, ary.dtype, dtype))
451 layout = ary.layout if ary.layout in 'CF' else 'C'
452 retty = ary.copy(dtype=dtype, layout=layout)
453 return signature(retty, *args)
454
455 @bound_function("array.ravel")
456 def resolve_ravel(self, ary, args, kws):
457 # Only support no argument version (default order='C')
458 assert not kws
459 assert not args
460 return signature(ary.copy(ndim=1, layout='C'))
461
462 @bound_function("array.flatten")
463 def resolve_flatten(self, ary, args, kws):
464 # Only support no argument version (default order='C')
465 assert not kws
466 assert not args
467 return signature(ary.copy(ndim=1, layout='C'))
468
469 @bound_function("array.take")
470 def resolve_take(self, ary, args, kws):
471 assert not kws
472 argty, = args
473 if isinstance(argty, types.Integer):
474 sig = signature(ary.dtype, *args)
475 elif isinstance(argty, types.Array):
476 sig = signature(argty.copy(layout='C', dtype=ary.dtype), *args)
477 elif isinstance(argty, types.List): # 1d lists only
478 sig = signature(types.Array(ary.dtype, 1, 'C'), *args)
479 elif isinstance(argty, types.BaseTuple):
480 sig = signature(types.Array(ary.dtype, np.ndim(argty), 'C'), *args)
481 else:
482 raise TypeError("take(%s) not supported for %s" % argty)
483 return sig
484
485 def generic_resolve(self, ary, attr):
486 # Resolution of other attributes, for record arrays
487 if isinstance(ary.dtype, types.Record):
488 if attr in ary.dtype.fields:
489 return ary.copy(dtype=ary.dtype.typeof(attr), layout='A')
490
491
492 @infer_getattr
493 class DTypeAttr(AttributeTemplate):
494 key = types.DType
495
496 def resolve_type(self, ary):
497 # Wrap the numeric type in NumberClass
498 return types.NumberClass(ary.dtype)
499
500 def resolve_kind(self, ary):
501 if isinstance(ary.key, types.scalars.Float):
502 val = 'f'
503 elif isinstance(ary.key, types.scalars.Integer):
504 val = 'i'
505 else:
506 return None # other types not supported yet
507 return types.Const(val)
508
509 @infer
510 class StaticGetItemArray(AbstractTemplate):
511 key = "static_getitem"
512
513 def generic(self, args, kws):
514 # Resolution of members for record and structured arrays
515 ary, idx = args
516 if (isinstance(ary, types.Array) and isinstance(idx, str) and
517 isinstance(ary.dtype, types.Record)):
518 if idx in ary.dtype.fields:
519 return ary.copy(dtype=ary.dtype.typeof(idx), layout='A')
520
521
522 @infer_getattr
523 class RecordAttribute(AttributeTemplate):
524 key = types.Record
525
526 def generic_resolve(self, record, attr):
527 ret = record.typeof(attr)
528 assert ret
529 return ret
530
531 @infer
532 class StaticGetItemRecord(AbstractTemplate):
533 key = "static_getitem"
534
535 def generic(self, args, kws):
536 # Resolution of members for records
537 record, idx = args
538 if isinstance(record, types.Record) and isinstance(idx, str):
539 ret = record.typeof(idx)
540 assert ret
541 return ret
542
543 @infer
544 class StaticSetItemRecord(AbstractTemplate):
545 key = "static_setitem"
546
547 def generic(self, args, kws):
548 # Resolution of members for record and structured arrays
549 record, idx, value = args
550 if isinstance(record, types.Record) and isinstance(idx, str):
551 expectedty = record.typeof(idx)
552 if self.context.can_convert(value, expectedty) is not None:
553 return signature(types.void, record, types.Const(idx), value)
554
555
556 @infer_getattr
557 class ArrayCTypesAttribute(AttributeTemplate):
558 key = types.ArrayCTypes
559
560 def resolve_data(self, ctinfo):
561 return types.uintp
562
563
564 @infer_getattr
565 class ArrayFlagsAttribute(AttributeTemplate):
566 key = types.ArrayFlags
567
568 def resolve_contiguous(self, ctflags):
569 return types.boolean
570
571 def resolve_c_contiguous(self, ctflags):
572 return types.boolean
573
574 def resolve_f_contiguous(self, ctflags):
575 return types.boolean
576
577
578 @infer_getattr
579 class NestedArrayAttribute(ArrayAttribute):
580 key = types.NestedArray
581
582
583 def _expand_integer(ty):
584 """
585 If *ty* is an integer, expand it to a machine int (like Numpy).
586 """
587 if isinstance(ty, types.Integer):
588 if ty.signed:
589 return max(types.intp, ty)
590 else:
591 return max(types.uintp, ty)
592 elif isinstance(ty, types.Boolean):
593 return types.intp
594 else:
595 return ty
596
597 def generic_homog(self, args, kws):
598 assert not args
599 assert not kws
600 return signature(self.this.dtype, recvr=self.this)
601
602 def generic_expand(self, args, kws):
603 assert not args
604 assert not kws
605 return signature(_expand_integer(self.this.dtype), recvr=self.this)
606
607 def sum_expand(self, args, kws):
608 """
609 sum can be called with or without an axis parameter.
610 """
611 pysig = None
612 if kws:
613 def sum_stub(axis):
614 pass
615 pysig = utils.pysignature(sum_stub)
616 # rewrite args
617 args = list(args) + [kws['axis']]
618 kws = None
619 args_len = len(args)
620 assert args_len <= 1
621 if args_len == 0:
622 # No axis parameter so the return type of the summation is a scalar
623 # of the type of the array.
624 out = signature(_expand_integer(self.this.dtype), *args,
625 recvr=self.this)
626 else:
627 # There is an axis paramter so the return type of this summation is
628 # an array of dimension one less than the input array.
629 return_type = types.Array(dtype=_expand_integer(self.this.dtype),
630 ndim=self.this.ndim-1, layout='C')
631 out = signature(return_type, *args, recvr=self.this)
632 return out.replace(pysig=pysig)
633
634 def generic_expand_cumulative(self, args, kws):
635 assert not args
636 assert not kws
637 assert isinstance(self.this, types.Array)
638 return_type = types.Array(dtype=_expand_integer(self.this.dtype),
639 ndim=1, layout='C')
640 return signature(return_type, recvr=self.this)
641
642 def generic_hetero_real(self, args, kws):
643 assert not args
644 assert not kws
645 if isinstance(self.this.dtype, (types.Integer, types.Boolean)):
646 return signature(types.float64, recvr=self.this)
647 return signature(self.this.dtype, recvr=self.this)
648
649 def generic_hetero_always_real(self, args, kws):
650 assert not args
651 assert not kws
652 if isinstance(self.this.dtype, (types.Integer, types.Boolean)):
653 return signature(types.float64, recvr=self.this)
654 if isinstance(self.this.dtype, types.Complex):
655 return signature(self.this.dtype.underlying_float, recvr=self.this)
656 return signature(self.this.dtype, recvr=self.this)
657
658 def generic_index(self, args, kws):
659 assert not args
660 assert not kws
661 return signature(types.intp, recvr=self.this)
662
663 def install_array_method(name, generic, support_literals=False):
664 my_attr = {"key": "array." + name, "generic": generic}
665 temp_class = type("Array_" + name, (AbstractTemplate,), my_attr)
666 if support_literals:
667 temp_class.support_literals = support_literals
668 def array_attribute_attachment(self, ary):
669 return types.BoundFunction(temp_class, ary)
670
671 setattr(ArrayAttribute, "resolve_" + name, array_attribute_attachment)
672
673 # Functions that return the same type as the array
674 for fname in ["min", "max"]:
675 install_array_method(fname, generic_homog)
676
677 # Functions that return a machine-width type, to avoid overflows
678 install_array_method("prod", generic_expand)
679 install_array_method("sum", sum_expand, support_literals=True)
680
681 # Functions that return a machine-width type, to avoid overflows
682 for fname in ["cumsum", "cumprod"]:
683 install_array_method(fname, generic_expand_cumulative)
684
685 # Functions that require integer arrays get promoted to float64 return
686 for fName in ["mean"]:
687 install_array_method(fName, generic_hetero_real)
688
689 # var and std by definition return in real space and int arrays
690 # get promoted to float64 return
691 for fName in ["var", "std"]:
692 install_array_method(fName, generic_hetero_always_real)
693
694
695 # Functions that return an index (intp)
696 install_array_method("argmin", generic_index)
697 install_array_method("argmax", generic_index)
698
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numba/typing/arraydecl.py b/numba/typing/arraydecl.py
--- a/numba/typing/arraydecl.py
+++ b/numba/typing/arraydecl.py
@@ -144,7 +144,11 @@
elif not check_contiguity(right_indices[::-1]):
layout = 'A'
- res = ary.copy(ndim=ndim, layout=layout)
+ if ndim == 0:
+ # Implicitly convert to a scalar if the output ndim==0
+ res = ary.dtype
+ else:
+ res = ary.copy(ndim=ndim, layout=layout)
# Re-wrap indices
if isinstance(idx, types.BaseTuple):
| {"golden_diff": "diff --git a/numba/typing/arraydecl.py b/numba/typing/arraydecl.py\n--- a/numba/typing/arraydecl.py\n+++ b/numba/typing/arraydecl.py\n@@ -144,7 +144,11 @@\n elif not check_contiguity(right_indices[::-1]):\n layout = 'A'\n \n- res = ary.copy(ndim=ndim, layout=layout)\n+ if ndim == 0:\n+ # Implicitly convert to a scalar if the output ndim==0\n+ res = ary.dtype\n+ else:\n+ res = ary.copy(ndim=ndim, layout=layout)\n \n # Re-wrap indices\n if isinstance(idx, types.BaseTuple):\n", "issue": "Ellipsis indexing and assignment not working when needed number of `:` is zero\nI was trying to index using `...` on both sides of assignment so I could use the same function for inputs with 1, 2, or N dimensions.\r\n\r\nHere is code for the MCVE (using `numba` 0.39.0):\r\n```python\r\nimport numpy as np\r\nimport numba\r\n\r\ndef func(A, B, indices):\r\n rv = A.copy()\r\n for i in range(indices.size):\r\n index = indices[i]\r\n rv[..., index] = B[..., index]\r\n return rv\r\n\r\njitted = numba.njit(func)\r\n\r\nA = np.ones((3, 5))\r\nB = 2 * np.ones((3, 5))\r\nindices = np.array([0, 2])\r\n\r\njitted(A[0], B[0], indices) # <-- raises. traceback below\r\n```\r\nLet's compare results using `numpy` and `numba`:\r\n```python\r\nIn [2]: func(A, B, indices)\r\nOut[2]: \r\narray([[2., 1., 2., 1., 1.],\r\n [2., 1., 2., 1., 1.],\r\n [2., 1., 2., 1., 1.]])\r\n\r\nIn [3]: func(A[0], B[0], indices)\r\nOut[3]: array([2., 1., 2., 1., 1.])\r\n\r\nIn [4]: jitted(A, B, indices)\r\nOut[4]: \r\narray([[2., 1., 2., 1., 1.],\r\n [2., 1., 2., 1., 1.],\r\n [2., 1., 2., 1., 1.]])\r\n\r\nIn [5]: jitted(A[0], B[0], indices) # <-- raises. traceback below\r\n```\r\nTraceback:\r\n<details>\r\n\r\n```python-traceback\r\n---------------------------------------------------------------------------\r\nNotImplementedError Traceback (most recent call last)\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in cast(self, builder, val, fromty, toty)\r\n 674 try:\r\n--> 675 impl = self._casts.find((fromty, toty))\r\n 676 return impl(self, builder, fromty, toty, val)\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in find(self, sig)\r\n 47 if out is None:\r\n---> 48 out = self._find(sig)\r\n 49 self._cache[sig] = out\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in _find(self, sig)\r\n 56 else:\r\n---> 57 raise NotImplementedError(self, sig)\r\n 58 \r\n\r\nNotImplementedError: (<numba.targets.base.OverloadSelector object at 0x1059269e8>, (array(float64, 0d, C), float64))\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nNotImplementedError Traceback (most recent call last)\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)\r\n 576 try:\r\n--> 577 yield\r\n 578 except NumbaError as e:\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_block(self, block)\r\n 253 loc=self.loc, errcls_=defaulterrcls):\r\n--> 254 self.lower_inst(inst)\r\n 255 \r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_inst(self, inst)\r\n 357 assert signature is not None\r\n--> 358 return self.lower_setitem(inst.target, inst.index, inst.value, signature)\r\n 359 \r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_setitem(self, target_var, index_var, value_var, signature)\r\n 429 \r\n--> 430 return impl(self.builder, (target, index, value))\r\n 431 
\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in __call__(self, builder, args)\r\n 1078 def __call__(self, builder, args):\r\n-> 1079 return self._imp(self._context, builder, self._sig, args)\r\n 1080 \r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/arrayobj.py in setitem_array(context, builder, sig, args)\r\n 481 # Store source value the given location\r\n--> 482 val = context.cast(builder, val, valty, aryty.dtype)\r\n 483 store_item(context, builder, aryty, val, dataptr)\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in cast(self, builder, val, fromty, toty)\r\n 678 raise NotImplementedError(\r\n--> 679 \"Cannot cast %s to %s: %s\" % (fromty, toty, val))\r\n 680 \r\n\r\nNotImplementedError: Cannot cast array(float64, 0d, C) to float64: %\".417\" = load {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}, {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}* %\"$22.9\"\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nLoweringError Traceback (most recent call last)\r\n<ipython-input-6-e6ce0775290a> in <module>()\r\n----> 1 jitted(A[0], B[0], indices)\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)\r\n 366 e.patch_message(''.join(e.args) + help_msg)\r\n 367 # ignore the FULL_TRACEBACKS config, this needs reporting!\r\n--> 368 raise e\r\n 369 \r\n 370 def inspect_llvm(self, signature=None):\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)\r\n 323 argtypes.append(self.typeof_pyval(a))\r\n 324 try:\r\n--> 325 return self.compile(tuple(argtypes))\r\n 326 except errors.TypingError as e:\r\n 327 # Intercept typing error that may be due to an argument\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, sig)\r\n 651 \r\n 652 self._cache_misses[sig] += 1\r\n--> 653 cres = self._compiler.compile(args, return_type)\r\n 654 self.add_overload(cres)\r\n 655 self._cache.save_overload(sig, cres)\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, args, return_type)\r\n 81 args=args, return_type=return_type,\r\n 82 flags=flags, locals=self.locals,\r\n---> 83 pipeline_class=self.pipeline_class)\r\n 84 # Check typing error if object mode is used\r\n 85 if cres.typing_error is not None and not flags.enable_pyobject:\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)\r\n 871 pipeline = pipeline_class(typingctx, targetctx, library,\r\n 872 args, return_type, flags, locals)\r\n--> 873 return pipeline.compile_extra(func)\r\n 874 \r\n 875 \r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(self, func)\r\n 365 self.lifted = ()\r\n 366 self.lifted_from = None\r\n--> 367 return self._compile_bytecode()\r\n 368 \r\n 369 def compile_ir(self, func_ir, lifted=(), lifted_from=None):\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _compile_bytecode(self)\r\n 802 \"\"\"\r\n 803 assert self.func_ir is None\r\n--> 804 return self._compile_core()\r\n 805 \r\n 806 def _compile_ir(self):\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _compile_core(self)\r\n 789 self.define_pipelines(pm)\r\n 790 pm.finalize()\r\n--> 791 res = 
pm.run(self.status)\r\n 792 if res is not None:\r\n 793 # Early pipeline completion\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)\r\n 251 # No more fallback pipelines?\r\n 252 if is_final_pipeline:\r\n--> 253 raise patched_exception\r\n 254 # Go to next fallback pipeline\r\n 255 else:\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)\r\n 243 try:\r\n 244 event(stage_name)\r\n--> 245 stage()\r\n 246 except _EarlyPipelineCompletion as e:\r\n 247 return e.result\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in stage_nopython_backend(self)\r\n 676 \"\"\"\r\n 677 lowerfn = self.backend_nopython_mode\r\n--> 678 self._backend(lowerfn, objectmode=False)\r\n 679 \r\n 680 def stage_compile_interp_mode(self):\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)\r\n 626 self.library.enable_object_caching()\r\n 627 \r\n--> 628 lowered = lowerfn()\r\n 629 signature = typing.signature(self.return_type, *self.args)\r\n 630 self.cr = compile_result(\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in backend_nopython_mode(self)\r\n 613 self.return_type,\r\n 614 self.calltypes,\r\n--> 615 self.flags)\r\n 616 \r\n 617 def _backend(self, lowerfn, objectmode):\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in native_lowering_stage(targetctx, library, interp, typemap, restype, calltypes, flags)\r\n 990 \r\n 991 lower = lowering.Lower(targetctx, library, fndesc, interp)\r\n--> 992 lower.lower()\r\n 993 if not flags.no_cpython_wrapper:\r\n 994 lower.create_cpython_wrapper(flags.release_gil)\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower(self)\r\n 171 if self.generator_info is None:\r\n 172 self.genlower = None\r\n--> 173 self.lower_normal_function(self.fndesc)\r\n 174 else:\r\n 175 self.genlower = self.GeneratorLower(self)\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)\r\n 212 # Init argument values\r\n 213 self.extract_function_arguments()\r\n--> 214 entry_block_tail = self.lower_function_body()\r\n 215 \r\n 216 # Close tail of entry block\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_function_body(self)\r\n 237 bb = self.blkmap[offset]\r\n 238 self.builder.position_at_end(bb)\r\n--> 239 self.lower_block(block)\r\n 240 \r\n 241 self.post_lower()\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_block(self, block)\r\n 252 with new_error_context('lowering \"{inst}\" at {loc}', inst=inst,\r\n 253 loc=self.loc, errcls_=defaulterrcls):\r\n--> 254 self.lower_inst(inst)\r\n 255 \r\n 256 def create_cpython_wrapper(self, release_gil=False):\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/contextlib.py in __exit__(self, type, value, traceback)\r\n 128 value = type()\r\n 129 try:\r\n--> 130 self.gen.throw(type, value, traceback)\r\n 131 except StopIteration as exc:\r\n 132 # Suppress StopIteration *unless* it's the same exception that\r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)\r\n 583 from numba import config\r\n 584 tb = sys.exc_info()[2] if config.FULL_TRACEBACKS else None\r\n--> 585 six.reraise(type(newerr), newerr, tb)\r\n 586 \r\n 587 \r\n\r\n~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/six.py 
in reraise(tp, value, tb)\r\n 657 if value.__traceback__ is not tb:\r\n 658 raise value.with_traceback(tb)\r\n--> 659 raise value\r\n 660 \r\n 661 else:\r\n\r\nLoweringError: Failed at nopython (nopython mode backend)\r\nCannot cast array(float64, 0d, C) to float64: %\".417\" = load {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}, {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}* %\"$22.9\"\r\n\r\nFile \"<ipython-input-1-f6cc8d5fb861>\", line 8:\r\ndef func(A, B, indices):\r\n <source elided>\r\n index = indices[i]\r\n rv[..., index] = B[..., index]\r\n ^\r\n[1] During: lowering \"rv[$22.13] = $22.9\" at <ipython-input-1-f6cc8d5fb861> (8)\r\n-------------------------------------------------------------------------------\r\nThis should not have happened, a problem has occurred in Numba's internals.\r\n\r\nPlease report the error message and traceback, along with a minimal reproducer\r\nat: https://github.com/numba/numba/issues/new\r\n\r\nIf more help is needed please feel free to speak to the Numba core developers\r\ndirectly at: https://gitter.im/numba/numba\r\n\r\nThanks in advance for your help in improving Numba!\r\n```\r\n\r\n</details>\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport numpy as np\n\nfrom collections import namedtuple\n\nfrom numba import types, utils\nfrom numba.typing.templates import (AttributeTemplate, AbstractTemplate,\n infer, infer_getattr, signature,\n bound_function)\n# import time side effect: array operations requires typing support of sequence\n# defined in collections: e.g. array.shape[i]\nfrom numba.typing import collections\nfrom numba.errors import TypingError\n\nIndexing = namedtuple(\"Indexing\", (\"index\", \"result\", \"advanced\"))\n\n\ndef get_array_index_type(ary, idx):\n \"\"\"\n Returns None or a tuple-3 for the types of the input array, index, and\n resulting type of ``array[index]``.\n\n Note: This is shared logic for ndarray getitem and setitem.\n \"\"\"\n if not isinstance(ary, types.Buffer):\n return\n\n ndim = ary.ndim\n\n left_indices = []\n right_indices = []\n ellipsis_met = False\n advanced = False\n has_integer = False\n\n if not isinstance(idx, types.BaseTuple):\n idx = [idx]\n\n # Walk indices\n for ty in idx:\n if ty is types.ellipsis:\n if ellipsis_met:\n raise TypeError(\"only one ellipsis allowed in array index \"\n \"(got %s)\" % (idx,))\n ellipsis_met = True\n elif isinstance(ty, types.SliceType):\n pass\n elif isinstance(ty, types.Integer):\n # Normalize integer index\n ty = types.intp if ty.signed else types.uintp\n # Integer indexing removes the given dimension\n ndim -= 1\n has_integer = True\n elif (isinstance(ty, types.Array) and ty.ndim == 0\n and isinstance(ty.dtype, types.Integer)):\n # 0-d array used as integer index\n ndim -= 1\n has_integer = True\n elif (isinstance(ty, types.Array)\n and ty.ndim == 1\n and isinstance(ty.dtype, (types.Integer, types.Boolean))):\n if advanced or has_integer:\n # We don't support the complicated combination of\n # advanced indices (and integers are considered part\n # of them by Numpy).\n raise NotImplementedError(\"only one advanced index supported\")\n advanced = True\n else:\n raise TypeError(\"unsupported array index type %s in %s\"\n % (ty, idx))\n (right_indices if ellipsis_met else left_indices).append(ty)\n\n # Only Numpy arrays support advanced indexing\n if advanced and not isinstance(ary, types.Array):\n return\n\n # Check indices and result dimensionality\n all_indices = left_indices + right_indices\n if 
ellipsis_met:\n assert right_indices[0] is types.ellipsis\n del right_indices[0]\n\n n_indices = len(all_indices) - ellipsis_met\n if n_indices > ary.ndim:\n raise TypeError(\"cannot index %s with %d indices: %s\"\n % (ary, n_indices, idx))\n if n_indices == ary.ndim and ndim == 0 and not ellipsis_met:\n # Full integer indexing => scalar result\n # (note if ellipsis is present, a 0-d view is returned instead)\n res = ary.dtype\n\n elif advanced:\n # Result is a copy\n res = ary.copy(ndim=ndim, layout='C', readonly=False)\n\n else:\n # Result is a view\n if ary.slice_is_copy:\n # Avoid view semantics when the original type creates a copy\n # when slicing.\n return\n\n # Infer layout\n layout = ary.layout\n\n def keeps_contiguity(ty, is_innermost):\n # A slice can only keep an array contiguous if it is the\n # innermost index and it is not strided\n return (ty is types.ellipsis or isinstance(ty, types.Integer)\n or (is_innermost and isinstance(ty, types.SliceType)\n and not ty.has_step))\n\n def check_contiguity(outer_indices):\n \"\"\"\n Whether indexing with the given indices (from outer to inner in\n physical layout order) can keep an array contiguous.\n \"\"\"\n for ty in outer_indices[:-1]:\n if not keeps_contiguity(ty, False):\n return False\n if outer_indices and not keeps_contiguity(outer_indices[-1], True):\n return False\n return True\n\n if layout == 'C':\n # Integer indexing on the left keeps the array C-contiguous\n if n_indices == ary.ndim:\n # If all indices are there, ellipsis's place is indifferent\n left_indices = left_indices + right_indices\n right_indices = []\n if right_indices:\n layout = 'A'\n elif not check_contiguity(left_indices):\n layout = 'A'\n elif layout == 'F':\n # Integer indexing on the right keeps the array F-contiguous\n if n_indices == ary.ndim:\n # If all indices are there, ellipsis's place is indifferent\n right_indices = left_indices + right_indices\n left_indices = []\n if left_indices:\n layout = 'A'\n elif not check_contiguity(right_indices[::-1]):\n layout = 'A'\n\n res = ary.copy(ndim=ndim, layout=layout)\n\n # Re-wrap indices\n if isinstance(idx, types.BaseTuple):\n idx = types.BaseTuple.from_types(all_indices)\n else:\n idx, = all_indices\n\n return Indexing(idx, res, advanced)\n\n\n@infer\nclass GetItemBuffer(AbstractTemplate):\n key = \"getitem\"\n\n def generic(self, args, kws):\n assert not kws\n [ary, idx] = args\n out = get_array_index_type(ary, idx)\n if out is not None:\n return signature(out.result, ary, out.index)\n\n@infer\nclass SetItemBuffer(AbstractTemplate):\n key = \"setitem\"\n\n def generic(self, args, kws):\n assert not kws\n ary, idx, val = args\n if not isinstance(ary, types.Buffer):\n return\n if not ary.mutable:\n raise TypeError(\"Cannot modify value of type %s\" %(ary,))\n out = get_array_index_type(ary, idx)\n if out is None:\n return\n\n idx = out.index\n res = out.result\n if isinstance(res, types.Array):\n # Indexing produces an array\n if isinstance(val, types.Array):\n if not self.context.can_convert(val.dtype, res.dtype):\n # DType conversion not possible\n return\n else:\n res = val\n elif isinstance(val, types.Sequence):\n if (res.ndim == 1 and\n self.context.can_convert(val.dtype, res.dtype)):\n # Allow assignement of sequence to 1d array\n res = val\n else:\n # NOTE: sequence-to-array broadcasting is unsupported\n return\n else:\n # Allow scalar broadcasting\n if self.context.can_convert(val, res.dtype):\n res = res.dtype\n else:\n # Incompatible scalar type\n return\n elif not isinstance(val, types.Array):\n 
# Single item assignment\n if not self.context.can_convert(val, res):\n # if the array dtype is not yet defined\n if not res.is_precise():\n # set the array type to use the dtype of value (RHS)\n newary = ary.copy(dtype=val)\n return signature(types.none, newary, idx, res)\n else:\n return\n res = val\n else:\n return\n return signature(types.none, ary, idx, res)\n\n\ndef normalize_shape(shape):\n if isinstance(shape, types.UniTuple):\n if isinstance(shape.dtype, types.Integer):\n dimtype = types.intp if shape.dtype.signed else types.uintp\n return types.UniTuple(dimtype, len(shape))\n\n elif isinstance(shape, types.Tuple) and shape.count == 0:\n # Force (0 x intp) for consistency with other shapes\n return types.UniTuple(types.intp, 0)\n\n\n@infer_getattr\nclass ArrayAttribute(AttributeTemplate):\n key = types.Array\n\n def resolve_dtype(self, ary):\n return types.DType(ary.dtype)\n\n def resolve_itemsize(self, ary):\n return types.intp\n\n def resolve_shape(self, ary):\n return types.UniTuple(types.intp, ary.ndim)\n\n def resolve_strides(self, ary):\n return types.UniTuple(types.intp, ary.ndim)\n\n def resolve_ndim(self, ary):\n return types.intp\n\n def resolve_size(self, ary):\n return types.intp\n\n def resolve_flat(self, ary):\n return types.NumpyFlatType(ary)\n\n def resolve_ctypes(self, ary):\n return types.ArrayCTypes(ary)\n\n def resolve_flags(self, ary):\n return types.ArrayFlags(ary)\n\n def resolve_T(self, ary):\n if ary.ndim <= 1:\n retty = ary\n else:\n layout = {\"C\": \"F\", \"F\": \"C\"}.get(ary.layout, \"A\")\n retty = ary.copy(layout=layout)\n return retty\n\n def resolve_real(self, ary):\n return self._resolve_real_imag(ary, attr='real')\n\n def resolve_imag(self, ary):\n return self._resolve_real_imag(ary, attr='imag')\n\n def _resolve_real_imag(self, ary, attr):\n if ary.dtype in types.complex_domain:\n return ary.copy(dtype=ary.dtype.underlying_float, layout='A')\n elif ary.dtype in types.number_domain:\n res = ary.copy(dtype=ary.dtype)\n if attr == 'imag':\n res = res.copy(readonly=True)\n return res\n else:\n msg = \"cannot access .{} of array of {}\"\n raise TypingError(msg.format(attr, ary.dtype))\n\n @bound_function(\"array.transpose\")\n def resolve_transpose(self, ary, args, kws):\n def sentry_shape_scalar(ty):\n if ty in types.number_domain:\n # Guard against non integer type\n if not isinstance(ty, types.Integer):\n raise TypeError(\"transpose() arg cannot be {0}\".format(ty))\n return True\n else:\n return False\n\n assert not kws\n if len(args) == 0:\n return signature(self.resolve_T(ary))\n\n if len(args) == 1:\n shape, = args\n\n if sentry_shape_scalar(shape):\n assert ary.ndim == 1\n return signature(ary, *args)\n\n shape = normalize_shape(shape)\n if shape is None:\n return\n\n assert ary.ndim == shape.count\n return signature(self.resolve_T(ary), shape)\n\n else:\n if any(not sentry_shape_scalar(a) for a in args):\n raise TypeError(\"transpose({0}) is not supported\".format(\n ', '.join(args)))\n assert ary.ndim == len(args)\n return signature(self.resolve_T(ary), *args)\n\n @bound_function(\"array.copy\")\n def resolve_copy(self, ary, args, kws):\n assert not args\n assert not kws\n retty = ary.copy(layout=\"C\", readonly=False)\n return signature(retty)\n\n @bound_function(\"array.item\")\n def resolve_item(self, ary, args, kws):\n assert not kws\n # We don't support explicit arguments as that's exactly equivalent\n # to regular indexing. 
The no-argument form is interesting to\n # allow some degree of genericity when writing functions.\n if not args:\n return signature(ary.dtype)\n\n @bound_function(\"array.itemset\")\n def resolve_itemset(self, ary, args, kws):\n assert not kws\n # We don't support explicit arguments as that's exactly equivalent\n # to regular indexing. The no-argument form is interesting to\n # allow some degree of genericity when writing functions.\n if len(args) == 1:\n return signature(types.none, ary.dtype)\n\n @bound_function(\"array.nonzero\")\n def resolve_nonzero(self, ary, args, kws):\n assert not args\n assert not kws\n # 0-dim arrays return one result array\n ndim = max(ary.ndim, 1)\n retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim)\n return signature(retty)\n\n @bound_function(\"array.reshape\")\n def resolve_reshape(self, ary, args, kws):\n def sentry_shape_scalar(ty):\n if ty in types.number_domain:\n # Guard against non integer type\n if not isinstance(ty, types.Integer):\n raise TypeError(\"reshape() arg cannot be {0}\".format(ty))\n return True\n else:\n return False\n\n assert not kws\n if ary.layout not in 'CF':\n # only work for contiguous array\n raise TypeError(\"reshape() supports contiguous array only\")\n\n if len(args) == 1:\n # single arg\n shape, = args\n\n if sentry_shape_scalar(shape):\n ndim = 1\n else:\n shape = normalize_shape(shape)\n if shape is None:\n return\n ndim = shape.count\n retty = ary.copy(ndim=ndim)\n return signature(retty, shape)\n\n elif len(args) == 0:\n # no arg\n raise TypeError(\"reshape() take at least one arg\")\n\n else:\n # vararg case\n if any(not sentry_shape_scalar(a) for a in args):\n raise TypeError(\"reshape({0}) is not supported\".format(\n ', '.join(args)))\n\n retty = ary.copy(ndim=len(args))\n return signature(retty, *args)\n\n @bound_function(\"array.sort\")\n def resolve_sort(self, ary, args, kws):\n assert not args\n assert not kws\n if ary.ndim == 1:\n return signature(types.none)\n\n @bound_function(\"array.argsort\")\n def resolve_argsort(self, ary, args, kws):\n assert not args\n kwargs = dict(kws)\n kind = kwargs.pop('kind', types.Const('quicksort'))\n if kwargs:\n msg = \"Unsupported keywords: {!r}\"\n raise TypingError(msg.format([k for k in kwargs.keys()]))\n if ary.ndim == 1:\n def argsort_stub(kind='quicksort'):\n pass\n pysig = utils.pysignature(argsort_stub)\n sig = signature(types.Array(types.intp, 1, 'C'), kind).replace(pysig=pysig)\n return sig\n\n @bound_function(\"array.view\")\n def resolve_view(self, ary, args, kws):\n from .npydecl import _parse_dtype\n assert not kws\n dtype, = args\n dtype = _parse_dtype(dtype)\n if dtype is None:\n return\n retty = ary.copy(dtype=dtype)\n return signature(retty, *args)\n\n @bound_function(\"array.astype\")\n def resolve_astype(self, ary, args, kws):\n from .npydecl import _parse_dtype\n assert not kws\n dtype, = args\n dtype = _parse_dtype(dtype)\n if dtype is None:\n return\n if not self.context.can_convert(ary.dtype, dtype):\n raise TypeError(\"astype(%s) not supported on %s: \"\n \"cannot convert from %s to %s\"\n % (dtype, ary, ary.dtype, dtype))\n layout = ary.layout if ary.layout in 'CF' else 'C'\n retty = ary.copy(dtype=dtype, layout=layout)\n return signature(retty, *args)\n\n @bound_function(\"array.ravel\")\n def resolve_ravel(self, ary, args, kws):\n # Only support no argument version (default order='C')\n assert not kws\n assert not args\n return signature(ary.copy(ndim=1, layout='C'))\n\n @bound_function(\"array.flatten\")\n def resolve_flatten(self, ary, 
args, kws):\n # Only support no argument version (default order='C')\n assert not kws\n assert not args\n return signature(ary.copy(ndim=1, layout='C'))\n\n @bound_function(\"array.take\")\n def resolve_take(self, ary, args, kws):\n assert not kws\n argty, = args\n if isinstance(argty, types.Integer):\n sig = signature(ary.dtype, *args)\n elif isinstance(argty, types.Array):\n sig = signature(argty.copy(layout='C', dtype=ary.dtype), *args)\n elif isinstance(argty, types.List): # 1d lists only\n sig = signature(types.Array(ary.dtype, 1, 'C'), *args)\n elif isinstance(argty, types.BaseTuple):\n sig = signature(types.Array(ary.dtype, np.ndim(argty), 'C'), *args)\n else:\n raise TypeError(\"take(%s) not supported for %s\" % argty)\n return sig\n\n def generic_resolve(self, ary, attr):\n # Resolution of other attributes, for record arrays\n if isinstance(ary.dtype, types.Record):\n if attr in ary.dtype.fields:\n return ary.copy(dtype=ary.dtype.typeof(attr), layout='A')\n\n\n@infer_getattr\nclass DTypeAttr(AttributeTemplate):\n key = types.DType\n\n def resolve_type(self, ary):\n # Wrap the numeric type in NumberClass\n return types.NumberClass(ary.dtype)\n\n def resolve_kind(self, ary):\n if isinstance(ary.key, types.scalars.Float):\n val = 'f'\n elif isinstance(ary.key, types.scalars.Integer):\n val = 'i'\n else:\n return None # other types not supported yet\n return types.Const(val)\n\n@infer\nclass StaticGetItemArray(AbstractTemplate):\n key = \"static_getitem\"\n\n def generic(self, args, kws):\n # Resolution of members for record and structured arrays\n ary, idx = args\n if (isinstance(ary, types.Array) and isinstance(idx, str) and\n isinstance(ary.dtype, types.Record)):\n if idx in ary.dtype.fields:\n return ary.copy(dtype=ary.dtype.typeof(idx), layout='A')\n\n\n@infer_getattr\nclass RecordAttribute(AttributeTemplate):\n key = types.Record\n\n def generic_resolve(self, record, attr):\n ret = record.typeof(attr)\n assert ret\n return ret\n\n@infer\nclass StaticGetItemRecord(AbstractTemplate):\n key = \"static_getitem\"\n\n def generic(self, args, kws):\n # Resolution of members for records\n record, idx = args\n if isinstance(record, types.Record) and isinstance(idx, str):\n ret = record.typeof(idx)\n assert ret\n return ret\n\n@infer\nclass StaticSetItemRecord(AbstractTemplate):\n key = \"static_setitem\"\n\n def generic(self, args, kws):\n # Resolution of members for record and structured arrays\n record, idx, value = args\n if isinstance(record, types.Record) and isinstance(idx, str):\n expectedty = record.typeof(idx)\n if self.context.can_convert(value, expectedty) is not None:\n return signature(types.void, record, types.Const(idx), value)\n\n\n@infer_getattr\nclass ArrayCTypesAttribute(AttributeTemplate):\n key = types.ArrayCTypes\n\n def resolve_data(self, ctinfo):\n return types.uintp\n\n\n@infer_getattr\nclass ArrayFlagsAttribute(AttributeTemplate):\n key = types.ArrayFlags\n\n def resolve_contiguous(self, ctflags):\n return types.boolean\n\n def resolve_c_contiguous(self, ctflags):\n return types.boolean\n\n def resolve_f_contiguous(self, ctflags):\n return types.boolean\n\n\n@infer_getattr\nclass NestedArrayAttribute(ArrayAttribute):\n key = types.NestedArray\n\n\ndef _expand_integer(ty):\n \"\"\"\n If *ty* is an integer, expand it to a machine int (like Numpy).\n \"\"\"\n if isinstance(ty, types.Integer):\n if ty.signed:\n return max(types.intp, ty)\n else:\n return max(types.uintp, ty)\n elif isinstance(ty, types.Boolean):\n return types.intp\n else:\n return ty\n\ndef 
generic_homog(self, args, kws):\n assert not args\n assert not kws\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_expand(self, args, kws):\n assert not args\n assert not kws\n return signature(_expand_integer(self.this.dtype), recvr=self.this)\n\ndef sum_expand(self, args, kws):\n \"\"\"\n sum can be called with or without an axis parameter.\n \"\"\"\n pysig = None\n if kws:\n def sum_stub(axis):\n pass\n pysig = utils.pysignature(sum_stub)\n # rewrite args\n args = list(args) + [kws['axis']]\n kws = None\n args_len = len(args)\n assert args_len <= 1\n if args_len == 0:\n # No axis parameter so the return type of the summation is a scalar\n # of the type of the array.\n out = signature(_expand_integer(self.this.dtype), *args,\n recvr=self.this)\n else:\n # There is an axis paramter so the return type of this summation is\n # an array of dimension one less than the input array.\n return_type = types.Array(dtype=_expand_integer(self.this.dtype),\n ndim=self.this.ndim-1, layout='C')\n out = signature(return_type, *args, recvr=self.this)\n return out.replace(pysig=pysig)\n\ndef generic_expand_cumulative(self, args, kws):\n assert not args\n assert not kws\n assert isinstance(self.this, types.Array)\n return_type = types.Array(dtype=_expand_integer(self.this.dtype),\n ndim=1, layout='C')\n return signature(return_type, recvr=self.this)\n\ndef generic_hetero_real(self, args, kws):\n assert not args\n assert not kws\n if isinstance(self.this.dtype, (types.Integer, types.Boolean)):\n return signature(types.float64, recvr=self.this)\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_hetero_always_real(self, args, kws):\n assert not args\n assert not kws\n if isinstance(self.this.dtype, (types.Integer, types.Boolean)):\n return signature(types.float64, recvr=self.this)\n if isinstance(self.this.dtype, types.Complex):\n return signature(self.this.dtype.underlying_float, recvr=self.this)\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_index(self, args, kws):\n assert not args\n assert not kws\n return signature(types.intp, recvr=self.this)\n\ndef install_array_method(name, generic, support_literals=False):\n my_attr = {\"key\": \"array.\" + name, \"generic\": generic}\n temp_class = type(\"Array_\" + name, (AbstractTemplate,), my_attr)\n if support_literals:\n temp_class.support_literals = support_literals\n def array_attribute_attachment(self, ary):\n return types.BoundFunction(temp_class, ary)\n\n setattr(ArrayAttribute, \"resolve_\" + name, array_attribute_attachment)\n\n# Functions that return the same type as the array\nfor fname in [\"min\", \"max\"]:\n install_array_method(fname, generic_homog)\n\n# Functions that return a machine-width type, to avoid overflows\ninstall_array_method(\"prod\", generic_expand)\ninstall_array_method(\"sum\", sum_expand, support_literals=True)\n\n# Functions that return a machine-width type, to avoid overflows\nfor fname in [\"cumsum\", \"cumprod\"]:\n install_array_method(fname, generic_expand_cumulative)\n\n# Functions that require integer arrays get promoted to float64 return\nfor fName in [\"mean\"]:\n install_array_method(fName, generic_hetero_real)\n\n# var and std by definition return in real space and int arrays\n# get promoted to float64 return\nfor fName in [\"var\", \"std\"]:\n install_array_method(fName, generic_hetero_always_real)\n\n\n# Functions that return an index (intp)\ninstall_array_method(\"argmin\", generic_index)\ninstall_array_method(\"argmax\", generic_index)\n", "path": 
"numba/typing/arraydecl.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport numpy as np\n\nfrom collections import namedtuple\n\nfrom numba import types, utils\nfrom numba.typing.templates import (AttributeTemplate, AbstractTemplate,\n infer, infer_getattr, signature,\n bound_function)\n# import time side effect: array operations requires typing support of sequence\n# defined in collections: e.g. array.shape[i]\nfrom numba.typing import collections\nfrom numba.errors import TypingError\n\nIndexing = namedtuple(\"Indexing\", (\"index\", \"result\", \"advanced\"))\n\n\ndef get_array_index_type(ary, idx):\n \"\"\"\n Returns None or a tuple-3 for the types of the input array, index, and\n resulting type of ``array[index]``.\n\n Note: This is shared logic for ndarray getitem and setitem.\n \"\"\"\n if not isinstance(ary, types.Buffer):\n return\n\n ndim = ary.ndim\n\n left_indices = []\n right_indices = []\n ellipsis_met = False\n advanced = False\n has_integer = False\n\n if not isinstance(idx, types.BaseTuple):\n idx = [idx]\n\n # Walk indices\n for ty in idx:\n if ty is types.ellipsis:\n if ellipsis_met:\n raise TypeError(\"only one ellipsis allowed in array index \"\n \"(got %s)\" % (idx,))\n ellipsis_met = True\n elif isinstance(ty, types.SliceType):\n pass\n elif isinstance(ty, types.Integer):\n # Normalize integer index\n ty = types.intp if ty.signed else types.uintp\n # Integer indexing removes the given dimension\n ndim -= 1\n has_integer = True\n elif (isinstance(ty, types.Array) and ty.ndim == 0\n and isinstance(ty.dtype, types.Integer)):\n # 0-d array used as integer index\n ndim -= 1\n has_integer = True\n elif (isinstance(ty, types.Array)\n and ty.ndim == 1\n and isinstance(ty.dtype, (types.Integer, types.Boolean))):\n if advanced or has_integer:\n # We don't support the complicated combination of\n # advanced indices (and integers are considered part\n # of them by Numpy).\n raise NotImplementedError(\"only one advanced index supported\")\n advanced = True\n else:\n raise TypeError(\"unsupported array index type %s in %s\"\n % (ty, idx))\n (right_indices if ellipsis_met else left_indices).append(ty)\n\n # Only Numpy arrays support advanced indexing\n if advanced and not isinstance(ary, types.Array):\n return\n\n # Check indices and result dimensionality\n all_indices = left_indices + right_indices\n if ellipsis_met:\n assert right_indices[0] is types.ellipsis\n del right_indices[0]\n\n n_indices = len(all_indices) - ellipsis_met\n if n_indices > ary.ndim:\n raise TypeError(\"cannot index %s with %d indices: %s\"\n % (ary, n_indices, idx))\n if n_indices == ary.ndim and ndim == 0 and not ellipsis_met:\n # Full integer indexing => scalar result\n # (note if ellipsis is present, a 0-d view is returned instead)\n res = ary.dtype\n\n elif advanced:\n # Result is a copy\n res = ary.copy(ndim=ndim, layout='C', readonly=False)\n\n else:\n # Result is a view\n if ary.slice_is_copy:\n # Avoid view semantics when the original type creates a copy\n # when slicing.\n return\n\n # Infer layout\n layout = ary.layout\n\n def keeps_contiguity(ty, is_innermost):\n # A slice can only keep an array contiguous if it is the\n # innermost index and it is not strided\n return (ty is types.ellipsis or isinstance(ty, types.Integer)\n or (is_innermost and isinstance(ty, types.SliceType)\n and not ty.has_step))\n\n def check_contiguity(outer_indices):\n \"\"\"\n Whether indexing with the given indices (from outer to inner in\n physical layout order) 
can keep an array contiguous.\n \"\"\"\n for ty in outer_indices[:-1]:\n if not keeps_contiguity(ty, False):\n return False\n if outer_indices and not keeps_contiguity(outer_indices[-1], True):\n return False\n return True\n\n if layout == 'C':\n # Integer indexing on the left keeps the array C-contiguous\n if n_indices == ary.ndim:\n # If all indices are there, ellipsis's place is indifferent\n left_indices = left_indices + right_indices\n right_indices = []\n if right_indices:\n layout = 'A'\n elif not check_contiguity(left_indices):\n layout = 'A'\n elif layout == 'F':\n # Integer indexing on the right keeps the array F-contiguous\n if n_indices == ary.ndim:\n # If all indices are there, ellipsis's place is indifferent\n right_indices = left_indices + right_indices\n left_indices = []\n if left_indices:\n layout = 'A'\n elif not check_contiguity(right_indices[::-1]):\n layout = 'A'\n\n if ndim == 0:\n # Implicitly convert to a scalar if the output ndim==0\n res = ary.dtype\n else:\n res = ary.copy(ndim=ndim, layout=layout)\n\n # Re-wrap indices\n if isinstance(idx, types.BaseTuple):\n idx = types.BaseTuple.from_types(all_indices)\n else:\n idx, = all_indices\n\n return Indexing(idx, res, advanced)\n\n\n@infer\nclass GetItemBuffer(AbstractTemplate):\n key = \"getitem\"\n\n def generic(self, args, kws):\n assert not kws\n [ary, idx] = args\n out = get_array_index_type(ary, idx)\n if out is not None:\n return signature(out.result, ary, out.index)\n\n@infer\nclass SetItemBuffer(AbstractTemplate):\n key = \"setitem\"\n\n def generic(self, args, kws):\n assert not kws\n ary, idx, val = args\n if not isinstance(ary, types.Buffer):\n return\n if not ary.mutable:\n raise TypeError(\"Cannot modify value of type %s\" %(ary,))\n out = get_array_index_type(ary, idx)\n if out is None:\n return\n\n idx = out.index\n res = out.result\n if isinstance(res, types.Array):\n # Indexing produces an array\n if isinstance(val, types.Array):\n if not self.context.can_convert(val.dtype, res.dtype):\n # DType conversion not possible\n return\n else:\n res = val\n elif isinstance(val, types.Sequence):\n if (res.ndim == 1 and\n self.context.can_convert(val.dtype, res.dtype)):\n # Allow assignement of sequence to 1d array\n res = val\n else:\n # NOTE: sequence-to-array broadcasting is unsupported\n return\n else:\n # Allow scalar broadcasting\n if self.context.can_convert(val, res.dtype):\n res = res.dtype\n else:\n # Incompatible scalar type\n return\n elif not isinstance(val, types.Array):\n # Single item assignment\n if not self.context.can_convert(val, res):\n # if the array dtype is not yet defined\n if not res.is_precise():\n # set the array type to use the dtype of value (RHS)\n newary = ary.copy(dtype=val)\n return signature(types.none, newary, idx, res)\n else:\n return\n res = val\n else:\n return\n return signature(types.none, ary, idx, res)\n\n\ndef normalize_shape(shape):\n if isinstance(shape, types.UniTuple):\n if isinstance(shape.dtype, types.Integer):\n dimtype = types.intp if shape.dtype.signed else types.uintp\n return types.UniTuple(dimtype, len(shape))\n\n elif isinstance(shape, types.Tuple) and shape.count == 0:\n # Force (0 x intp) for consistency with other shapes\n return types.UniTuple(types.intp, 0)\n\n\n@infer_getattr\nclass ArrayAttribute(AttributeTemplate):\n key = types.Array\n\n def resolve_dtype(self, ary):\n return types.DType(ary.dtype)\n\n def resolve_itemsize(self, ary):\n return types.intp\n\n def resolve_shape(self, ary):\n return types.UniTuple(types.intp, ary.ndim)\n\n def 
resolve_strides(self, ary):\n return types.UniTuple(types.intp, ary.ndim)\n\n def resolve_ndim(self, ary):\n return types.intp\n\n def resolve_size(self, ary):\n return types.intp\n\n def resolve_flat(self, ary):\n return types.NumpyFlatType(ary)\n\n def resolve_ctypes(self, ary):\n return types.ArrayCTypes(ary)\n\n def resolve_flags(self, ary):\n return types.ArrayFlags(ary)\n\n def resolve_T(self, ary):\n if ary.ndim <= 1:\n retty = ary\n else:\n layout = {\"C\": \"F\", \"F\": \"C\"}.get(ary.layout, \"A\")\n retty = ary.copy(layout=layout)\n return retty\n\n def resolve_real(self, ary):\n return self._resolve_real_imag(ary, attr='real')\n\n def resolve_imag(self, ary):\n return self._resolve_real_imag(ary, attr='imag')\n\n def _resolve_real_imag(self, ary, attr):\n if ary.dtype in types.complex_domain:\n return ary.copy(dtype=ary.dtype.underlying_float, layout='A')\n elif ary.dtype in types.number_domain:\n res = ary.copy(dtype=ary.dtype)\n if attr == 'imag':\n res = res.copy(readonly=True)\n return res\n else:\n msg = \"cannot access .{} of array of {}\"\n raise TypingError(msg.format(attr, ary.dtype))\n\n @bound_function(\"array.transpose\")\n def resolve_transpose(self, ary, args, kws):\n def sentry_shape_scalar(ty):\n if ty in types.number_domain:\n # Guard against non integer type\n if not isinstance(ty, types.Integer):\n raise TypeError(\"transpose() arg cannot be {0}\".format(ty))\n return True\n else:\n return False\n\n assert not kws\n if len(args) == 0:\n return signature(self.resolve_T(ary))\n\n if len(args) == 1:\n shape, = args\n\n if sentry_shape_scalar(shape):\n assert ary.ndim == 1\n return signature(ary, *args)\n\n shape = normalize_shape(shape)\n if shape is None:\n return\n\n assert ary.ndim == shape.count\n return signature(self.resolve_T(ary), shape)\n\n else:\n if any(not sentry_shape_scalar(a) for a in args):\n raise TypeError(\"transpose({0}) is not supported\".format(\n ', '.join(args)))\n assert ary.ndim == len(args)\n return signature(self.resolve_T(ary), *args)\n\n @bound_function(\"array.copy\")\n def resolve_copy(self, ary, args, kws):\n assert not args\n assert not kws\n retty = ary.copy(layout=\"C\", readonly=False)\n return signature(retty)\n\n @bound_function(\"array.item\")\n def resolve_item(self, ary, args, kws):\n assert not kws\n # We don't support explicit arguments as that's exactly equivalent\n # to regular indexing. The no-argument form is interesting to\n # allow some degree of genericity when writing functions.\n if not args:\n return signature(ary.dtype)\n\n @bound_function(\"array.itemset\")\n def resolve_itemset(self, ary, args, kws):\n assert not kws\n # We don't support explicit arguments as that's exactly equivalent\n # to regular indexing. 
The no-argument form is interesting to\n # allow some degree of genericity when writing functions.\n if len(args) == 1:\n return signature(types.none, ary.dtype)\n\n @bound_function(\"array.nonzero\")\n def resolve_nonzero(self, ary, args, kws):\n assert not args\n assert not kws\n # 0-dim arrays return one result array\n ndim = max(ary.ndim, 1)\n retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim)\n return signature(retty)\n\n @bound_function(\"array.reshape\")\n def resolve_reshape(self, ary, args, kws):\n def sentry_shape_scalar(ty):\n if ty in types.number_domain:\n # Guard against non integer type\n if not isinstance(ty, types.Integer):\n raise TypeError(\"reshape() arg cannot be {0}\".format(ty))\n return True\n else:\n return False\n\n assert not kws\n if ary.layout not in 'CF':\n # only work for contiguous array\n raise TypeError(\"reshape() supports contiguous array only\")\n\n if len(args) == 1:\n # single arg\n shape, = args\n\n if sentry_shape_scalar(shape):\n ndim = 1\n else:\n shape = normalize_shape(shape)\n if shape is None:\n return\n ndim = shape.count\n retty = ary.copy(ndim=ndim)\n return signature(retty, shape)\n\n elif len(args) == 0:\n # no arg\n raise TypeError(\"reshape() take at least one arg\")\n\n else:\n # vararg case\n if any(not sentry_shape_scalar(a) for a in args):\n raise TypeError(\"reshape({0}) is not supported\".format(\n ', '.join(args)))\n\n retty = ary.copy(ndim=len(args))\n return signature(retty, *args)\n\n @bound_function(\"array.sort\")\n def resolve_sort(self, ary, args, kws):\n assert not args\n assert not kws\n if ary.ndim == 1:\n return signature(types.none)\n\n @bound_function(\"array.argsort\")\n def resolve_argsort(self, ary, args, kws):\n assert not args\n kwargs = dict(kws)\n kind = kwargs.pop('kind', types.Const('quicksort'))\n if kwargs:\n msg = \"Unsupported keywords: {!r}\"\n raise TypingError(msg.format([k for k in kwargs.keys()]))\n if ary.ndim == 1:\n def argsort_stub(kind='quicksort'):\n pass\n pysig = utils.pysignature(argsort_stub)\n sig = signature(types.Array(types.intp, 1, 'C'), kind).replace(pysig=pysig)\n return sig\n\n @bound_function(\"array.view\")\n def resolve_view(self, ary, args, kws):\n from .npydecl import _parse_dtype\n assert not kws\n dtype, = args\n dtype = _parse_dtype(dtype)\n if dtype is None:\n return\n retty = ary.copy(dtype=dtype)\n return signature(retty, *args)\n\n @bound_function(\"array.astype\")\n def resolve_astype(self, ary, args, kws):\n from .npydecl import _parse_dtype\n assert not kws\n dtype, = args\n dtype = _parse_dtype(dtype)\n if dtype is None:\n return\n if not self.context.can_convert(ary.dtype, dtype):\n raise TypeError(\"astype(%s) not supported on %s: \"\n \"cannot convert from %s to %s\"\n % (dtype, ary, ary.dtype, dtype))\n layout = ary.layout if ary.layout in 'CF' else 'C'\n retty = ary.copy(dtype=dtype, layout=layout)\n return signature(retty, *args)\n\n @bound_function(\"array.ravel\")\n def resolve_ravel(self, ary, args, kws):\n # Only support no argument version (default order='C')\n assert not kws\n assert not args\n return signature(ary.copy(ndim=1, layout='C'))\n\n @bound_function(\"array.flatten\")\n def resolve_flatten(self, ary, args, kws):\n # Only support no argument version (default order='C')\n assert not kws\n assert not args\n return signature(ary.copy(ndim=1, layout='C'))\n\n @bound_function(\"array.take\")\n def resolve_take(self, ary, args, kws):\n assert not kws\n argty, = args\n if isinstance(argty, types.Integer):\n sig = signature(ary.dtype, 
*args)\n elif isinstance(argty, types.Array):\n sig = signature(argty.copy(layout='C', dtype=ary.dtype), *args)\n elif isinstance(argty, types.List): # 1d lists only\n sig = signature(types.Array(ary.dtype, 1, 'C'), *args)\n elif isinstance(argty, types.BaseTuple):\n sig = signature(types.Array(ary.dtype, np.ndim(argty), 'C'), *args)\n else:\n raise TypeError(\"take(%s) not supported for %s\" % argty)\n return sig\n\n def generic_resolve(self, ary, attr):\n # Resolution of other attributes, for record arrays\n if isinstance(ary.dtype, types.Record):\n if attr in ary.dtype.fields:\n return ary.copy(dtype=ary.dtype.typeof(attr), layout='A')\n\n\n@infer_getattr\nclass DTypeAttr(AttributeTemplate):\n key = types.DType\n\n def resolve_type(self, ary):\n # Wrap the numeric type in NumberClass\n return types.NumberClass(ary.dtype)\n\n def resolve_kind(self, ary):\n if isinstance(ary.key, types.scalars.Float):\n val = 'f'\n elif isinstance(ary.key, types.scalars.Integer):\n val = 'i'\n else:\n return None # other types not supported yet\n return types.Const(val)\n\n@infer\nclass StaticGetItemArray(AbstractTemplate):\n key = \"static_getitem\"\n\n def generic(self, args, kws):\n # Resolution of members for record and structured arrays\n ary, idx = args\n if (isinstance(ary, types.Array) and isinstance(idx, str) and\n isinstance(ary.dtype, types.Record)):\n if idx in ary.dtype.fields:\n return ary.copy(dtype=ary.dtype.typeof(idx), layout='A')\n\n\n@infer_getattr\nclass RecordAttribute(AttributeTemplate):\n key = types.Record\n\n def generic_resolve(self, record, attr):\n ret = record.typeof(attr)\n assert ret\n return ret\n\n@infer\nclass StaticGetItemRecord(AbstractTemplate):\n key = \"static_getitem\"\n\n def generic(self, args, kws):\n # Resolution of members for records\n record, idx = args\n if isinstance(record, types.Record) and isinstance(idx, str):\n ret = record.typeof(idx)\n assert ret\n return ret\n\n@infer\nclass StaticSetItemRecord(AbstractTemplate):\n key = \"static_setitem\"\n\n def generic(self, args, kws):\n # Resolution of members for record and structured arrays\n record, idx, value = args\n if isinstance(record, types.Record) and isinstance(idx, str):\n expectedty = record.typeof(idx)\n if self.context.can_convert(value, expectedty) is not None:\n return signature(types.void, record, types.Const(idx), value)\n\n\n@infer_getattr\nclass ArrayCTypesAttribute(AttributeTemplate):\n key = types.ArrayCTypes\n\n def resolve_data(self, ctinfo):\n return types.uintp\n\n\n@infer_getattr\nclass ArrayFlagsAttribute(AttributeTemplate):\n key = types.ArrayFlags\n\n def resolve_contiguous(self, ctflags):\n return types.boolean\n\n def resolve_c_contiguous(self, ctflags):\n return types.boolean\n\n def resolve_f_contiguous(self, ctflags):\n return types.boolean\n\n\n@infer_getattr\nclass NestedArrayAttribute(ArrayAttribute):\n key = types.NestedArray\n\n\ndef _expand_integer(ty):\n \"\"\"\n If *ty* is an integer, expand it to a machine int (like Numpy).\n \"\"\"\n if isinstance(ty, types.Integer):\n if ty.signed:\n return max(types.intp, ty)\n else:\n return max(types.uintp, ty)\n elif isinstance(ty, types.Boolean):\n return types.intp\n else:\n return ty\n\ndef generic_homog(self, args, kws):\n assert not args\n assert not kws\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_expand(self, args, kws):\n assert not args\n assert not kws\n return signature(_expand_integer(self.this.dtype), recvr=self.this)\n\ndef sum_expand(self, args, kws):\n \"\"\"\n sum can be called with or 
without an axis parameter.\n \"\"\"\n pysig = None\n if kws:\n def sum_stub(axis):\n pass\n pysig = utils.pysignature(sum_stub)\n # rewrite args\n args = list(args) + [kws['axis']]\n kws = None\n args_len = len(args)\n assert args_len <= 1\n if args_len == 0:\n # No axis parameter so the return type of the summation is a scalar\n # of the type of the array.\n out = signature(_expand_integer(self.this.dtype), *args,\n recvr=self.this)\n else:\n # There is an axis paramter so the return type of this summation is\n # an array of dimension one less than the input array.\n return_type = types.Array(dtype=_expand_integer(self.this.dtype),\n ndim=self.this.ndim-1, layout='C')\n out = signature(return_type, *args, recvr=self.this)\n return out.replace(pysig=pysig)\n\ndef generic_expand_cumulative(self, args, kws):\n assert not args\n assert not kws\n assert isinstance(self.this, types.Array)\n return_type = types.Array(dtype=_expand_integer(self.this.dtype),\n ndim=1, layout='C')\n return signature(return_type, recvr=self.this)\n\ndef generic_hetero_real(self, args, kws):\n assert not args\n assert not kws\n if isinstance(self.this.dtype, (types.Integer, types.Boolean)):\n return signature(types.float64, recvr=self.this)\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_hetero_always_real(self, args, kws):\n assert not args\n assert not kws\n if isinstance(self.this.dtype, (types.Integer, types.Boolean)):\n return signature(types.float64, recvr=self.this)\n if isinstance(self.this.dtype, types.Complex):\n return signature(self.this.dtype.underlying_float, recvr=self.this)\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_index(self, args, kws):\n assert not args\n assert not kws\n return signature(types.intp, recvr=self.this)\n\ndef install_array_method(name, generic, support_literals=False):\n my_attr = {\"key\": \"array.\" + name, \"generic\": generic}\n temp_class = type(\"Array_\" + name, (AbstractTemplate,), my_attr)\n if support_literals:\n temp_class.support_literals = support_literals\n def array_attribute_attachment(self, ary):\n return types.BoundFunction(temp_class, ary)\n\n setattr(ArrayAttribute, \"resolve_\" + name, array_attribute_attachment)\n\n# Functions that return the same type as the array\nfor fname in [\"min\", \"max\"]:\n install_array_method(fname, generic_homog)\n\n# Functions that return a machine-width type, to avoid overflows\ninstall_array_method(\"prod\", generic_expand)\ninstall_array_method(\"sum\", sum_expand, support_literals=True)\n\n# Functions that return a machine-width type, to avoid overflows\nfor fname in [\"cumsum\", \"cumprod\"]:\n install_array_method(fname, generic_expand_cumulative)\n\n# Functions that require integer arrays get promoted to float64 return\nfor fName in [\"mean\"]:\n install_array_method(fName, generic_hetero_real)\n\n# var and std by definition return in real space and int arrays\n# get promoted to float64 return\nfor fName in [\"var\", \"std\"]:\n install_array_method(fName, generic_hetero_always_real)\n\n\n# Functions that return an index (intp)\ninstall_array_method(\"argmin\", generic_index)\ninstall_array_method(\"argmax\", generic_index)\n", "path": "numba/typing/arraydecl.py"}]} |
gh_patches_debug_1417 | rasdani/github-patches | git_diff | Theano__Theano-4343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bilinear_Upsampling requires specifying batch_size and num_input_channels
Referring to this line:
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py#L570
this piece of code doesn't work with `batch_size` and/or `num_input_channels` set to `None`;
it requires both dimensions to be specified as scalar values.
--- END ISSUE ---
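For context, a minimal sketch of how the reported behaviour could be reproduced (the keyword arguments match the `bilinear_upsampling` signature shown below; the expectation that the `None` case fails comes from the issue report, not from running this snippet):

```
import theano.tensor as T
from theano.tensor.nnet.abstract_conv import bilinear_upsampling

x = T.tensor4('x')  # batch size and number of channels only known at run time

# Builds fine when both sizes are given as concrete scalars
y_ok = bilinear_upsampling(x, ratio=2, batch_size=5, num_input_channels=3)

# Reported failure mode: leaving batch_size and/or num_input_channels as None
y_bad = bilinear_upsampling(x, ratio=2, batch_size=None, num_input_channels=None)
```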
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `theano/tensor/nnet/abstract_conv.py`
Content:
```
1 """
2 Abstract conv interface
3 """
4 from __future__ import absolute_import, print_function, division
5
6 import logging
7 from six import reraise, integer_types
8 import sys
9
10 import theano
11
12 from theano.tensor import as_tensor_variable, patternbroadcast
13 from theano.tensor import get_scalar_constant_value, NotScalarConstantError
14 from theano.gof import Apply, Op
15
16 from six.moves import xrange
17
18 import warnings
19 import numpy
20 import numpy as np
21
22 try:
23 from scipy.signal.signaltools import _valfrommode, _bvalfromboundary
24 from scipy.signal.sigtools import _convolve2d
25 imported_scipy_signal = True
26 except ImportError:
27 imported_scipy_signal = False
28
29
30 __docformat__ = "restructuredtext en"
31 _logger = logging.getLogger("theano.tensor.nnet.abstract_conv")
32
33
34 def get_conv_output_shape(image_shape, kernel_shape,
35 border_mode, subsample):
36 """
37 This function compute the output shape of convolution operation.
38
39 Parameters
40 ----------
41 image_shape: tuple of int (symbolic or numeric) corresponding to the input
42         image shape. Its four (or five) elements must correspond respectively
43 to: batch size, number of input channels, height and width (and
44 possibly depth) of the image. None where undefined.
45 kernel_shape: tuple of int (symbolic or numeric) corresponding to the
46 kernel shape. Its four (or five) elements must correspond respectively
47 to: number of output channels, number of input channels, height and
48 width (and possibly depth) of the kernel. None where undefined.
49 border_mode: string, int (symbolic or numeric) or tuple of int (symbolic
50 or numeric). If it is a string, it must be 'valid', 'half' or 'full'.
51 If it is a tuple, its two (or three) elements respectively correspond
52 to the padding on height and width (and possibly depth) axis.
53     subsample: tuple of int (symbolic or numeric). Its two or three elements
54         respectively correspond to the subsampling on height and width (and
55 possibly depth) axis.
56
57 Returns
58 -------
59 output_shape: tuple of int corresponding to the output image shape. Its
60         four elements must correspond respectively to: batch size, number of
61 output channels, height and width of the image. None where undefined.
62
63 """
64 bsize, imshp = image_shape[0], image_shape[2:]
65 nkern, kshp = kernel_shape[0], kernel_shape[2:]
66 if isinstance(border_mode, tuple):
67 out_shp = tuple(get_conv_shape_1axis(
68 imshp[i], kshp[i], border_mode[i], subsample[i])
69 for i in range(len(subsample)))
70 else:
71 out_shp = tuple(get_conv_shape_1axis(
72 imshp[i], kshp[i], border_mode, subsample[i])
73 for i in range(len(subsample)))
74 return (bsize, nkern) + out_shp
75
76
77 def get_conv_shape_1axis(image_shape, kernel_shape,
78 border_mode, subsample):
79 """
80 This function compute the output shape of convolution operation.
81
82 Parameters
83 ----------
84 image_shape: int or None. Corresponds to the input image shape on a
85 given axis. None if undefined.
86 kernel_shape: int or None. Corresponds to the kernel shape on a given
87 axis. None if undefined.
88 border_mode: string or int. If it is a string, it must be
89 'valid', 'half' or 'full'. If it is an integer, it must correspond to
90 the padding on the considered axis.
91 subsample: int. It must correspond to the subsampling on the
92 considered axis.
93
94 Returns
95 -------
96 out_shp: int corresponding to the output image shape on the
97 considered axis. None if undefined.
98
99 """
100 if None in [image_shape, kernel_shape, border_mode, subsample]:
101 return None
102 if border_mode == "half":
103 pad = kernel_shape // 2
104 elif border_mode == "full":
105 pad = kernel_shape - 1
106 elif border_mode == "valid":
107 pad = 0
108 else:
109 pad = border_mode
110 if pad < 0:
111 raise ValueError("border_mode must be >= 0")
112 out_shp = (image_shape + 2 * pad - kernel_shape) // subsample + 1
113
114 return out_shp
115
116
117 def conv2d(input,
118 filters,
119 input_shape=None,
120 filter_shape=None,
121 border_mode='valid',
122 subsample=(1, 1),
123 filter_flip=True):
124 """This function will build the symbolic graph for convolving a mini-batch of a
125 stack of 2D inputs with a set of 2D filters. The implementation is modelled
126 after Convolutional Neural Networks (CNN).
127
128 Refer to :func:`nnet.conv2d <theano.tensor.nnet.conv2d>` for a more detailed documentation.
129 """
130
131 input = as_tensor_variable(input)
132 filters = as_tensor_variable(filters)
133 conv_op = AbstractConv2d(imshp=input_shape,
134 kshp=filter_shape,
135 border_mode=border_mode,
136 subsample=subsample,
137 filter_flip=filter_flip)
138 return conv_op(input, filters)
139
140
141 def conv2d_grad_wrt_inputs(output_grad,
142 filters,
143 input_shape,
144 filter_shape=None,
145 border_mode='valid',
146 subsample=(1, 1),
147 filter_flip=True):
148 """Compute conv output gradient w.r.t its inputs
149
150 This function builds the symbolic graph for getting the
151 gradient of the output of a convolution (namely output_grad)
152 w.r.t the input of the convolution, given a set of 2D filters
153 used by the convolution, such that the output_grad is upsampled
154 to the input_shape.
155
156 Parameters
157 ----------
158 output_grad : symbolic 4D tensor
159 mini-batch of feature map stacks, of shape (batch size, input
160 channels, input rows, input columns). This is the tensor that
161 will be upsampled or the output gradient of the convolution
162 whose gradient will be taken with respect to the input of the
163 convolution.
164 filters : symbolic 4D tensor
165 set of filters used in CNN layer of shape (output channels,
166 input channels, filter rows, filter columns). See the
167 optional parameter ``filter_shape``.
168 input_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2
169 The shape of the input (upsampled) parameter.
170 A tuple/list of len 4, with the first two dimensions
171 being None or int or Constant and the last two dimensions being
172 Tensor or int or Constant.
173 Not Optional, since given the output_grad shape
174 and the subsample values, multiple input_shape may be
175 plausible.
176 filter_shape : None or [None/int/Constant] * 4
177 The shape of the filters parameter. None or a tuple/list of len 4.
178 Optional, possibly used to choose an optimal implementation.
179 You can give ``None`` for any element of the list to specify that
180 this element is not known at compile time.
181 border_mode : str, int or tuple of two int
182 Either of the following:
183
184 ``'valid'``
185 apply filter wherever it completely overlaps with the
186 input. Generates output of shape: input shape - filter
187 shape + 1
188
189 ``'full'``
190 apply filter wherever it partly overlaps with the input.
191 Generates output of shape: input shape + filter shape - 1
192
193 ``'half'``
194 pad input with a symmetric border of ``filter rows // 2``
195 rows and ``filter columns // 2`` columns, then perform a
196 valid convolution. For filters with an odd number of rows
197 and columns, this leads to the output shape being equal to
198 the input shape. It is known as 'same' elsewhere.
199
200 ``int``
201 pad input with a symmetric border of zeros of the given
202 width, then perform a valid convolution.
203
204 ``(int1, int2)``
205 pad input with a symmetric border of ``int1`` rows and
206 ``int2`` columns, then perform a valid convolution.
207
208 subsample : tuple of len 2
209 The subsampling used in the forward pass. Also called strides
210 elsewhere.
211 filter_flip : bool
212 If ``True``, will flip the filter rows and columns before
213 sliding them over the input. This operation is normally
214 referred to as a convolution, and this is the default. If
215 ``False``, the filters are not flipped and the operation is
216 referred to as a cross-correlation.
217
218 Returns
219 -------
220 symbolic 4D tensor
221 set of feature maps generated by convolutional layer. Tensor
222 is of shape (batch size, output channels, output rows, output
223 columns)
224
225 Notes
226 -----
227
228 :note: If CuDNN is available, it will be used on the
229 GPU. Otherwise, it is the *CorrMM* convolution that will be used
230 "caffe style convolution".
231
232 :note: This is only supported in Theano 0.8 or the development
233 version until it is released.
234
235 """
236
237 filters = as_tensor_variable(filters)
238 output_grad = as_tensor_variable(output_grad)
239
240 # checking the type of input_shape
241 for dim in [0, 1]:
242 assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,
243 integer_types, type(None)))
244 for dim in [2, 3]:
245 assert isinstance(input_shape[dim], (theano.tensor.TensorVariable,
246 theano.tensor.TensorConstant,
247 integer_types))
248
249 # checking the type of filter_shape
250 if filter_shape is not None:
251 for dim in [0, 1, 2, 3]:
252 assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,
253 integer_types, type(None)))
254
255 # setting the last two dimensions of input_shape to None, if
256 # the type of these dimensions is TensorVariable.
257 numerical_input_shape = list(input_shape)
258 for dim in [2, 3]:
259 if isinstance(input_shape[dim], theano.tensor.TensorVariable):
260 numerical_input_shape[dim] = None
261
262 grad_input_op = AbstractConv2d_gradInputs(imshp=numerical_input_shape,
263 kshp=filter_shape,
264 border_mode=border_mode,
265 subsample=subsample,
266 filter_flip=filter_flip)
267
268 return grad_input_op(filters, output_grad, input_shape[-2:])
269
270
271 def conv2d_grad_wrt_weights(input,
272 output_grad,
273 filter_shape,
274 input_shape=None,
275 border_mode='valid',
276 subsample=(1, 1),
277 filter_flip=True):
278 """Compute conv output gradient w.r.t its weights
279
280 This function will build the symbolic graph for getting the
281     gradient of the output of a convolution (output_grad) w.r.t its weights.
282
283 Parameters
284 ----------
285 input : symbolic 4D tensor
286 mini-batch of feature map stacks, of shape (batch size, input
287 channels, input rows, input columns). This is the input of
288 the convolution in the forward pass.
289 output_grad : symbolic 4D tensor
290 mini-batch of feature map stacks, of shape (batch size, input
291 channels, input rows, input columns). This is the gradient of
292 the output of convolution.
293 filter_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2
294 The shape of the filter parameter. A tuple/list of len 4, with the
295 first two dimensions being None or int or Constant and the last two
296 dimensions being Tensor or int or Constant.
297 Not Optional, since given the output_grad shape and
298 the input_shape, multiple filter_shape may be plausible.
299 input_shape : None or [None/int/Constant] * 4
300 The shape of the input parameter. None or a tuple/list of len 4.
301 Optional, possibly used to choose an optimal implementation.
302 You can give ``None`` for any element of the list to specify
303 that this element is not known at compile time.
304 border_mode : str, int or tuple of two ints
305 Either of the following:
306
307 ``'valid'``
308 apply filter wherever it completely overlaps with the
309 input. Generates output of shape: input shape - filter
310 shape + 1
311
312 ``'full'``
313 apply filter wherever it partly overlaps with the input.
314 Generates output of shape: input shape + filter shape - 1
315
316 ``'half'``
317 pad input with a symmetric border of ``filter rows // 2``
318 rows and ``filter columns // 2`` columns, then perform a
319 valid convolution. For filters with an odd number of rows
320 and columns, this leads to the output shape being equal to
321 the input shape. It is known as 'same' elsewhere.
322
323 ``int``
324 pad input with a symmetric border of zeros of the given
325 width, then perform a valid convolution.
326
327 ``(int1, int2)``
328 pad input with a symmetric border of ``int1`` rows and
329 ``int2`` columns, then perform a valid convolution.
330
331 subsample : tuple of len 2
332 The subsampling used in the forward pass of the convolutional
333 operation. Also called strides elsewhere.
334 filter_flip : bool
335 If ``True``, will flip the filter rows and columns before
336 sliding them over the input. This operation is normally
337 referred to as a convolution, and this is the default. If
338 ``False``, the filters are not flipped and the operation is
339 referred to as a cross-correlation.
340
341 Returns
342 -------
343 symbolic 4D tensor
344 set of feature maps generated by convolutional layer. Tensor
345 is of shape (batch size, output channels, output rows, output
346 columns)
347
348 Notes
349 -----
350
351 :note: If CuDNN is available, it will be used on the
352 GPU. Otherwise, it is the *CorrMM* convolution that will be used
353 "caffe style convolution".
354
355 :note: This is only supported in Theano 0.8 or the development
356 version until it is released.
357
358 """
359
360 input = as_tensor_variable(input)
361 output_grad = as_tensor_variable(output_grad)
362
363 # checking the type of filter_shape
364 for dim in [0, 1]:
365 assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,
366 integer_types, type(None)))
367 for dim in [2, 3]:
368 assert isinstance(filter_shape[dim], (theano.tensor.TensorVariable,
369 theano.tensor.TensorConstant,
370 integer_types))
371
372 # checking the type of input_shape
373 if input_shape is not None:
374 for dim in [0, 1, 2, 3]:
375 assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,
376 integer_types, type(None)))
377
378 # setting the last two dimensions of filter_shape to None, if
379 # the type of these dimensions is TensorVariable.
380 numerical_filter_shape = list(filter_shape)
381 for dim in [2, 3]:
382 if isinstance(filter_shape[dim], theano.tensor.TensorVariable):
383 numerical_filter_shape[dim] = None
384
385 gradWeight_op = AbstractConv2d_gradWeights(imshp=input_shape,
386 kshp=numerical_filter_shape,
387 border_mode=border_mode,
388 subsample=subsample,
389 filter_flip=filter_flip)
390
391 return gradWeight_op(input, output_grad, filter_shape[:-2])
392
393
394 def bilinear_kernel_2D(ratio, normalize=True):
395 """Compute 2D kernel for bilinear upsampling
396
397 This function builds the 2D kernel that can be used to upsample
398 a tensor by the given ratio using bilinear interpolation.
399
400 Parameters
401 ----------
402 ratio: int or Constant/Scalar Theano tensor of int* dtype
403 the ratio by which an image will be upsampled by the returned filter
404 in the 2D space.
405
406 normalize: bool
407         indicates whether to normalize the kernel or not.
408 Default is True.
409
410 Returns
411 -------
412 symbolic 2D tensor
413 the 2D kernels that can be applied to any given image to upsample it
414 by the indicated ratio using bilinear interpolation in two dimensions.
415
416 """
417
418 hkern = bilinear_kernel_1D(ratio=ratio, normalize=normalize).dimshuffle('x', 0)
419 vkern = bilinear_kernel_1D(ratio=ratio, normalize=normalize).dimshuffle(0, 'x')
420 kern = hkern * vkern
421 return kern
422
423
424 def bilinear_kernel_1D(ratio, normalize=True):
425 """Compute 1D kernel for bilinear upsampling
426
427 This function builds the 1D kernel that can be used to upsample
428 a tensor by the given ratio using bilinear interpolation.
429
430 Parameters
431 ----------
432 ratio: int or Constant/Scalar Theano tensor of int* dtype
433 the ratio by which an image will be upsampled by the returned filter
434 in the 2D space.
435
436 normalize: bool
437         Indicates whether to normalize the kernel or not.
438 Default is True.
439
440 Returns
441 -------
442 symbolic 1D tensor
443 the 1D kernels that can be applied to any given image to upsample it
444 by the indicated ratio using bilinear interpolation in one dimension.
445
446 """
447
448 T = theano.tensor
449 half_kern = T.arange(1, ratio + 1, dtype=theano.config.floatX)
450 kern = T.concatenate([half_kern, half_kern[-2::-1]])
451
452 if normalize:
453 kern /= ratio
454 return kern
455
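# A minimal numpy sketch added for illustration (not part of the module): the
# same arithmetic as bilinear_kernel_1D, evaluated numerically. For ratio=2 it
# gives [0.5, 1.0, 0.5]; for ratio=3, [1/3, 2/3, 1, 2/3, 1/3]. The 2D kernel
# built by bilinear_kernel_2D above is the outer product of this vector with
# itself.
def _sketch_bilinear_kernel_1D_numpy(ratio, normalize=True):
    half_kern = np.arange(1, ratio + 1, dtype='float64')
    kern = np.concatenate([half_kern, half_kern[-2::-1]])
    if normalize:
        kern = kern / ratio
    return kern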
456
457 def bilinear_upsampling(input,
458 ratio,
459 batch_size=None,
460 num_input_channels=None,
461 use_1D_kernel=True):
462 """Compute bilinear upsampling
463
464 This function will build the symbolic graph for upsampling
465 a tensor by the given ratio using bilinear interpolation.
466
467 Parameters
468 ----------
469 input: symbolic 4D tensor
470 mini-batch of feature map stacks, of shape (batch size,
471 input channels, input rows, input columns) that will be upsampled.
472
473 ratio: int or Constant or Scalar Tensor of int* dtype
474 the ratio by which the input is upsampled in the 2D space (row and
475 col size).
476
477 batch_size: None, int or Constant variable
478 The size of the first dimension of the input variable.
479 Optional, possibly used to choose an optimal implementation.
480 batch_size will be used only if num_input_channels is not None.
481
482 num_input_channels: None, int or Constant variable
483 The size of the second dimension of the input variable.
484 Optional, possibly used to choose an optimal implementation.
485 num_input_channels will be used only if batch_size is not None.
486
487 use_1D_kernel: bool
488         If set to True, row and column will be upsampled separately by 1D
489 kernels, otherwise they are upsampled together using a 2D kernel. The
490 final result is the same, only the speed can differ, given factors such
491 as upsampling ratio.
492
493 Returns
494 -------
495 symbolic 4D tensor
496 set of feature maps generated by bilinear upsampling. Tensor
497 is of shape (batch size, num_input_channels, input row size * ratio,
498 input column size * ratio)
499
500 Notes
501 -----
502
503 :note: The kernel used for bilinear interpolation is fixed (not learned).
504
505     :note: When the upsampling ratio is even, the last row and column are
506     repeated one extra time compared to the first row and column, which makes
507 the upsampled tensor asymmetrical on both sides. This does not happen when
508 the upsampling ratio is odd.
509
510 """
511
512 T = theano.tensor
513 try:
514 up_bs = batch_size * num_input_channels
515 except TypeError:
516 up_bs = None
517 row, col = input.shape[2:]
518 up_input = input.reshape((-1, 1, row, col))
519
520 # concatenating the first and last row and column
521 # first and last row
522 concat_mat = T.concatenate((up_input[:, :, :1, :], up_input,
523 up_input[:, :, -1:, :]), axis=2)
524 # first and last col
525 concat_mat = T.concatenate((concat_mat[:, :, :, :1], concat_mat,
526 concat_mat[:, :, :, -1:]), axis=3)
527 concat_col = col + 2
528
529 pad = 2 * ratio - (ratio - 1) // 2 - 1
530
531 if use_1D_kernel:
532 kern = bilinear_kernel_1D(ratio=ratio, normalize=True)
533 # upsampling rows
534 upsampled_row = conv2d_grad_wrt_inputs(output_grad=concat_mat,
535 filters=kern[np.newaxis,
536 np.newaxis, :,
537 np.newaxis],
538 input_shape=(up_bs, 1,
539 row * ratio,
540 concat_col),
541 filter_shape=(1, 1, None, 1),
542 border_mode=(pad, 0),
543 subsample=(ratio, 1),
544 filter_flip=True)
545 # upsampling cols
546 upsampled_mat = conv2d_grad_wrt_inputs(output_grad=upsampled_row,
547 filters=kern[np.newaxis,
548 np.newaxis,
549 np.newaxis, :],
550 input_shape=(up_bs, 1,
551 row * ratio,
552 col * ratio),
553 filter_shape=(1, 1, 1, None),
554 border_mode=(0, pad),
555 subsample=(1, ratio),
556 filter_flip=True)
557 else:
558 kern = bilinear_kernel_2D(ratio=ratio, normalize=True)
559 upsampled_mat = conv2d_grad_wrt_inputs(output_grad=concat_mat,
560 filters=kern[np.newaxis,
561 np.newaxis, :, :],
562 input_shape=(up_bs, 1,
563 row * ratio,
564 col * ratio),
565 filter_shape=(1, 1, None, None),
566 border_mode=(pad, pad),
567 subsample=(ratio, ratio),
568 filter_flip=True)
569
570 return upsampled_mat.reshape((batch_size, num_input_channels,
571 row * ratio, col * ratio))
572
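# A minimal sketch added for illustration (the names are hypothetical): a
# typical call to bilinear_upsampling on a symbolic 4D input. Passing explicit
# batch_size and num_input_channels lets the final reshape above resolve its
# target shape when the graph is built.
def _sketch_bilinear_upsampling_graph():
    x = theano.tensor.tensor4('x')
    return bilinear_upsampling(x, ratio=2, batch_size=8,
                               num_input_channels=3, use_1D_kernel=True)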
573
574 class BaseAbstractConv2d(Op):
575 """Base class for AbstractConv
576
577 Define an abstract convolution op that will be replaced with the
578 appropriate implementation
579
580 Parameters
581 ----------
582 imshp: None, tuple/list of len 4 of int or Constant variable
583 The shape of the input parameter.
584 Optional, possibly used to choose an optimal implementation.
585 You can give ``None`` for any element of the list to specify that this
586 element is not known at compile time.
587 imshp is defined w.r.t the forward conv.
588
589 kshp: None, tuple/list of len 4 of int or Constant variable
590 The shape of the filters parameter.
591 Optional, possibly used to choose an optimal implementation.
592 You can give ``None`` for any element of the list to specify that this
593 element is not known at compile time.
594 kshp is defined w.r.t the forward conv.
595
596 border_mode: str, int or tuple of two int
597 Either of the following:
598
599 ``'valid'``: apply filter wherever it completely overlaps with the
600 input. Generates output of shape: input shape - filter shape + 1
601 ``'full'``: apply filter wherever it partly overlaps with the input.
602 Generates output of shape: input shape + filter shape - 1
603 ``'half'``: pad input with a symmetric border of ``filter rows // 2``
604 rows and ``filter columns // 2`` columns, then perform a valid
605 convolution. For filters with an odd number of rows and columns, this
606 leads to the output shape being equal to the input shape.
607 ``int``: pad input with a symmetric border of zeros of the given
608 width, then perform a valid convolution.
609 ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows
610 and ``int2`` columns, then perform a valid convolution.
611
612 subsample: tuple of len 2
613 Factor by which to subsample the output.
614 Also called strides elsewhere.
615
616 filter_flip: bool
617 If ``True``, will flip the filter rows and columns
618 before sliding them over the input. This operation is normally referred
619 to as a convolution, and this is the default. If ``False``, the filters
620 are not flipped and the operation is referred to as a
621 cross-correlation.
622
623 """
624 check_broadcast = False
625 __props__ = ('border_mode', 'subsample', 'filter_flip', 'imshp', 'kshp')
626
627 def __init__(self,
628 imshp=None, kshp=None,
629 border_mode="valid", subsample=(1, 1),
630 filter_flip=True):
631
632 if isinstance(border_mode, integer_types):
633 border_mode = (border_mode, border_mode)
634 if isinstance(border_mode, tuple):
635 pad_h, pad_w = map(int, border_mode)
636 border_mode = (pad_h, pad_w)
637 if border_mode == (0, 0):
638 border_mode = 'valid'
639 if not ((isinstance(border_mode, tuple) and min(border_mode) >= 0) or
640 border_mode in ('valid', 'full', 'half')):
641 raise ValueError(
642 'invalid border_mode {}, which must be either '
643 '"valid", "full", "half", an integer or a pair of'
644 ' integers'.format(border_mode))
645
646 self.imshp = tuple(imshp) if imshp else (None,) * 4
647 for imshp_i in self.imshp:
648 if imshp_i is not None:
649 # Components of imshp should be constant or ints
650 try:
651 get_scalar_constant_value(imshp_i,
652 only_process_constants=True)
653 except NotScalarConstantError:
654 reraise(ValueError,
655 ValueError("imshp should be None or a tuple of "
656 "constant int values"),
657 sys.exc_info()[2])
658 self.kshp = tuple(kshp) if kshp else (None,) * 4
659 for kshp_i in self.kshp:
660 if kshp_i is not None:
661 # Components of kshp should be constant or ints
662 try:
663 get_scalar_constant_value(kshp_i,
664 only_process_constants=True)
665 except NotScalarConstantError:
666 reraise(ValueError,
667 ValueError("kshp should be None or a tuple of "
668 "constant int values"),
669 sys.exc_info()[2])
670 self.border_mode = border_mode
671 self.filter_flip = filter_flip
672
673 if len(subsample) != 2:
674 raise ValueError("subsample must have two elements")
675 self.subsample = tuple(subsample)
676
677 def flops(self, inp, outp):
678 """ Useful with the hack in profilemode to print the MFlops"""
679 # if the output shape is correct, then this gives the correct
680 # flops for any direction, sampling, padding, and border mode
681 inputs, filters = inp
682 outputs, = outp
683 assert inputs[1] == filters[1]
684 # nb mul and add by output pixel
685 flops = filters[2] * filters[3] * 2
686 # nb flops by output image
687 flops *= outputs[2] * outputs[3]
688 # nb patch multiplied
689 flops *= inputs[1] * filters[0] * inputs[0]
690 return flops
691
692 def do_constant_folding(self, node):
693 # Disable constant folding since there is no implementation.
694 # This may change in the future.
695 return False
696
697 def conv2d(self, img, kern, mode="valid"):
698 """
699         Basic slow Python implementation for DebugMode
700 """
701
702 if not imported_scipy_signal:
703 raise NotImplementedError(
704 "AbstractConv perform requires the python package"
705 " for scipy.signal to be installed.")
706 if not (mode in ('valid', 'full')):
707 raise ValueError(
708 'invalid mode {}, which must be either '
709 '"valid" or "full"'.format(mode))
710
711 out_shape = get_conv_output_shape(img.shape, kern.shape, mode, [1, 1])
712 out = numpy.zeros(out_shape, dtype=img.dtype)
713 val = _valfrommode(mode)
714 bval = _bvalfromboundary('fill')
715
716 with warnings.catch_warnings():
717 warnings.simplefilter('ignore', numpy.ComplexWarning)
718 for b in xrange(img.shape[0]):
719 for n in xrange(kern.shape[0]):
720 for im0 in xrange(img.shape[1]):
721 # some cast generates a warning here
722 out[b, n, ...] += _convolve2d(img[b, im0, ...],
723 kern[n, im0, ...],
724 1, val, bval, 0)
725 return out
726
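# A minimal sketch added for illustration (not part of the module): the flop
# count computed by BaseAbstractConv2d.flops above, written out for one
# concrete case. Each output pixel costs kh * kw multiply-adds per input
# channel, counted as 2 flops each.
def _sketch_conv_flops(inputs=(2, 3, 32, 32), filters=(4, 3, 3, 3),
                       outputs=(2, 4, 30, 30)):
    flops = filters[2] * filters[3] * 2          # per output pixel
    flops *= outputs[2] * outputs[3]             # per output feature map
    flops *= inputs[1] * filters[0] * inputs[0]  # in channels * filters * batch
    return flops                                 # 388800 for the defaults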
727
728 class AbstractConv2d(BaseAbstractConv2d):
729 """ Abstract Op for the forward convolution.
730 Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`
731 for a more detailed documentation.
732 """
733
734 def __init__(self,
735 imshp=None,
736 kshp=None,
737 border_mode="valid",
738 subsample=(1, 1),
739 filter_flip=True):
740 super(AbstractConv2d, self).__init__(imshp, kshp,
741 border_mode, subsample,
742 filter_flip)
743
744 def make_node(self, img, kern):
745 # Make sure both inputs are Variables with the same Type
746 if not isinstance(img, theano.Variable):
747 img = as_tensor_variable(img)
748 if not isinstance(kern, theano.Variable):
749 kern = as_tensor_variable(kern)
750 ktype = img.type.clone(dtype=kern.dtype,
751 broadcastable=kern.broadcastable)
752 kern = ktype.filter_variable(kern)
753
754 if img.type.ndim != 4:
755 raise TypeError('img must be 4D tensor')
756 if kern.type.ndim != 4:
757 raise TypeError('kern must be 4D tensor')
758
759 broadcastable = [img.broadcastable[0],
760 kern.broadcastable[0],
761 False, False]
762 output = img.type.clone(broadcastable=broadcastable)()
763 return Apply(self, [img, kern], [output])
764
765 def perform(self, node, inp, out_):
766 img, kern = inp
767 img = numpy.asarray(img)
768 kern = numpy.asarray(kern)
769 o, = out_
770 mode = self.border_mode
771
772 if not ((isinstance(mode, tuple) and min(mode) >= 0) or
773 mode in ('valid', 'full', 'half')):
774 raise ValueError(
775 'invalid border_mode {}, which must be either '
776 '"valid", "full", "half", an integer or a pair of'
777 ' integers'.format(mode))
778
779 if mode == "full":
780 mode = (kern.shape[2] - 1, kern.shape[3] - 1)
781 elif mode == "half":
782 mode = (kern.shape[2] // 2, kern.shape[3] // 2)
783 if isinstance(mode, tuple):
784 pad_h, pad_w = map(int, mode)
785 mode = "valid"
786 new_img = numpy.zeros((img.shape[0], img.shape[1],
787 img.shape[2] + 2 * pad_h,
788 img.shape[3] + 2 * pad_w), dtype=img.dtype)
789 new_img[:, :, pad_h:img.shape[2] + pad_h, pad_w:img.shape[3] + pad_w] = img
790 img = new_img
791 if not self.filter_flip:
792 kern = kern[:, :, ::-1, ::-1]
793 conv_out = self.conv2d(img, kern, mode="valid")
794 conv_out = conv_out[:, :, ::self.subsample[0], ::self.subsample[1]]
795
796 o[0] = node.outputs[0].type.filter(conv_out)
797
798 def R_op(self, inputs, eval_points):
799 rval = None
800 if eval_points[0] is not None:
801 rval = self.make_node(eval_points[0], inputs[1]).outputs[0]
802 if eval_points[1] is not None:
803 if rval is None:
804 rval = self.make_node(inputs[0], eval_points[1]).outputs[0]
805 else:
806 rval += self.make_node(inputs[0], eval_points[1]).outputs[0]
807 return [rval]
808
809 def grad(self, inp, grads):
810 bottom, weights = inp
811 top, = grads
812 d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
813 self.border_mode,
814 self.subsample,
815 self.filter_flip)(
816 weights, top, bottom.shape[-2:])
817 d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,
818 self.border_mode,
819 self.subsample,
820 self.filter_flip)(
821                 bottom, top, weights.shape[-2:])
822
823
824 # Make sure that the broadcastable pattern of the inputs is used
825 # for the gradients, even if the grad opts are not able to infer
826 # that the dimensions are broadcastable.
827 # Also make sure that the gradient lives on the same device than
828 # the corresponding input.
829 d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)
830 d_bottom = bottom.type.filter_variable(d_bottom)
831 d_weights = patternbroadcast(d_weights, weights.broadcastable)
832 d_weights = weights.type.filter_variable(d_weights)
833 return d_bottom, d_weights
834
835 def infer_shape(self, node, input_shapes):
836 imshp = input_shapes[0]
837 kshp = input_shapes[1]
838
839 # replace symbolic shapes with known constant shapes
840 if self.imshp is not None:
841 imshp = [imshp[i] if self.imshp[i] is None else self.imshp[i]
842 for i in range(4)]
843 if self.kshp is not None:
844 kshp = [kshp[i] if self.kshp[i] is None else self.kshp[i]
845 for i in range(4)]
846 res = get_conv_output_shape(imshp, kshp, self.border_mode,
847 self.subsample)
848 return [res]
849
850
851 class AbstractConv2d_gradWeights(BaseAbstractConv2d):
852 """Gradient wrt. filters for `AbstractConv2d`.
853 Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`
854 for a more detailed documentation.
855
856 :note: You will not want to use this directly, but rely on
857 Theano's automatic differentiation or graph optimization to
858 use it as needed.
859
860 """
861 def __init__(self,
862 imshp=None,
863 kshp=None,
864 border_mode="valid",
865 subsample=(1, 1),
866 filter_flip=True):
867 super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp,
868 border_mode,
869 subsample,
870 filter_flip)
871
872 # Update shape/height_width
873 def make_node(self, img, topgrad, shape):
874 # Make sure both inputs are Variables with the same Type
875 if not isinstance(img, theano.Variable):
876 img = as_tensor_variable(img)
877 if not isinstance(topgrad, theano.Variable):
878 topgrad = as_tensor_variable(topgrad)
879 gtype = img.type.clone(dtype=topgrad.dtype,
880 broadcastable=topgrad.broadcastable)
881 topgrad = gtype.filter_variable(topgrad)
882
883 if img.type.ndim != 4:
884 raise TypeError('img must be 4D tensor')
885 if topgrad.type.ndim != 4:
886 raise TypeError('topgrad must be 4D tensor')
887
888 shape = as_tensor_variable(shape)
889 broadcastable = [topgrad.broadcastable[1],
890 img.broadcastable[1],
891 False, False]
892 output = img.type.clone(broadcastable=broadcastable)()
893 return Apply(self, [img, topgrad, shape], [output])
894
895 def perform(self, node, inp, out_):
896 img, topgrad, shape = inp
897 img = numpy.asarray(img)
898 topgrad = numpy.asarray(topgrad)
899
900 o, = out_
901
902 mode = self.border_mode
903 if not ((isinstance(mode, tuple) and min(mode) >= 0) or
904 mode in ('valid', 'full', 'half')):
905 raise ValueError(
906 'invalid border_mode {}, which must be either '
907 '"valid", "full", "half", an integer or a pair of'
908 ' integers'.format(mode))
909
910 if mode == "full":
911 mode = (shape[0] - 1, shape[1] - 1)
912 elif mode == "half":
913 mode = (shape[0] // 2, shape[1] // 2)
914 if isinstance(mode, tuple):
915 pad_h, pad_w = map(int, mode)
916 mode = "valid"
917 new_img = numpy.zeros((img.shape[0], img.shape[1],
918 img.shape[2] + 2 * pad_h,
919 img.shape[3] + 2 * pad_w), dtype=img.dtype)
920 new_img[:, :, pad_h:img.shape[2] + pad_h, pad_w:img.shape[3] + pad_w] = img
921 img = new_img
922
923 if self.subsample[0] > 1 or self.subsample[1] > 1:
924 new_shape = (topgrad.shape[0], topgrad.shape[1],
925 img.shape[2] - shape[0] + 1,
926 img.shape[3] - shape[1] + 1)
927 new_topgrad = numpy.zeros((new_shape), dtype=topgrad.dtype)
928 new_topgrad[:, :, ::self.subsample[0], ::self.subsample[1]] = topgrad
929 topgrad = new_topgrad
930
931 topgrad = topgrad.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]
932 img = img.transpose(1, 0, 2, 3)
933 kern = self.conv2d(img, topgrad, mode="valid")
934 if self.filter_flip:
935 kern = kern.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]
936 else:
937 kern = kern.transpose(1, 0, 2, 3)
938 o[0] = node.outputs[0].type.filter(kern)
939
940 def grad(self, inp, grads):
941 bottom, top = inp[:2]
942 weights, = grads
943 d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
944 self.border_mode,
945 self.subsample,
946 self.filter_flip)(
947 weights,
948 top,
949 bottom.shape[-2:])
950 d_top = AbstractConv2d(self.imshp,
951 self.kshp,
952 self.border_mode,
953 self.subsample,
954 self.filter_flip)(bottom, weights)
955 # Make sure that the broadcastable pattern of the inputs is used
956 # for the gradients, even if the grad opts are not able to infer
957 # that the dimensions are broadcastable.
958 # Also make sure that the gradient lives on the same device than
959 # the corresponding input.
960 d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)
961 d_bottom = bottom.type.filter_variable(d_bottom)
962 d_top = patternbroadcast(d_top, top.broadcastable)
963 d_top = top.type.filter_variable(d_top)
964
965 d_height_width = (theano.gradient.DisconnectedType()(),)
966 return (d_bottom, d_top) + d_height_width
967
968 def connection_pattern(self, node):
969 return [[1], [1], [0]] # no connection to height, width
970
971 def infer_shape(self, node, input_shapes):
972 # We use self.kshp (that was passed when creating the Op) if possible,
973 # or fall back to the `shape` input of the node.
974 # TODO: when there is no subsampling, try to infer the kernel shape
975 # from the shapes of inputs.
976 imshp = input_shapes[0]
977 topshp = input_shapes[1]
978 kshp = self.kshp[:] if self.kshp is not None else [None] * 4
979 fallback_kshp = [topshp[1], imshp[1], node.inputs[2][0], node.inputs[2][1]]
980 kshp = [fallback_kshp[i] if kshp[i] is None else kshp[i]
981 for i in range(4)]
982 return [kshp]
983
984
985 class AbstractConv2d_gradInputs(BaseAbstractConv2d):
986 """Gradient wrt. inputs for `AbstractConv2d`.
987 Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`
988 for a more detailed documentation.
989
990 :note: You will not want to use this directly, but rely on
991 Theano's automatic differentiation or graph optimization to
992 use it as needed.
993
994 """
995
996 def __init__(self,
997 imshp=None,
998 kshp=None,
999 border_mode="valid",
1000 subsample=(1, 1),
1001 filter_flip=True):
1002 super(AbstractConv2d_gradInputs, self).__init__(imshp, kshp,
1003 border_mode,
1004 subsample,
1005 filter_flip)
1006
1007 # Update shape/height_width
1008 def make_node(self, kern, topgrad, shape):
1009 # Make sure both inputs are Variables with the same Type
1010 if not isinstance(kern, theano.Variable):
1011 kern = as_tensor_variable(kern)
1012 if not isinstance(topgrad, theano.Variable):
1013 topgrad = as_tensor_variable(topgrad)
1014 gtype = kern.type.clone(dtype=topgrad.dtype,
1015 broadcastable=topgrad.broadcastable)
1016 topgrad = gtype.filter_variable(topgrad)
1017
1018 if kern.type.ndim != 4:
1019 raise TypeError('kern must be 4D tensor')
1020 if topgrad.type.ndim != 4:
1021 raise TypeError('topgrad must be 4D tensor')
1022
1023 shape = as_tensor_variable(shape)
1024 broadcastable = [topgrad.type.broadcastable[0],
1025 kern.type.broadcastable[1],
1026 False, False]
1027 output = kern.type.clone(broadcastable=broadcastable)()
1028 return Apply(self, [kern, topgrad, shape], [output])
1029
1030 def perform(self, node, inp, out_):
1031 kern, topgrad, shape = inp
1032 kern = numpy.asarray(kern)
1033 topgrad = numpy.asarray(topgrad)
1034 o, = out_
1035
1036 mode = self.border_mode
1037 if not ((isinstance(mode, tuple) and min(mode) >= 0) or
1038 mode in ('valid', 'full', 'half')):
1039 raise ValueError(
1040 'invalid border_mode {}, which must be either '
1041 '"valid", "full", "half", an integer or a pair of'
1042 ' integers'.format(mode))
1043
1044 pad_h, pad_w = 0, 0
1045 if mode == "full":
1046 pad_h, pad_w = (kern.shape[2] - 1, kern.shape[3] - 1)
1047 elif mode == "half":
1048 pad_h, pad_w = (kern.shape[2] // 2, kern.shape[3] // 2)
1049 elif isinstance(mode, tuple):
1050 pad_h, pad_w = map(int, self.border_mode)
1051 if self.subsample[0] > 1 or self.subsample[1] > 1:
1052 new_shape = (topgrad.shape[0], topgrad.shape[1],
1053 shape[0] + 2 * pad_h - kern.shape[2] + 1,
1054 shape[1] + 2 * pad_w - kern.shape[3] + 1)
1055 new_topgrad = numpy.zeros((new_shape), dtype=topgrad.dtype)
1056 new_topgrad[:, :, ::self.subsample[0], ::self.subsample[1]] = topgrad
1057 topgrad = new_topgrad
1058 kern = kern.transpose(1, 0, 2, 3)
1059 if self.filter_flip:
1060 topgrad = topgrad[:, :, ::-1, ::-1]
1061 img = self.conv2d(topgrad, kern, mode="full")
1062 if self.filter_flip:
1063 img = img[:, :, ::-1, ::-1]
1064 if pad_h > 0 or pad_w > 0:
1065 img = img[:, :, pad_h:img.shape[2] - pad_h, pad_w:img.shape[3] - pad_w]
1066 o[0] = node.outputs[0].type.filter(img)
1067
1068 def grad(self, inp, grads):
1069 weights, top = inp[:2]
1070 bottom, = grads
1071 d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,
1072 self.border_mode,
1073 self.subsample)(
1074 bottom, top,
1075 weights.shape[-2:])
1076 d_top = AbstractConv2d(self.imshp, self.kshp,
1077 self.border_mode, self.subsample)(
1078 bottom, weights)
1079 # Make sure that the broadcastable pattern of the inputs is used
1080 # for the gradients, even if the grad opts are not able to infer
1081 # that the dimensions are broadcastable.
1082 # Also make sure that the gradient lives on the same device than
1083 # the corresponding input.
1084 d_weights = patternbroadcast(d_weights, weights.broadcastable)
1085 d_weights = weights.type.filter_variable(d_weights)
1086 d_top = patternbroadcast(d_top, top.broadcastable)
1087 d_top = top.type.filter_variable(d_top)
1088
1089 d_height_width = (theano.gradient.DisconnectedType()(),)
1090 return (d_weights, d_top) + d_height_width
1091
1092 def connection_pattern(self, node):
1093 return [[1], [1], [0]] # no connection to height, width
1094
1095 def infer_shape(self, node, input_shapes):
1096 # We use self.imshp (that was passed when creating the Op) if possible,
1097 # or fall back to the `shape` input of the node.
1098 # TODO: when there is no subsampling, try to infer the image shape
1099 # from the shapes of inputs.
1100 kshp = input_shapes[0]
1101 topshp = input_shapes[1]
1102 imshp = self.imshp[:] if self.imshp is not None else [None] * 4
1103 fallback_imshp = [topshp[0], kshp[1], node.inputs[2][0],
1104 node.inputs[2][1]]
1105 imshp = [fallback_imshp[i] if imshp[i] is None else imshp[i]
1106 for i in range(4)]
1107 return [imshp]
1108
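# A minimal sketch added for illustration (names are hypothetical): building a
# forward-convolution graph with this module's conv2d wrapper, which
# instantiates the AbstractConv2d op defined above.
def _sketch_conv2d_graph():
    x = theano.tensor.tensor4('x')
    w = theano.tensor.tensor4('w')
    return conv2d(x, w, border_mode='half', subsample=(1, 1))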
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/theano/tensor/nnet/abstract_conv.py b/theano/tensor/nnet/abstract_conv.py
--- a/theano/tensor/nnet/abstract_conv.py
+++ b/theano/tensor/nnet/abstract_conv.py
@@ -567,7 +567,7 @@
subsample=(ratio, ratio),
filter_flip=True)
- return upsampled_mat.reshape((batch_size, num_input_channels,
+ return upsampled_mat.reshape((input.shape[0], input.shape[1],
row * ratio, col * ratio))
| {"golden_diff": "diff --git a/theano/tensor/nnet/abstract_conv.py b/theano/tensor/nnet/abstract_conv.py\n--- a/theano/tensor/nnet/abstract_conv.py\n+++ b/theano/tensor/nnet/abstract_conv.py\n@@ -567,7 +567,7 @@\n subsample=(ratio, ratio),\n filter_flip=True)\n \n- return upsampled_mat.reshape((batch_size, num_input_channels,\n+ return upsampled_mat.reshape((input.shape[0], input.shape[1],\n row * ratio, col * ratio))\n", "issue": "Bilinear_Upsampling requires to specify batch_size and num_input_channels\nReferring to this line:\nhttps://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py#L570\n\nthis piece of code doesn't work with `batch_size` and/or `num_input_channels` set to `None`\nit requires to specify the dimensions as scalar values\n\n", "before_files": [{"content": "\"\"\"\nAbstract conv interface\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n\nimport logging\nfrom six import reraise, integer_types\nimport sys\n\nimport theano\n\nfrom theano.tensor import as_tensor_variable, patternbroadcast\nfrom theano.tensor import get_scalar_constant_value, NotScalarConstantError\nfrom theano.gof import Apply, Op\n\nfrom six.moves import xrange\n\nimport warnings\nimport numpy\nimport numpy as np\n\ntry:\n from scipy.signal.signaltools import _valfrommode, _bvalfromboundary\n from scipy.signal.sigtools import _convolve2d\n imported_scipy_signal = True\nexcept ImportError:\n imported_scipy_signal = False\n\n\n__docformat__ = \"restructuredtext en\"\n_logger = logging.getLogger(\"theano.tensor.nnet.abstract_conv\")\n\n\ndef get_conv_output_shape(image_shape, kernel_shape,\n border_mode, subsample):\n \"\"\"\n This function compute the output shape of convolution operation.\n\n Parameters\n ----------\n image_shape: tuple of int (symbolic or numeric) corresponding to the input\n image shape. Its four (or five) element must correspond respectively\n to: batch size, number of input channels, height and width (and\n possibly depth) of the image. None where undefined.\n kernel_shape: tuple of int (symbolic or numeric) corresponding to the\n kernel shape. Its four (or five) elements must correspond respectively\n to: number of output channels, number of input channels, height and\n width (and possibly depth) of the kernel. None where undefined.\n border_mode: string, int (symbolic or numeric) or tuple of int (symbolic\n or numeric). If it is a string, it must be 'valid', 'half' or 'full'.\n If it is a tuple, its two (or three) elements respectively correspond\n to the padding on height and width (and possibly depth) axis.\n subsample: tuple of int (symbolic or numeric). Its or three elements\n espectively correspond to the subsampling on height and width (and\n possibly depth) axis.\n\n Returns\n -------\n output_shape: tuple of int corresponding to the output image shape. Its\n four element must correspond respectively to: batch size, number of\n output channels, height and width of the image. 
None where undefined.\n\n \"\"\"\n bsize, imshp = image_shape[0], image_shape[2:]\n nkern, kshp = kernel_shape[0], kernel_shape[2:]\n if isinstance(border_mode, tuple):\n out_shp = tuple(get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode[i], subsample[i])\n for i in range(len(subsample)))\n else:\n out_shp = tuple(get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode, subsample[i])\n for i in range(len(subsample)))\n return (bsize, nkern) + out_shp\n\n\ndef get_conv_shape_1axis(image_shape, kernel_shape,\n border_mode, subsample):\n \"\"\"\n This function compute the output shape of convolution operation.\n\n Parameters\n ----------\n image_shape: int or None. Corresponds to the input image shape on a\n given axis. None if undefined.\n kernel_shape: int or None. Corresponds to the kernel shape on a given\n axis. None if undefined.\n border_mode: string or int. If it is a string, it must be\n 'valid', 'half' or 'full'. If it is an integer, it must correspond to\n the padding on the considered axis.\n subsample: int. It must correspond to the subsampling on the\n considered axis.\n\n Returns\n -------\n out_shp: int corresponding to the output image shape on the\n considered axis. None if undefined.\n\n \"\"\"\n if None in [image_shape, kernel_shape, border_mode, subsample]:\n return None\n if border_mode == \"half\":\n pad = kernel_shape // 2\n elif border_mode == \"full\":\n pad = kernel_shape - 1\n elif border_mode == \"valid\":\n pad = 0\n else:\n pad = border_mode\n if pad < 0:\n raise ValueError(\"border_mode must be >= 0\")\n out_shp = (image_shape + 2 * pad - kernel_shape) // subsample + 1\n\n return out_shp\n\n\ndef conv2d(input,\n filters,\n input_shape=None,\n filter_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"This function will build the symbolic graph for convolving a mini-batch of a\n stack of 2D inputs with a set of 2D filters. The implementation is modelled\n after Convolutional Neural Networks (CNN).\n\n Refer to :func:`nnet.conv2d <theano.tensor.nnet.conv2d>` for a more detailed documentation.\n \"\"\"\n\n input = as_tensor_variable(input)\n filters = as_tensor_variable(filters)\n conv_op = AbstractConv2d(imshp=input_shape,\n kshp=filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n return conv_op(input, filters)\n\n\ndef conv2d_grad_wrt_inputs(output_grad,\n filters,\n input_shape,\n filter_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"Compute conv output gradient w.r.t its inputs\n\n This function builds the symbolic graph for getting the\n gradient of the output of a convolution (namely output_grad)\n w.r.t the input of the convolution, given a set of 2D filters\n used by the convolution, such that the output_grad is upsampled\n to the input_shape.\n\n Parameters\n ----------\n output_grad : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the tensor that\n will be upsampled or the output gradient of the convolution\n whose gradient will be taken with respect to the input of the\n convolution.\n filters : symbolic 4D tensor\n set of filters used in CNN layer of shape (output channels,\n input channels, filter rows, filter columns). 
See the\n optional parameter ``filter_shape``.\n input_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2\n The shape of the input (upsampled) parameter.\n A tuple/list of len 4, with the first two dimensions\n being None or int or Constant and the last two dimensions being\n Tensor or int or Constant.\n Not Optional, since given the output_grad shape\n and the subsample values, multiple input_shape may be\n plausible.\n filter_shape : None or [None/int/Constant] * 4\n The shape of the filters parameter. None or a tuple/list of len 4.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify that\n this element is not known at compile time.\n border_mode : str, int or tuple of two int\n Either of the following:\n\n ``'valid'``\n apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter\n shape + 1\n\n ``'full'``\n apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n\n ``'half'``\n pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a\n valid convolution. For filters with an odd number of rows\n and columns, this leads to the output shape being equal to\n the input shape. It is known as 'same' elsewhere.\n\n ``int``\n pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n\n ``(int1, int2)``\n pad input with a symmetric border of ``int1`` rows and\n ``int2`` columns, then perform a valid convolution.\n\n subsample : tuple of len 2\n The subsampling used in the forward pass. Also called strides\n elsewhere.\n filter_flip : bool\n If ``True``, will flip the filter rows and columns before\n sliding them over the input. This operation is normally\n referred to as a convolution, and this is the default. If\n ``False``, the filters are not flipped and the operation is\n referred to as a cross-correlation.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by convolutional layer. Tensor\n is of shape (batch size, output channels, output rows, output\n columns)\n\n Notes\n -----\n\n :note: If CuDNN is available, it will be used on the\n GPU. 
Otherwise, it is the *CorrMM* convolution that will be used\n \"caffe style convolution\".\n\n :note: This is only supported in Theano 0.8 or the development\n version until it is released.\n\n \"\"\"\n\n filters = as_tensor_variable(filters)\n output_grad = as_tensor_variable(output_grad)\n\n # checking the type of input_shape\n for dim in [0, 1]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n for dim in [2, 3]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorVariable,\n theano.tensor.TensorConstant,\n integer_types))\n\n # checking the type of filter_shape\n if filter_shape is not None:\n for dim in [0, 1, 2, 3]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n\n # setting the last two dimensions of input_shape to None, if\n # the type of these dimensions is TensorVariable.\n numerical_input_shape = list(input_shape)\n for dim in [2, 3]:\n if isinstance(input_shape[dim], theano.tensor.TensorVariable):\n numerical_input_shape[dim] = None\n\n grad_input_op = AbstractConv2d_gradInputs(imshp=numerical_input_shape,\n kshp=filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n\n return grad_input_op(filters, output_grad, input_shape[-2:])\n\n\ndef conv2d_grad_wrt_weights(input,\n output_grad,\n filter_shape,\n input_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"Compute conv output gradient w.r.t its weights\n\n This function will build the symbolic graph for getting the\n gradient of the output of a convolution (output_grad) w.r.t its wights.\n\n Parameters\n ----------\n input : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the input of\n the convolution in the forward pass.\n output_grad : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the gradient of\n the output of convolution.\n filter_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2\n The shape of the filter parameter. A tuple/list of len 4, with the\n first two dimensions being None or int or Constant and the last two\n dimensions being Tensor or int or Constant.\n Not Optional, since given the output_grad shape and\n the input_shape, multiple filter_shape may be plausible.\n input_shape : None or [None/int/Constant] * 4\n The shape of the input parameter. None or a tuple/list of len 4.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify\n that this element is not known at compile time.\n border_mode : str, int or tuple of two ints\n Either of the following:\n\n ``'valid'``\n apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter\n shape + 1\n\n ``'full'``\n apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n\n ``'half'``\n pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a\n valid convolution. For filters with an odd number of rows\n and columns, this leads to the output shape being equal to\n the input shape. 
It is known as 'same' elsewhere.\n\n ``int``\n pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n\n ``(int1, int2)``\n pad input with a symmetric border of ``int1`` rows and\n ``int2`` columns, then perform a valid convolution.\n\n subsample : tuple of len 2\n The subsampling used in the forward pass of the convolutional\n operation. Also called strides elsewhere.\n filter_flip : bool\n If ``True``, will flip the filter rows and columns before\n sliding them over the input. This operation is normally\n referred to as a convolution, and this is the default. If\n ``False``, the filters are not flipped and the operation is\n referred to as a cross-correlation.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by convolutional layer. Tensor\n is of shape (batch size, output channels, output rows, output\n columns)\n\n Notes\n -----\n\n :note: If CuDNN is available, it will be used on the\n GPU. Otherwise, it is the *CorrMM* convolution that will be used\n \"caffe style convolution\".\n\n :note: This is only supported in Theano 0.8 or the development\n version until it is released.\n\n \"\"\"\n\n input = as_tensor_variable(input)\n output_grad = as_tensor_variable(output_grad)\n\n # checking the type of filter_shape\n for dim in [0, 1]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n for dim in [2, 3]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorVariable,\n theano.tensor.TensorConstant,\n integer_types))\n\n # checking the type of input_shape\n if input_shape is not None:\n for dim in [0, 1, 2, 3]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n\n # setting the last two dimensions of filter_shape to None, if\n # the type of these dimensions is TensorVariable.\n numerical_filter_shape = list(filter_shape)\n for dim in [2, 3]:\n if isinstance(filter_shape[dim], theano.tensor.TensorVariable):\n numerical_filter_shape[dim] = None\n\n gradWeight_op = AbstractConv2d_gradWeights(imshp=input_shape,\n kshp=numerical_filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n\n return gradWeight_op(input, output_grad, filter_shape[:-2])\n\n\ndef bilinear_kernel_2D(ratio, normalize=True):\n \"\"\"Compute 2D kernel for bilinear upsampling\n\n This function builds the 2D kernel that can be used to upsample\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n ratio: int or Constant/Scalar Theano tensor of int* dtype\n the ratio by which an image will be upsampled by the returned filter\n in the 2D space.\n\n normalize: bool\n param normalize: indicates whether to normalize the kernel or not.\n Default is True.\n\n Returns\n -------\n symbolic 2D tensor\n the 2D kernels that can be applied to any given image to upsample it\n by the indicated ratio using bilinear interpolation in two dimensions.\n\n \"\"\"\n\n hkern = bilinear_kernel_1D(ratio=ratio, normalize=normalize).dimshuffle('x', 0)\n vkern = bilinear_kernel_1D(ratio=ratio, normalize=normalize).dimshuffle(0, 'x')\n kern = hkern * vkern\n return kern\n\n\ndef bilinear_kernel_1D(ratio, normalize=True):\n \"\"\"Compute 1D kernel for bilinear upsampling\n\n This function builds the 1D kernel that can be used to upsample\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n ratio: int or Constant/Scalar Theano tensor of int* dtype\n the ratio by which an image 
will be upsampled by the returned filter\n in the 2D space.\n\n normalize: bool\n param normalize: indicates whether to normalize the kernel or not.\n Default is True.\n\n Returns\n -------\n symbolic 1D tensor\n the 1D kernels that can be applied to any given image to upsample it\n by the indicated ratio using bilinear interpolation in one dimension.\n\n \"\"\"\n\n T = theano.tensor\n half_kern = T.arange(1, ratio + 1, dtype=theano.config.floatX)\n kern = T.concatenate([half_kern, half_kern[-2::-1]])\n\n if normalize:\n kern /= ratio\n return kern\n\n\ndef bilinear_upsampling(input,\n ratio,\n batch_size=None,\n num_input_channels=None,\n use_1D_kernel=True):\n \"\"\"Compute bilinear upsampling\n\n This function will build the symbolic graph for upsampling\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n input: symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size,\n input channels, input rows, input columns) that will be upsampled.\n\n ratio: int or Constant or Scalar Tensor of int* dtype\n the ratio by which the input is upsampled in the 2D space (row and\n col size).\n\n batch_size: None, int or Constant variable\n The size of the first dimension of the input variable.\n Optional, possibly used to choose an optimal implementation.\n batch_size will be used only if num_input_channels is not None.\n\n num_input_channels: None, int or Constant variable\n The size of the second dimension of the input variable.\n Optional, possibly used to choose an optimal implementation.\n num_input_channels will be used only if batch_size is not None.\n\n use_1D_kernel: bool\n if set to true, row and column will be upsampled seperately by 1D\n kernels, otherwise they are upsampled together using a 2D kernel. The\n final result is the same, only the speed can differ, given factors such\n as upsampling ratio.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by bilinear upsampling. Tensor\n is of shape (batch size, num_input_channels, input row size * ratio,\n input column size * ratio)\n\n Notes\n -----\n\n :note: The kernel used for bilinear interpolation is fixed (not learned).\n\n :note: When the upsampling ratio is even, the last row and column is\n repeated one extra time compared to the first row and column which makes\n the upsampled tensor asymmetrical on both sides. 
This does not happen when\n the upsampling ratio is odd.\n\n \"\"\"\n\n T = theano.tensor\n try:\n up_bs = batch_size * num_input_channels\n except TypeError:\n up_bs = None\n row, col = input.shape[2:]\n up_input = input.reshape((-1, 1, row, col))\n\n # concatenating the first and last row and column\n # first and last row\n concat_mat = T.concatenate((up_input[:, :, :1, :], up_input,\n up_input[:, :, -1:, :]), axis=2)\n # first and last col\n concat_mat = T.concatenate((concat_mat[:, :, :, :1], concat_mat,\n concat_mat[:, :, :, -1:]), axis=3)\n concat_col = col + 2\n\n pad = 2 * ratio - (ratio - 1) // 2 - 1\n\n if use_1D_kernel:\n kern = bilinear_kernel_1D(ratio=ratio, normalize=True)\n # upsampling rows\n upsampled_row = conv2d_grad_wrt_inputs(output_grad=concat_mat,\n filters=kern[np.newaxis,\n np.newaxis, :,\n np.newaxis],\n input_shape=(up_bs, 1,\n row * ratio,\n concat_col),\n filter_shape=(1, 1, None, 1),\n border_mode=(pad, 0),\n subsample=(ratio, 1),\n filter_flip=True)\n # upsampling cols\n upsampled_mat = conv2d_grad_wrt_inputs(output_grad=upsampled_row,\n filters=kern[np.newaxis,\n np.newaxis,\n np.newaxis, :],\n input_shape=(up_bs, 1,\n row * ratio,\n col * ratio),\n filter_shape=(1, 1, 1, None),\n border_mode=(0, pad),\n subsample=(1, ratio),\n filter_flip=True)\n else:\n kern = bilinear_kernel_2D(ratio=ratio, normalize=True)\n upsampled_mat = conv2d_grad_wrt_inputs(output_grad=concat_mat,\n filters=kern[np.newaxis,\n np.newaxis, :, :],\n input_shape=(up_bs, 1,\n row * ratio,\n col * ratio),\n filter_shape=(1, 1, None, None),\n border_mode=(pad, pad),\n subsample=(ratio, ratio),\n filter_flip=True)\n\n return upsampled_mat.reshape((batch_size, num_input_channels,\n row * ratio, col * ratio))\n\n\nclass BaseAbstractConv2d(Op):\n \"\"\"Base class for AbstractConv\n\n Define an abstract convolution op that will be replaced with the\n appropriate implementation\n\n Parameters\n ----------\n imshp: None, tuple/list of len 4 of int or Constant variable\n The shape of the input parameter.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify that this\n element is not known at compile time.\n imshp is defined w.r.t the forward conv.\n\n kshp: None, tuple/list of len 4 of int or Constant variable\n The shape of the filters parameter.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify that this\n element is not known at compile time.\n kshp is defined w.r.t the forward conv.\n\n border_mode: str, int or tuple of two int\n Either of the following:\n\n ``'valid'``: apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter shape + 1\n ``'full'``: apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n ``'half'``: pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a valid\n convolution. 
For filters with an odd number of rows and columns, this\n leads to the output shape being equal to the input shape.\n ``int``: pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows\n and ``int2`` columns, then perform a valid convolution.\n\n subsample: tuple of len 2\n Factor by which to subsample the output.\n Also called strides elsewhere.\n\n filter_flip: bool\n If ``True``, will flip the filter rows and columns\n before sliding them over the input. This operation is normally referred\n to as a convolution, and this is the default. If ``False``, the filters\n are not flipped and the operation is referred to as a\n cross-correlation.\n\n \"\"\"\n check_broadcast = False\n __props__ = ('border_mode', 'subsample', 'filter_flip', 'imshp', 'kshp')\n\n def __init__(self,\n imshp=None, kshp=None,\n border_mode=\"valid\", subsample=(1, 1),\n filter_flip=True):\n\n if isinstance(border_mode, integer_types):\n border_mode = (border_mode, border_mode)\n if isinstance(border_mode, tuple):\n pad_h, pad_w = map(int, border_mode)\n border_mode = (pad_h, pad_w)\n if border_mode == (0, 0):\n border_mode = 'valid'\n if not ((isinstance(border_mode, tuple) and min(border_mode) >= 0) or\n border_mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(border_mode))\n\n self.imshp = tuple(imshp) if imshp else (None,) * 4\n for imshp_i in self.imshp:\n if imshp_i is not None:\n # Components of imshp should be constant or ints\n try:\n get_scalar_constant_value(imshp_i,\n only_process_constants=True)\n except NotScalarConstantError:\n reraise(ValueError,\n ValueError(\"imshp should be None or a tuple of \"\n \"constant int values\"),\n sys.exc_info()[2])\n self.kshp = tuple(kshp) if kshp else (None,) * 4\n for kshp_i in self.kshp:\n if kshp_i is not None:\n # Components of kshp should be constant or ints\n try:\n get_scalar_constant_value(kshp_i,\n only_process_constants=True)\n except NotScalarConstantError:\n reraise(ValueError,\n ValueError(\"kshp should be None or a tuple of \"\n \"constant int values\"),\n sys.exc_info()[2])\n self.border_mode = border_mode\n self.filter_flip = filter_flip\n\n if len(subsample) != 2:\n raise ValueError(\"subsample must have two elements\")\n self.subsample = tuple(subsample)\n\n def flops(self, inp, outp):\n \"\"\" Useful with the hack in profilemode to print the MFlops\"\"\"\n # if the output shape is correct, then this gives the correct\n # flops for any direction, sampling, padding, and border mode\n inputs, filters = inp\n outputs, = outp\n assert inputs[1] == filters[1]\n # nb mul and add by output pixel\n flops = filters[2] * filters[3] * 2\n # nb flops by output image\n flops *= outputs[2] * outputs[3]\n # nb patch multiplied\n flops *= inputs[1] * filters[0] * inputs[0]\n return flops\n\n def do_constant_folding(self, node):\n # Disable constant folding since there is no implementation.\n # This may change in the future.\n return False\n\n def conv2d(self, img, kern, mode=\"valid\"):\n \"\"\"\n Basic slow python implementatation for DebugMode\n \"\"\"\n\n if not imported_scipy_signal:\n raise NotImplementedError(\n \"AbstractConv perform requires the python package\"\n \" for scipy.signal to be installed.\")\n if not (mode in ('valid', 'full')):\n raise ValueError(\n 'invalid mode {}, which must be either '\n '\"valid\" 
or \"full\"'.format(mode))\n\n out_shape = get_conv_output_shape(img.shape, kern.shape, mode, [1, 1])\n out = numpy.zeros(out_shape, dtype=img.dtype)\n val = _valfrommode(mode)\n bval = _bvalfromboundary('fill')\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', numpy.ComplexWarning)\n for b in xrange(img.shape[0]):\n for n in xrange(kern.shape[0]):\n for im0 in xrange(img.shape[1]):\n # some cast generates a warning here\n out[b, n, ...] += _convolve2d(img[b, im0, ...],\n kern[n, im0, ...],\n 1, val, bval, 0)\n return out\n\n\nclass AbstractConv2d(BaseAbstractConv2d):\n \"\"\" Abstract Op for the forward convolution.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n \"\"\"\n\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d, self).__init__(imshp, kshp,\n border_mode, subsample,\n filter_flip)\n\n def make_node(self, img, kern):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(img, theano.Variable):\n img = as_tensor_variable(img)\n if not isinstance(kern, theano.Variable):\n kern = as_tensor_variable(kern)\n ktype = img.type.clone(dtype=kern.dtype,\n broadcastable=kern.broadcastable)\n kern = ktype.filter_variable(kern)\n\n if img.type.ndim != 4:\n raise TypeError('img must be 4D tensor')\n if kern.type.ndim != 4:\n raise TypeError('kern must be 4D tensor')\n\n broadcastable = [img.broadcastable[0],\n kern.broadcastable[0],\n False, False]\n output = img.type.clone(broadcastable=broadcastable)()\n return Apply(self, [img, kern], [output])\n\n def perform(self, node, inp, out_):\n img, kern = inp\n img = numpy.asarray(img)\n kern = numpy.asarray(kern)\n o, = out_\n mode = self.border_mode\n\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n if mode == \"full\":\n mode = (kern.shape[2] - 1, kern.shape[3] - 1)\n elif mode == \"half\":\n mode = (kern.shape[2] // 2, kern.shape[3] // 2)\n if isinstance(mode, tuple):\n pad_h, pad_w = map(int, mode)\n mode = \"valid\"\n new_img = numpy.zeros((img.shape[0], img.shape[1],\n img.shape[2] + 2 * pad_h,\n img.shape[3] + 2 * pad_w), dtype=img.dtype)\n new_img[:, :, pad_h:img.shape[2] + pad_h, pad_w:img.shape[3] + pad_w] = img\n img = new_img\n if not self.filter_flip:\n kern = kern[:, :, ::-1, ::-1]\n conv_out = self.conv2d(img, kern, mode=\"valid\")\n conv_out = conv_out[:, :, ::self.subsample[0], ::self.subsample[1]]\n\n o[0] = node.outputs[0].type.filter(conv_out)\n\n def R_op(self, inputs, eval_points):\n rval = None\n if eval_points[0] is not None:\n rval = self.make_node(eval_points[0], inputs[1]).outputs[0]\n if eval_points[1] is not None:\n if rval is None:\n rval = self.make_node(inputs[0], eval_points[1]).outputs[0]\n else:\n rval += self.make_node(inputs[0], eval_points[1]).outputs[0]\n return [rval]\n\n def grad(self, inp, grads):\n bottom, weights = inp\n top, = grads\n d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n weights, top, bottom.shape[-2:])\n d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n\n bottom, top, weights.shape[-2:])\n\n # Make sure that the broadcastable pattern 
of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)\n d_bottom = bottom.type.filter_variable(d_bottom)\n d_weights = patternbroadcast(d_weights, weights.broadcastable)\n d_weights = weights.type.filter_variable(d_weights)\n return d_bottom, d_weights\n\n def infer_shape(self, node, input_shapes):\n imshp = input_shapes[0]\n kshp = input_shapes[1]\n\n # replace symbolic shapes with known constant shapes\n if self.imshp is not None:\n imshp = [imshp[i] if self.imshp[i] is None else self.imshp[i]\n for i in range(4)]\n if self.kshp is not None:\n kshp = [kshp[i] if self.kshp[i] is None else self.kshp[i]\n for i in range(4)]\n res = get_conv_output_shape(imshp, kshp, self.border_mode,\n self.subsample)\n return [res]\n\n\nclass AbstractConv2d_gradWeights(BaseAbstractConv2d):\n \"\"\"Gradient wrt. filters for `AbstractConv2d`.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n\n :note: You will not want to use this directly, but rely on\n Theano's automatic differentiation or graph optimization to\n use it as needed.\n\n \"\"\"\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp,\n border_mode,\n subsample,\n filter_flip)\n\n # Update shape/height_width\n def make_node(self, img, topgrad, shape):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(img, theano.Variable):\n img = as_tensor_variable(img)\n if not isinstance(topgrad, theano.Variable):\n topgrad = as_tensor_variable(topgrad)\n gtype = img.type.clone(dtype=topgrad.dtype,\n broadcastable=topgrad.broadcastable)\n topgrad = gtype.filter_variable(topgrad)\n\n if img.type.ndim != 4:\n raise TypeError('img must be 4D tensor')\n if topgrad.type.ndim != 4:\n raise TypeError('topgrad must be 4D tensor')\n\n shape = as_tensor_variable(shape)\n broadcastable = [topgrad.broadcastable[1],\n img.broadcastable[1],\n False, False]\n output = img.type.clone(broadcastable=broadcastable)()\n return Apply(self, [img, topgrad, shape], [output])\n\n def perform(self, node, inp, out_):\n img, topgrad, shape = inp\n img = numpy.asarray(img)\n topgrad = numpy.asarray(topgrad)\n\n o, = out_\n\n mode = self.border_mode\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n if mode == \"full\":\n mode = (shape[0] - 1, shape[1] - 1)\n elif mode == \"half\":\n mode = (shape[0] // 2, shape[1] // 2)\n if isinstance(mode, tuple):\n pad_h, pad_w = map(int, mode)\n mode = \"valid\"\n new_img = numpy.zeros((img.shape[0], img.shape[1],\n img.shape[2] + 2 * pad_h,\n img.shape[3] + 2 * pad_w), dtype=img.dtype)\n new_img[:, :, pad_h:img.shape[2] + pad_h, pad_w:img.shape[3] + pad_w] = img\n img = new_img\n\n if self.subsample[0] > 1 or self.subsample[1] > 1:\n new_shape = (topgrad.shape[0], topgrad.shape[1],\n img.shape[2] - shape[0] + 1,\n img.shape[3] - shape[1] + 1)\n new_topgrad = numpy.zeros((new_shape), dtype=topgrad.dtype)\n new_topgrad[:, :, ::self.subsample[0], ::self.subsample[1]] = topgrad\n topgrad = 
new_topgrad\n\n topgrad = topgrad.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]\n img = img.transpose(1, 0, 2, 3)\n kern = self.conv2d(img, topgrad, mode=\"valid\")\n if self.filter_flip:\n kern = kern.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]\n else:\n kern = kern.transpose(1, 0, 2, 3)\n o[0] = node.outputs[0].type.filter(kern)\n\n def grad(self, inp, grads):\n bottom, top = inp[:2]\n weights, = grads\n d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n weights,\n top,\n bottom.shape[-2:])\n d_top = AbstractConv2d(self.imshp,\n self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(bottom, weights)\n # Make sure that the broadcastable pattern of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)\n d_bottom = bottom.type.filter_variable(d_bottom)\n d_top = patternbroadcast(d_top, top.broadcastable)\n d_top = top.type.filter_variable(d_top)\n\n d_height_width = (theano.gradient.DisconnectedType()(),)\n return (d_bottom, d_top) + d_height_width\n\n def connection_pattern(self, node):\n return [[1], [1], [0]] # no connection to height, width\n\n def infer_shape(self, node, input_shapes):\n # We use self.kshp (that was passed when creating the Op) if possible,\n # or fall back to the `shape` input of the node.\n # TODO: when there is no subsampling, try to infer the kernel shape\n # from the shapes of inputs.\n imshp = input_shapes[0]\n topshp = input_shapes[1]\n kshp = self.kshp[:] if self.kshp is not None else [None] * 4\n fallback_kshp = [topshp[1], imshp[1], node.inputs[2][0], node.inputs[2][1]]\n kshp = [fallback_kshp[i] if kshp[i] is None else kshp[i]\n for i in range(4)]\n return [kshp]\n\n\nclass AbstractConv2d_gradInputs(BaseAbstractConv2d):\n \"\"\"Gradient wrt. 
inputs for `AbstractConv2d`.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n\n :note: You will not want to use this directly, but rely on\n Theano's automatic differentiation or graph optimization to\n use it as needed.\n\n \"\"\"\n\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d_gradInputs, self).__init__(imshp, kshp,\n border_mode,\n subsample,\n filter_flip)\n\n # Update shape/height_width\n def make_node(self, kern, topgrad, shape):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(kern, theano.Variable):\n kern = as_tensor_variable(kern)\n if not isinstance(topgrad, theano.Variable):\n topgrad = as_tensor_variable(topgrad)\n gtype = kern.type.clone(dtype=topgrad.dtype,\n broadcastable=topgrad.broadcastable)\n topgrad = gtype.filter_variable(topgrad)\n\n if kern.type.ndim != 4:\n raise TypeError('kern must be 4D tensor')\n if topgrad.type.ndim != 4:\n raise TypeError('topgrad must be 4D tensor')\n\n shape = as_tensor_variable(shape)\n broadcastable = [topgrad.type.broadcastable[0],\n kern.type.broadcastable[1],\n False, False]\n output = kern.type.clone(broadcastable=broadcastable)()\n return Apply(self, [kern, topgrad, shape], [output])\n\n def perform(self, node, inp, out_):\n kern, topgrad, shape = inp\n kern = numpy.asarray(kern)\n topgrad = numpy.asarray(topgrad)\n o, = out_\n\n mode = self.border_mode\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n pad_h, pad_w = 0, 0\n if mode == \"full\":\n pad_h, pad_w = (kern.shape[2] - 1, kern.shape[3] - 1)\n elif mode == \"half\":\n pad_h, pad_w = (kern.shape[2] // 2, kern.shape[3] // 2)\n elif isinstance(mode, tuple):\n pad_h, pad_w = map(int, self.border_mode)\n if self.subsample[0] > 1 or self.subsample[1] > 1:\n new_shape = (topgrad.shape[0], topgrad.shape[1],\n shape[0] + 2 * pad_h - kern.shape[2] + 1,\n shape[1] + 2 * pad_w - kern.shape[3] + 1)\n new_topgrad = numpy.zeros((new_shape), dtype=topgrad.dtype)\n new_topgrad[:, :, ::self.subsample[0], ::self.subsample[1]] = topgrad\n topgrad = new_topgrad\n kern = kern.transpose(1, 0, 2, 3)\n if self.filter_flip:\n topgrad = topgrad[:, :, ::-1, ::-1]\n img = self.conv2d(topgrad, kern, mode=\"full\")\n if self.filter_flip:\n img = img[:, :, ::-1, ::-1]\n if pad_h > 0 or pad_w > 0:\n img = img[:, :, pad_h:img.shape[2] - pad_h, pad_w:img.shape[3] - pad_w]\n o[0] = node.outputs[0].type.filter(img)\n\n def grad(self, inp, grads):\n weights, top = inp[:2]\n bottom, = grads\n d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,\n self.border_mode,\n self.subsample)(\n bottom, top,\n weights.shape[-2:])\n d_top = AbstractConv2d(self.imshp, self.kshp,\n self.border_mode, self.subsample)(\n bottom, weights)\n # Make sure that the broadcastable pattern of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_weights = patternbroadcast(d_weights, weights.broadcastable)\n d_weights = weights.type.filter_variable(d_weights)\n d_top = patternbroadcast(d_top, top.broadcastable)\n d_top = top.type.filter_variable(d_top)\n\n 
d_height_width = (theano.gradient.DisconnectedType()(),)\n return (d_weights, d_top) + d_height_width\n\n def connection_pattern(self, node):\n return [[1], [1], [0]] # no connection to height, width\n\n def infer_shape(self, node, input_shapes):\n # We use self.imshp (that was passed when creating the Op) if possible,\n # or fall back to the `shape` input of the node.\n # TODO: when there is no subsampling, try to infer the image shape\n # from the shapes of inputs.\n kshp = input_shapes[0]\n topshp = input_shapes[1]\n imshp = self.imshp[:] if self.imshp is not None else [None] * 4\n fallback_imshp = [topshp[0], kshp[1], node.inputs[2][0],\n node.inputs[2][1]]\n imshp = [fallback_imshp[i] if imshp[i] is None else imshp[i]\n for i in range(4)]\n return [imshp]\n", "path": "theano/tensor/nnet/abstract_conv.py"}], "after_files": [{"content": "\"\"\"\nAbstract conv interface\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n\nimport logging\nfrom six import reraise, integer_types\nimport sys\n\nimport theano\n\nfrom theano.tensor import as_tensor_variable, patternbroadcast\nfrom theano.tensor import get_scalar_constant_value, NotScalarConstantError\nfrom theano.gof import Apply, Op\n\nfrom six.moves import xrange\n\nimport warnings\nimport numpy\nimport numpy as np\n\ntry:\n from scipy.signal.signaltools import _valfrommode, _bvalfromboundary\n from scipy.signal.sigtools import _convolve2d\n imported_scipy_signal = True\nexcept ImportError:\n imported_scipy_signal = False\n\n\n__docformat__ = \"restructuredtext en\"\n_logger = logging.getLogger(\"theano.tensor.nnet.abstract_conv\")\n\n\ndef get_conv_output_shape(image_shape, kernel_shape,\n border_mode, subsample):\n \"\"\"\n This function compute the output shape of convolution operation.\n\n Parameters\n ----------\n image_shape: tuple of int (symbolic or numeric) corresponding to the input\n image shape. Its four (or five) element must correspond respectively\n to: batch size, number of input channels, height and width (and\n possibly depth) of the image. None where undefined.\n kernel_shape: tuple of int (symbolic or numeric) corresponding to the\n kernel shape. Its four (or five) elements must correspond respectively\n to: number of output channels, number of input channels, height and\n width (and possibly depth) of the kernel. None where undefined.\n border_mode: string, int (symbolic or numeric) or tuple of int (symbolic\n or numeric). If it is a string, it must be 'valid', 'half' or 'full'.\n If it is a tuple, its two (or three) elements respectively correspond\n to the padding on height and width (and possibly depth) axis.\n subsample: tuple of int (symbolic or numeric). Its or three elements\n espectively correspond to the subsampling on height and width (and\n possibly depth) axis.\n\n Returns\n -------\n output_shape: tuple of int corresponding to the output image shape. Its\n four element must correspond respectively to: batch size, number of\n output channels, height and width of the image. 
None where undefined.\n\n \"\"\"\n bsize, imshp = image_shape[0], image_shape[2:]\n nkern, kshp = kernel_shape[0], kernel_shape[2:]\n if isinstance(border_mode, tuple):\n out_shp = tuple(get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode[i], subsample[i])\n for i in range(len(subsample)))\n else:\n out_shp = tuple(get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode, subsample[i])\n for i in range(len(subsample)))\n return (bsize, nkern) + out_shp\n\n\ndef get_conv_shape_1axis(image_shape, kernel_shape,\n border_mode, subsample):\n \"\"\"\n This function compute the output shape of convolution operation.\n\n Parameters\n ----------\n image_shape: int or None. Corresponds to the input image shape on a\n given axis. None if undefined.\n kernel_shape: int or None. Corresponds to the kernel shape on a given\n axis. None if undefined.\n border_mode: string or int. If it is a string, it must be\n 'valid', 'half' or 'full'. If it is an integer, it must correspond to\n the padding on the considered axis.\n subsample: int. It must correspond to the subsampling on the\n considered axis.\n\n Returns\n -------\n out_shp: int corresponding to the output image shape on the\n considered axis. None if undefined.\n\n \"\"\"\n if None in [image_shape, kernel_shape, border_mode, subsample]:\n return None\n if border_mode == \"half\":\n pad = kernel_shape // 2\n elif border_mode == \"full\":\n pad = kernel_shape - 1\n elif border_mode == \"valid\":\n pad = 0\n else:\n pad = border_mode\n if pad < 0:\n raise ValueError(\"border_mode must be >= 0\")\n out_shp = (image_shape + 2 * pad - kernel_shape) // subsample + 1\n\n return out_shp\n\n\ndef conv2d(input,\n filters,\n input_shape=None,\n filter_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"This function will build the symbolic graph for convolving a mini-batch of a\n stack of 2D inputs with a set of 2D filters. The implementation is modelled\n after Convolutional Neural Networks (CNN).\n\n Refer to :func:`nnet.conv2d <theano.tensor.nnet.conv2d>` for a more detailed documentation.\n \"\"\"\n\n input = as_tensor_variable(input)\n filters = as_tensor_variable(filters)\n conv_op = AbstractConv2d(imshp=input_shape,\n kshp=filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n return conv_op(input, filters)\n\n\ndef conv2d_grad_wrt_inputs(output_grad,\n filters,\n input_shape,\n filter_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"Compute conv output gradient w.r.t its inputs\n\n This function builds the symbolic graph for getting the\n gradient of the output of a convolution (namely output_grad)\n w.r.t the input of the convolution, given a set of 2D filters\n used by the convolution, such that the output_grad is upsampled\n to the input_shape.\n\n Parameters\n ----------\n output_grad : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the tensor that\n will be upsampled or the output gradient of the convolution\n whose gradient will be taken with respect to the input of the\n convolution.\n filters : symbolic 4D tensor\n set of filters used in CNN layer of shape (output channels,\n input channels, filter rows, filter columns). 
See the\n optional parameter ``filter_shape``.\n input_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2\n The shape of the input (upsampled) parameter.\n A tuple/list of len 4, with the first two dimensions\n being None or int or Constant and the last two dimensions being\n Tensor or int or Constant.\n Not Optional, since given the output_grad shape\n and the subsample values, multiple input_shape may be\n plausible.\n filter_shape : None or [None/int/Constant] * 4\n The shape of the filters parameter. None or a tuple/list of len 4.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify that\n this element is not known at compile time.\n border_mode : str, int or tuple of two int\n Either of the following:\n\n ``'valid'``\n apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter\n shape + 1\n\n ``'full'``\n apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n\n ``'half'``\n pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a\n valid convolution. For filters with an odd number of rows\n and columns, this leads to the output shape being equal to\n the input shape. It is known as 'same' elsewhere.\n\n ``int``\n pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n\n ``(int1, int2)``\n pad input with a symmetric border of ``int1`` rows and\n ``int2`` columns, then perform a valid convolution.\n\n subsample : tuple of len 2\n The subsampling used in the forward pass. Also called strides\n elsewhere.\n filter_flip : bool\n If ``True``, will flip the filter rows and columns before\n sliding them over the input. This operation is normally\n referred to as a convolution, and this is the default. If\n ``False``, the filters are not flipped and the operation is\n referred to as a cross-correlation.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by convolutional layer. Tensor\n is of shape (batch size, output channels, output rows, output\n columns)\n\n Notes\n -----\n\n :note: If CuDNN is available, it will be used on the\n GPU. 
Otherwise, it is the *CorrMM* convolution that will be used\n \"caffe style convolution\".\n\n :note: This is only supported in Theano 0.8 or the development\n version until it is released.\n\n \"\"\"\n\n filters = as_tensor_variable(filters)\n output_grad = as_tensor_variable(output_grad)\n\n # checking the type of input_shape\n for dim in [0, 1]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n for dim in [2, 3]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorVariable,\n theano.tensor.TensorConstant,\n integer_types))\n\n # checking the type of filter_shape\n if filter_shape is not None:\n for dim in [0, 1, 2, 3]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n\n # setting the last two dimensions of input_shape to None, if\n # the type of these dimensions is TensorVariable.\n numerical_input_shape = list(input_shape)\n for dim in [2, 3]:\n if isinstance(input_shape[dim], theano.tensor.TensorVariable):\n numerical_input_shape[dim] = None\n\n grad_input_op = AbstractConv2d_gradInputs(imshp=numerical_input_shape,\n kshp=filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n\n return grad_input_op(filters, output_grad, input_shape[-2:])\n\n\ndef conv2d_grad_wrt_weights(input,\n output_grad,\n filter_shape,\n input_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"Compute conv output gradient w.r.t its weights\n\n This function will build the symbolic graph for getting the\n gradient of the output of a convolution (output_grad) w.r.t its wights.\n\n Parameters\n ----------\n input : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the input of\n the convolution in the forward pass.\n output_grad : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the gradient of\n the output of convolution.\n filter_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2\n The shape of the filter parameter. A tuple/list of len 4, with the\n first two dimensions being None or int or Constant and the last two\n dimensions being Tensor or int or Constant.\n Not Optional, since given the output_grad shape and\n the input_shape, multiple filter_shape may be plausible.\n input_shape : None or [None/int/Constant] * 4\n The shape of the input parameter. None or a tuple/list of len 4.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify\n that this element is not known at compile time.\n border_mode : str, int or tuple of two ints\n Either of the following:\n\n ``'valid'``\n apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter\n shape + 1\n\n ``'full'``\n apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n\n ``'half'``\n pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a\n valid convolution. For filters with an odd number of rows\n and columns, this leads to the output shape being equal to\n the input shape. 
It is known as 'same' elsewhere.\n\n ``int``\n pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n\n ``(int1, int2)``\n pad input with a symmetric border of ``int1`` rows and\n ``int2`` columns, then perform a valid convolution.\n\n subsample : tuple of len 2\n The subsampling used in the forward pass of the convolutional\n operation. Also called strides elsewhere.\n filter_flip : bool\n If ``True``, will flip the filter rows and columns before\n sliding them over the input. This operation is normally\n referred to as a convolution, and this is the default. If\n ``False``, the filters are not flipped and the operation is\n referred to as a cross-correlation.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by convolutional layer. Tensor\n is of shape (batch size, output channels, output rows, output\n columns)\n\n Notes\n -----\n\n :note: If CuDNN is available, it will be used on the\n GPU. Otherwise, it is the *CorrMM* convolution that will be used\n \"caffe style convolution\".\n\n :note: This is only supported in Theano 0.8 or the development\n version until it is released.\n\n \"\"\"\n\n input = as_tensor_variable(input)\n output_grad = as_tensor_variable(output_grad)\n\n # checking the type of filter_shape\n for dim in [0, 1]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n for dim in [2, 3]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorVariable,\n theano.tensor.TensorConstant,\n integer_types))\n\n # checking the type of input_shape\n if input_shape is not None:\n for dim in [0, 1, 2, 3]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n\n # setting the last two dimensions of filter_shape to None, if\n # the type of these dimensions is TensorVariable.\n numerical_filter_shape = list(filter_shape)\n for dim in [2, 3]:\n if isinstance(filter_shape[dim], theano.tensor.TensorVariable):\n numerical_filter_shape[dim] = None\n\n gradWeight_op = AbstractConv2d_gradWeights(imshp=input_shape,\n kshp=numerical_filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n\n return gradWeight_op(input, output_grad, filter_shape[:-2])\n\n\ndef bilinear_kernel_2D(ratio, normalize=True):\n \"\"\"Compute 2D kernel for bilinear upsampling\n\n This function builds the 2D kernel that can be used to upsample\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n ratio: int or Constant/Scalar Theano tensor of int* dtype\n the ratio by which an image will be upsampled by the returned filter\n in the 2D space.\n\n normalize: bool\n param normalize: indicates whether to normalize the kernel or not.\n Default is True.\n\n Returns\n -------\n symbolic 2D tensor\n the 2D kernels that can be applied to any given image to upsample it\n by the indicated ratio using bilinear interpolation in two dimensions.\n\n \"\"\"\n\n hkern = bilinear_kernel_1D(ratio=ratio, normalize=normalize).dimshuffle('x', 0)\n vkern = bilinear_kernel_1D(ratio=ratio, normalize=normalize).dimshuffle(0, 'x')\n kern = hkern * vkern\n return kern\n\n\ndef bilinear_kernel_1D(ratio, normalize=True):\n \"\"\"Compute 1D kernel for bilinear upsampling\n\n This function builds the 1D kernel that can be used to upsample\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n ratio: int or Constant/Scalar Theano tensor of int* dtype\n the ratio by which an image 
will be upsampled by the returned filter\n in the 2D space.\n\n normalize: bool\n param normalize: indicates whether to normalize the kernel or not.\n Default is True.\n\n Returns\n -------\n symbolic 1D tensor\n the 1D kernels that can be applied to any given image to upsample it\n by the indicated ratio using bilinear interpolation in one dimension.\n\n \"\"\"\n\n T = theano.tensor\n half_kern = T.arange(1, ratio + 1, dtype=theano.config.floatX)\n kern = T.concatenate([half_kern, half_kern[-2::-1]])\n\n if normalize:\n kern /= ratio\n return kern\n\n\ndef bilinear_upsampling(input,\n ratio,\n batch_size=None,\n num_input_channels=None,\n use_1D_kernel=True):\n \"\"\"Compute bilinear upsampling\n\n This function will build the symbolic graph for upsampling\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n input: symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size,\n input channels, input rows, input columns) that will be upsampled.\n\n ratio: int or Constant or Scalar Tensor of int* dtype\n the ratio by which the input is upsampled in the 2D space (row and\n col size).\n\n batch_size: None, int or Constant variable\n The size of the first dimension of the input variable.\n Optional, possibly used to choose an optimal implementation.\n batch_size will be used only if num_input_channels is not None.\n\n num_input_channels: None, int or Constant variable\n The size of the second dimension of the input variable.\n Optional, possibly used to choose an optimal implementation.\n num_input_channels will be used only if batch_size is not None.\n\n use_1D_kernel: bool\n if set to true, row and column will be upsampled seperately by 1D\n kernels, otherwise they are upsampled together using a 2D kernel. The\n final result is the same, only the speed can differ, given factors such\n as upsampling ratio.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by bilinear upsampling. Tensor\n is of shape (batch size, num_input_channels, input row size * ratio,\n input column size * ratio)\n\n Notes\n -----\n\n :note: The kernel used for bilinear interpolation is fixed (not learned).\n\n :note: When the upsampling ratio is even, the last row and column is\n repeated one extra time compared to the first row and column which makes\n the upsampled tensor asymmetrical on both sides. 
This does not happen when\n the upsampling ratio is odd.\n\n \"\"\"\n\n T = theano.tensor\n try:\n up_bs = batch_size * num_input_channels\n except TypeError:\n up_bs = None\n row, col = input.shape[2:]\n up_input = input.reshape((-1, 1, row, col))\n\n # concatenating the first and last row and column\n # first and last row\n concat_mat = T.concatenate((up_input[:, :, :1, :], up_input,\n up_input[:, :, -1:, :]), axis=2)\n # first and last col\n concat_mat = T.concatenate((concat_mat[:, :, :, :1], concat_mat,\n concat_mat[:, :, :, -1:]), axis=3)\n concat_col = col + 2\n\n pad = 2 * ratio - (ratio - 1) // 2 - 1\n\n if use_1D_kernel:\n kern = bilinear_kernel_1D(ratio=ratio, normalize=True)\n # upsampling rows\n upsampled_row = conv2d_grad_wrt_inputs(output_grad=concat_mat,\n filters=kern[np.newaxis,\n np.newaxis, :,\n np.newaxis],\n input_shape=(up_bs, 1,\n row * ratio,\n concat_col),\n filter_shape=(1, 1, None, 1),\n border_mode=(pad, 0),\n subsample=(ratio, 1),\n filter_flip=True)\n # upsampling cols\n upsampled_mat = conv2d_grad_wrt_inputs(output_grad=upsampled_row,\n filters=kern[np.newaxis,\n np.newaxis,\n np.newaxis, :],\n input_shape=(up_bs, 1,\n row * ratio,\n col * ratio),\n filter_shape=(1, 1, 1, None),\n border_mode=(0, pad),\n subsample=(1, ratio),\n filter_flip=True)\n else:\n kern = bilinear_kernel_2D(ratio=ratio, normalize=True)\n upsampled_mat = conv2d_grad_wrt_inputs(output_grad=concat_mat,\n filters=kern[np.newaxis,\n np.newaxis, :, :],\n input_shape=(up_bs, 1,\n row * ratio,\n col * ratio),\n filter_shape=(1, 1, None, None),\n border_mode=(pad, pad),\n subsample=(ratio, ratio),\n filter_flip=True)\n\n return upsampled_mat.reshape((input.shape[0], input.shape[1],\n row * ratio, col * ratio))\n\n\nclass BaseAbstractConv2d(Op):\n \"\"\"Base class for AbstractConv\n\n Define an abstract convolution op that will be replaced with the\n appropriate implementation\n\n Parameters\n ----------\n imshp: None, tuple/list of len 4 of int or Constant variable\n The shape of the input parameter.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify that this\n element is not known at compile time.\n imshp is defined w.r.t the forward conv.\n\n kshp: None, tuple/list of len 4 of int or Constant variable\n The shape of the filters parameter.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify that this\n element is not known at compile time.\n kshp is defined w.r.t the forward conv.\n\n border_mode: str, int or tuple of two int\n Either of the following:\n\n ``'valid'``: apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter shape + 1\n ``'full'``: apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n ``'half'``: pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a valid\n convolution. 
For filters with an odd number of rows and columns, this\n leads to the output shape being equal to the input shape.\n ``int``: pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows\n and ``int2`` columns, then perform a valid convolution.\n\n subsample: tuple of len 2\n Factor by which to subsample the output.\n Also called strides elsewhere.\n\n filter_flip: bool\n If ``True``, will flip the filter rows and columns\n before sliding them over the input. This operation is normally referred\n to as a convolution, and this is the default. If ``False``, the filters\n are not flipped and the operation is referred to as a\n cross-correlation.\n\n \"\"\"\n check_broadcast = False\n __props__ = ('border_mode', 'subsample', 'filter_flip', 'imshp', 'kshp')\n\n def __init__(self,\n imshp=None, kshp=None,\n border_mode=\"valid\", subsample=(1, 1),\n filter_flip=True):\n\n if isinstance(border_mode, integer_types):\n border_mode = (border_mode, border_mode)\n if isinstance(border_mode, tuple):\n pad_h, pad_w = map(int, border_mode)\n border_mode = (pad_h, pad_w)\n if border_mode == (0, 0):\n border_mode = 'valid'\n if not ((isinstance(border_mode, tuple) and min(border_mode) >= 0) or\n border_mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(border_mode))\n\n self.imshp = tuple(imshp) if imshp else (None,) * 4\n for imshp_i in self.imshp:\n if imshp_i is not None:\n # Components of imshp should be constant or ints\n try:\n get_scalar_constant_value(imshp_i,\n only_process_constants=True)\n except NotScalarConstantError:\n reraise(ValueError,\n ValueError(\"imshp should be None or a tuple of \"\n \"constant int values\"),\n sys.exc_info()[2])\n self.kshp = tuple(kshp) if kshp else (None,) * 4\n for kshp_i in self.kshp:\n if kshp_i is not None:\n # Components of kshp should be constant or ints\n try:\n get_scalar_constant_value(kshp_i,\n only_process_constants=True)\n except NotScalarConstantError:\n reraise(ValueError,\n ValueError(\"kshp should be None or a tuple of \"\n \"constant int values\"),\n sys.exc_info()[2])\n self.border_mode = border_mode\n self.filter_flip = filter_flip\n\n if len(subsample) != 2:\n raise ValueError(\"subsample must have two elements\")\n self.subsample = tuple(subsample)\n\n def flops(self, inp, outp):\n \"\"\" Useful with the hack in profilemode to print the MFlops\"\"\"\n # if the output shape is correct, then this gives the correct\n # flops for any direction, sampling, padding, and border mode\n inputs, filters = inp\n outputs, = outp\n assert inputs[1] == filters[1]\n # nb mul and add by output pixel\n flops = filters[2] * filters[3] * 2\n # nb flops by output image\n flops *= outputs[2] * outputs[3]\n # nb patch multiplied\n flops *= inputs[1] * filters[0] * inputs[0]\n return flops\n\n def do_constant_folding(self, node):\n # Disable constant folding since there is no implementation.\n # This may change in the future.\n return False\n\n def conv2d(self, img, kern, mode=\"valid\"):\n \"\"\"\n Basic slow python implementatation for DebugMode\n \"\"\"\n\n if not imported_scipy_signal:\n raise NotImplementedError(\n \"AbstractConv perform requires the python package\"\n \" for scipy.signal to be installed.\")\n if not (mode in ('valid', 'full')):\n raise ValueError(\n 'invalid mode {}, which must be either '\n '\"valid\" 
or \"full\"'.format(mode))\n\n out_shape = get_conv_output_shape(img.shape, kern.shape, mode, [1, 1])\n out = numpy.zeros(out_shape, dtype=img.dtype)\n val = _valfrommode(mode)\n bval = _bvalfromboundary('fill')\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', numpy.ComplexWarning)\n for b in xrange(img.shape[0]):\n for n in xrange(kern.shape[0]):\n for im0 in xrange(img.shape[1]):\n # some cast generates a warning here\n out[b, n, ...] += _convolve2d(img[b, im0, ...],\n kern[n, im0, ...],\n 1, val, bval, 0)\n return out\n\n\nclass AbstractConv2d(BaseAbstractConv2d):\n \"\"\" Abstract Op for the forward convolution.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n \"\"\"\n\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d, self).__init__(imshp, kshp,\n border_mode, subsample,\n filter_flip)\n\n def make_node(self, img, kern):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(img, theano.Variable):\n img = as_tensor_variable(img)\n if not isinstance(kern, theano.Variable):\n kern = as_tensor_variable(kern)\n ktype = img.type.clone(dtype=kern.dtype,\n broadcastable=kern.broadcastable)\n kern = ktype.filter_variable(kern)\n\n if img.type.ndim != 4:\n raise TypeError('img must be 4D tensor')\n if kern.type.ndim != 4:\n raise TypeError('kern must be 4D tensor')\n\n broadcastable = [img.broadcastable[0],\n kern.broadcastable[0],\n False, False]\n output = img.type.clone(broadcastable=broadcastable)()\n return Apply(self, [img, kern], [output])\n\n def perform(self, node, inp, out_):\n img, kern = inp\n img = numpy.asarray(img)\n kern = numpy.asarray(kern)\n o, = out_\n mode = self.border_mode\n\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n if mode == \"full\":\n mode = (kern.shape[2] - 1, kern.shape[3] - 1)\n elif mode == \"half\":\n mode = (kern.shape[2] // 2, kern.shape[3] // 2)\n if isinstance(mode, tuple):\n pad_h, pad_w = map(int, mode)\n mode = \"valid\"\n new_img = numpy.zeros((img.shape[0], img.shape[1],\n img.shape[2] + 2 * pad_h,\n img.shape[3] + 2 * pad_w), dtype=img.dtype)\n new_img[:, :, pad_h:img.shape[2] + pad_h, pad_w:img.shape[3] + pad_w] = img\n img = new_img\n if not self.filter_flip:\n kern = kern[:, :, ::-1, ::-1]\n conv_out = self.conv2d(img, kern, mode=\"valid\")\n conv_out = conv_out[:, :, ::self.subsample[0], ::self.subsample[1]]\n\n o[0] = node.outputs[0].type.filter(conv_out)\n\n def R_op(self, inputs, eval_points):\n rval = None\n if eval_points[0] is not None:\n rval = self.make_node(eval_points[0], inputs[1]).outputs[0]\n if eval_points[1] is not None:\n if rval is None:\n rval = self.make_node(inputs[0], eval_points[1]).outputs[0]\n else:\n rval += self.make_node(inputs[0], eval_points[1]).outputs[0]\n return [rval]\n\n def grad(self, inp, grads):\n bottom, weights = inp\n top, = grads\n d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n weights, top, bottom.shape[-2:])\n d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n\n bottom, top, weights.shape[-2:])\n\n # Make sure that the broadcastable pattern 
of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)\n d_bottom = bottom.type.filter_variable(d_bottom)\n d_weights = patternbroadcast(d_weights, weights.broadcastable)\n d_weights = weights.type.filter_variable(d_weights)\n return d_bottom, d_weights\n\n def infer_shape(self, node, input_shapes):\n imshp = input_shapes[0]\n kshp = input_shapes[1]\n\n # replace symbolic shapes with known constant shapes\n if self.imshp is not None:\n imshp = [imshp[i] if self.imshp[i] is None else self.imshp[i]\n for i in range(4)]\n if self.kshp is not None:\n kshp = [kshp[i] if self.kshp[i] is None else self.kshp[i]\n for i in range(4)]\n res = get_conv_output_shape(imshp, kshp, self.border_mode,\n self.subsample)\n return [res]\n\n\nclass AbstractConv2d_gradWeights(BaseAbstractConv2d):\n \"\"\"Gradient wrt. filters for `AbstractConv2d`.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n\n :note: You will not want to use this directly, but rely on\n Theano's automatic differentiation or graph optimization to\n use it as needed.\n\n \"\"\"\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp,\n border_mode,\n subsample,\n filter_flip)\n\n # Update shape/height_width\n def make_node(self, img, topgrad, shape):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(img, theano.Variable):\n img = as_tensor_variable(img)\n if not isinstance(topgrad, theano.Variable):\n topgrad = as_tensor_variable(topgrad)\n gtype = img.type.clone(dtype=topgrad.dtype,\n broadcastable=topgrad.broadcastable)\n topgrad = gtype.filter_variable(topgrad)\n\n if img.type.ndim != 4:\n raise TypeError('img must be 4D tensor')\n if topgrad.type.ndim != 4:\n raise TypeError('topgrad must be 4D tensor')\n\n shape = as_tensor_variable(shape)\n broadcastable = [topgrad.broadcastable[1],\n img.broadcastable[1],\n False, False]\n output = img.type.clone(broadcastable=broadcastable)()\n return Apply(self, [img, topgrad, shape], [output])\n\n def perform(self, node, inp, out_):\n img, topgrad, shape = inp\n img = numpy.asarray(img)\n topgrad = numpy.asarray(topgrad)\n\n o, = out_\n\n mode = self.border_mode\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n if mode == \"full\":\n mode = (shape[0] - 1, shape[1] - 1)\n elif mode == \"half\":\n mode = (shape[0] // 2, shape[1] // 2)\n if isinstance(mode, tuple):\n pad_h, pad_w = map(int, mode)\n mode = \"valid\"\n new_img = numpy.zeros((img.shape[0], img.shape[1],\n img.shape[2] + 2 * pad_h,\n img.shape[3] + 2 * pad_w), dtype=img.dtype)\n new_img[:, :, pad_h:img.shape[2] + pad_h, pad_w:img.shape[3] + pad_w] = img\n img = new_img\n\n if self.subsample[0] > 1 or self.subsample[1] > 1:\n new_shape = (topgrad.shape[0], topgrad.shape[1],\n img.shape[2] - shape[0] + 1,\n img.shape[3] - shape[1] + 1)\n new_topgrad = numpy.zeros((new_shape), dtype=topgrad.dtype)\n new_topgrad[:, :, ::self.subsample[0], ::self.subsample[1]] = topgrad\n topgrad = 
new_topgrad\n\n topgrad = topgrad.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]\n img = img.transpose(1, 0, 2, 3)\n kern = self.conv2d(img, topgrad, mode=\"valid\")\n if self.filter_flip:\n kern = kern.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]\n else:\n kern = kern.transpose(1, 0, 2, 3)\n o[0] = node.outputs[0].type.filter(kern)\n\n def grad(self, inp, grads):\n bottom, top = inp[:2]\n weights, = grads\n d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n weights,\n top,\n bottom.shape[-2:])\n d_top = AbstractConv2d(self.imshp,\n self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(bottom, weights)\n # Make sure that the broadcastable pattern of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)\n d_bottom = bottom.type.filter_variable(d_bottom)\n d_top = patternbroadcast(d_top, top.broadcastable)\n d_top = top.type.filter_variable(d_top)\n\n d_height_width = (theano.gradient.DisconnectedType()(),)\n return (d_bottom, d_top) + d_height_width\n\n def connection_pattern(self, node):\n return [[1], [1], [0]] # no connection to height, width\n\n def infer_shape(self, node, input_shapes):\n # We use self.kshp (that was passed when creating the Op) if possible,\n # or fall back to the `shape` input of the node.\n # TODO: when there is no subsampling, try to infer the kernel shape\n # from the shapes of inputs.\n imshp = input_shapes[0]\n topshp = input_shapes[1]\n kshp = self.kshp[:] if self.kshp is not None else [None] * 4\n fallback_kshp = [topshp[1], imshp[1], node.inputs[2][0], node.inputs[2][1]]\n kshp = [fallback_kshp[i] if kshp[i] is None else kshp[i]\n for i in range(4)]\n return [kshp]\n\n\nclass AbstractConv2d_gradInputs(BaseAbstractConv2d):\n \"\"\"Gradient wrt. 
inputs for `AbstractConv2d`.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n\n :note: You will not want to use this directly, but rely on\n Theano's automatic differentiation or graph optimization to\n use it as needed.\n\n \"\"\"\n\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d_gradInputs, self).__init__(imshp, kshp,\n border_mode,\n subsample,\n filter_flip)\n\n # Update shape/height_width\n def make_node(self, kern, topgrad, shape):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(kern, theano.Variable):\n kern = as_tensor_variable(kern)\n if not isinstance(topgrad, theano.Variable):\n topgrad = as_tensor_variable(topgrad)\n gtype = kern.type.clone(dtype=topgrad.dtype,\n broadcastable=topgrad.broadcastable)\n topgrad = gtype.filter_variable(topgrad)\n\n if kern.type.ndim != 4:\n raise TypeError('kern must be 4D tensor')\n if topgrad.type.ndim != 4:\n raise TypeError('topgrad must be 4D tensor')\n\n shape = as_tensor_variable(shape)\n broadcastable = [topgrad.type.broadcastable[0],\n kern.type.broadcastable[1],\n False, False]\n output = kern.type.clone(broadcastable=broadcastable)()\n return Apply(self, [kern, topgrad, shape], [output])\n\n def perform(self, node, inp, out_):\n kern, topgrad, shape = inp\n kern = numpy.asarray(kern)\n topgrad = numpy.asarray(topgrad)\n o, = out_\n\n mode = self.border_mode\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n pad_h, pad_w = 0, 0\n if mode == \"full\":\n pad_h, pad_w = (kern.shape[2] - 1, kern.shape[3] - 1)\n elif mode == \"half\":\n pad_h, pad_w = (kern.shape[2] // 2, kern.shape[3] // 2)\n elif isinstance(mode, tuple):\n pad_h, pad_w = map(int, self.border_mode)\n if self.subsample[0] > 1 or self.subsample[1] > 1:\n new_shape = (topgrad.shape[0], topgrad.shape[1],\n shape[0] + 2 * pad_h - kern.shape[2] + 1,\n shape[1] + 2 * pad_w - kern.shape[3] + 1)\n new_topgrad = numpy.zeros((new_shape), dtype=topgrad.dtype)\n new_topgrad[:, :, ::self.subsample[0], ::self.subsample[1]] = topgrad\n topgrad = new_topgrad\n kern = kern.transpose(1, 0, 2, 3)\n if self.filter_flip:\n topgrad = topgrad[:, :, ::-1, ::-1]\n img = self.conv2d(topgrad, kern, mode=\"full\")\n if self.filter_flip:\n img = img[:, :, ::-1, ::-1]\n if pad_h > 0 or pad_w > 0:\n img = img[:, :, pad_h:img.shape[2] - pad_h, pad_w:img.shape[3] - pad_w]\n o[0] = node.outputs[0].type.filter(img)\n\n def grad(self, inp, grads):\n weights, top = inp[:2]\n bottom, = grads\n d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,\n self.border_mode,\n self.subsample)(\n bottom, top,\n weights.shape[-2:])\n d_top = AbstractConv2d(self.imshp, self.kshp,\n self.border_mode, self.subsample)(\n bottom, weights)\n # Make sure that the broadcastable pattern of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_weights = patternbroadcast(d_weights, weights.broadcastable)\n d_weights = weights.type.filter_variable(d_weights)\n d_top = patternbroadcast(d_top, top.broadcastable)\n d_top = top.type.filter_variable(d_top)\n\n 
d_height_width = (theano.gradient.DisconnectedType()(),)\n return (d_weights, d_top) + d_height_width\n\n def connection_pattern(self, node):\n return [[1], [1], [0]] # no connection to height, width\n\n def infer_shape(self, node, input_shapes):\n # We use self.imshp (that was passed when creating the Op) if possible,\n # or fall back to the `shape` input of the node.\n # TODO: when there is no subsampling, try to infer the image shape\n # from the shapes of inputs.\n kshp = input_shapes[0]\n topshp = input_shapes[1]\n imshp = self.imshp[:] if self.imshp is not None else [None] * 4\n fallback_imshp = [topshp[0], kshp[1], node.inputs[2][0],\n node.inputs[2][1]]\n imshp = [fallback_imshp[i] if imshp[i] is None else imshp[i]\n for i in range(4)]\n return [imshp]\n", "path": "theano/tensor/nnet/abstract_conv.py"}]} |
gh_patches_debug_1418 | rasdani/github-patches | git_diff | openshift__openshift-ansible-3914 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Long DN string with spaces can cause incorrect YAML to be generated in master-config
#### Description
I have a configuration file where my settings for an LDAP identity provider has a long string containing spaces, when this is converted to YAML, line feeds are inserted on some of the longer DN/LDAP filter strings, this results in the master API service failing to start due to invalid config.
Modifying the following `yaml.dump()` to include `width=1000` for example results in a working master-config.yaml.
https://github.com/openshift/openshift-ansible/blob/7496b1235f72bd4241e4917f50df722174bf90fa/roles/openshift_master_facts/filter_plugins/openshift_master.py#L496-L499
##### Version
```
ansible 2.2.1.0
atomic-openshift-utils-3.4.67-1.git.0.14a0b4d.el7.noarch
openshift-ansible-3.4.67-1.git.0.14a0b4d.el7.noarch
```
##### Steps To Reproduce
Create a long LDAP identity provider in openshift_master_identity_providers that has spaces in the DN. eg. `'url':'ldap://url.to.some.ldap.server/DC=blah,DC=foo,DC=bar?uid??(memberof=CN=UserGroupForPaaS,OU=Groups,OU=Unit With Spaces For Some Reason,OU=Also With - In Case,DC=blah,DC=foo,DC=bar)'`
##### Expected Results
master-config.yaml with a DN that is readable by the OpenShift master service.
##### Observed Results
Master service fails to start.
Long DN string with spaces can cause incorrect YAML to be generated in master-config
#### Description
I have a configuration file where my settings for an LDAP identity provider has a long string containing spaces, when this is converted to YAML, line feeds are inserted on some of the longer DN/LDAP filter strings, this results in the master API service failing to start due to invalid config.
Modifying the following `yaml.dump()` to include `width=1000` for example results in a working master-config.yaml.
https://github.com/openshift/openshift-ansible/blob/7496b1235f72bd4241e4917f50df722174bf90fa/roles/openshift_master_facts/filter_plugins/openshift_master.py#L496-L499
##### Version
```
ansible 2.2.1.0
atomic-openshift-utils-3.4.67-1.git.0.14a0b4d.el7.noarch
openshift-ansible-3.4.67-1.git.0.14a0b4d.el7.noarch
```
##### Steps To Reproduce
Create a long LDAP identity provider in openshift_master_identity_providers that has spaces in the DN. eg. `'url':'ldap://url.to.some.ldap.server/DC=blah,DC=foo,DC=bar?uid??(memberof=CN=UserGroupForPaaS,OU=Groups,OU=Unit With Spaces For Some Reason,OU=Also With - In Case,DC=blah,DC=foo,DC=bar)'`
##### Expected Results
master-config.yaml with a DN that is readable by the OpenShift master service.
##### Observed Results
Master service fails to start.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `roles/openshift_master_facts/filter_plugins/openshift_master.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 # vim: expandtab:tabstop=4:shiftwidth=4
4 '''
5 Custom filters for use in openshift-master
6 '''
7 import copy
8 import sys
9
10 # pylint import-error disabled because pylint cannot find the package
11 # when installed in a virtualenv
12 from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
13
14 from ansible import errors
15 from ansible.parsing.yaml.dumper import AnsibleDumper
16 from ansible.plugins.filter.core import to_bool as ansible_bool
17
18 # ansible.compat.six goes away with Ansible 2.4
19 try:
20 from ansible.compat.six import string_types, u
21 except ImportError:
22 from ansible.module_utils.six import string_types, u
23
24 import yaml
25
26
27 class IdentityProviderBase(object):
28 """ IdentityProviderBase
29
30 Attributes:
31 name (str): Identity provider Name
32 login (bool): Is this identity provider a login provider?
33 challenge (bool): Is this identity provider a challenge provider?
34 provider (dict): Provider specific config
35 _idp (dict): internal copy of the IDP dict passed in
36 _required (list): List of lists of strings for required attributes
37 _optional (list): List of lists of strings for optional attributes
38 _allow_additional (bool): Does this provider support attributes
39 not in _required and _optional
40
41 Args:
42 api_version(str): OpenShift config version
43 idp (dict): idp config dict
44
45 Raises:
46 AnsibleFilterError:
47 """
48 # disabling this check since the number of instance attributes are
49 # necessary for this class
50 # pylint: disable=too-many-instance-attributes
51 def __init__(self, api_version, idp):
52 if api_version not in ['v1']:
53 raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version))
54
55 self._idp = copy.deepcopy(idp)
56
57 if 'name' not in self._idp:
58 raise errors.AnsibleFilterError("|failed identity provider missing a name")
59
60 if 'kind' not in self._idp:
61 raise errors.AnsibleFilterError("|failed identity provider missing a kind")
62
63 self.name = self._idp.pop('name')
64 self.login = ansible_bool(self._idp.pop('login', False))
65 self.challenge = ansible_bool(self._idp.pop('challenge', False))
66 self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))
67
68 mm_keys = ('mappingMethod', 'mapping_method')
69 mapping_method = None
70 for key in mm_keys:
71 if key in self._idp:
72 mapping_method = self._idp.pop(key)
73 if mapping_method is None:
74 mapping_method = self.get_default('mappingMethod')
75 self.mapping_method = mapping_method
76
77 valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']
78 if self.mapping_method not in valid_mapping_methods:
79 raise errors.AnsibleFilterError("|failed unknown mapping method "
80 "for provider {0}".format(self.__class__.__name__))
81 self._required = []
82 self._optional = []
83 self._allow_additional = True
84
85 @staticmethod
86 def validate_idp_list(idp_list, openshift_version, deployment_type):
87 ''' validates a list of idps '''
88 login_providers = [x.name for x in idp_list if x.login]
89
90 multiple_logins_unsupported = False
91 if len(login_providers) > 1:
92 if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:
93 if LooseVersion(openshift_version) < LooseVersion('3.2'):
94 multiple_logins_unsupported = True
95 if deployment_type in ['origin']:
96 if LooseVersion(openshift_version) < LooseVersion('1.2'):
97 multiple_logins_unsupported = True
98 if multiple_logins_unsupported:
99 raise errors.AnsibleFilterError("|failed multiple providers are "
100 "not allowed for login. login "
101 "providers: {0}".format(', '.join(login_providers)))
102
103 names = [x.name for x in idp_list]
104 if len(set(names)) != len(names):
105 raise errors.AnsibleFilterError("|failed more than one provider configured with the same name")
106
107 for idp in idp_list:
108 idp.validate()
109
110 def validate(self):
111 ''' validate an instance of this idp class '''
112 pass
113
114 @staticmethod
115 def get_default(key):
116 ''' get a default value for a given key '''
117 if key == 'mappingMethod':
118 return 'claim'
119 else:
120 return None
121
122 def set_provider_item(self, items, required=False):
123 ''' set a provider item based on the list of item names provided. '''
124 for item in items:
125 provider_key = items[0]
126 if item in self._idp:
127 self.provider[provider_key] = self._idp.pop(item)
128 break
129 else:
130 default = self.get_default(provider_key)
131 if default is not None:
132 self.provider[provider_key] = default
133 elif required:
134 raise errors.AnsibleFilterError("|failed provider {0} missing "
135 "required key {1}".format(self.__class__.__name__, provider_key))
136
137 def set_provider_items(self):
138 ''' set the provider items for this idp '''
139 for items in self._required:
140 self.set_provider_item(items, True)
141 for items in self._optional:
142 self.set_provider_item(items)
143 if self._allow_additional:
144 for key in self._idp.keys():
145 self.set_provider_item([key])
146 else:
147 if len(self._idp) > 0:
148 raise errors.AnsibleFilterError("|failed provider {0} "
149 "contains unknown keys "
150 "{1}".format(self.__class__.__name__, ', '.join(self._idp.keys())))
151
152 def to_dict(self):
153 ''' translate this idp to a dictionary '''
154 return dict(name=self.name, challenge=self.challenge,
155 login=self.login, mappingMethod=self.mapping_method,
156 provider=self.provider)
157
158
159 class LDAPPasswordIdentityProvider(IdentityProviderBase):
160 """ LDAPPasswordIdentityProvider
161
162 Attributes:
163
164 Args:
165 api_version(str): OpenShift config version
166 idp (dict): idp config dict
167
168 Raises:
169 AnsibleFilterError:
170 """
171 def __init__(self, api_version, idp):
172 super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp)
173 self._allow_additional = False
174 self._required += [['attributes'], ['url'], ['insecure']]
175 self._optional += [['ca'],
176 ['bindDN', 'bind_dn'],
177 ['bindPassword', 'bind_password']]
178
179 self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))
180
181 if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']:
182 pref_user = self._idp['attributes'].pop('preferred_username')
183 self._idp['attributes']['preferredUsername'] = pref_user
184
185 def validate(self):
186 ''' validate this idp instance '''
187 if not isinstance(self.provider['attributes'], dict):
188 raise errors.AnsibleFilterError("|failed attributes for provider "
189 "{0} must be a dictionary".format(self.__class__.__name__))
190
191 attrs = ['id', 'email', 'name', 'preferredUsername']
192 for attr in attrs:
193 if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list):
194 raise errors.AnsibleFilterError("|failed {0} attribute for "
195 "provider {1} must be a list".format(attr, self.__class__.__name__))
196
197 unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs)
198 if len(unknown_attrs) > 0:
199 raise errors.AnsibleFilterError("|failed provider {0} has unknown "
200 "attributes: {1}".format(self.__class__.__name__, ', '.join(unknown_attrs)))
201
202
203 class KeystonePasswordIdentityProvider(IdentityProviderBase):
204 """ KeystoneIdentityProvider
205
206 Attributes:
207
208 Args:
209 api_version(str): OpenShift config version
210 idp (dict): idp config dict
211
212 Raises:
213 AnsibleFilterError:
214 """
215 def __init__(self, api_version, idp):
216 super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp)
217 self._allow_additional = False
218 self._required += [['url'], ['domainName', 'domain_name']]
219 self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
220
221
222 class RequestHeaderIdentityProvider(IdentityProviderBase):
223 """ RequestHeaderIdentityProvider
224
225 Attributes:
226
227 Args:
228 api_version(str): OpenShift config version
229 idp (dict): idp config dict
230
231 Raises:
232 AnsibleFilterError:
233 """
234 def __init__(self, api_version, idp):
235 super(RequestHeaderIdentityProvider, self).__init__(api_version, idp)
236 self._allow_additional = False
237 self._required += [['headers']]
238 self._optional += [['challengeURL', 'challenge_url'],
239 ['loginURL', 'login_url'],
240 ['clientCA', 'client_ca'],
241 ['clientCommonNames', 'client_common_names'],
242 ['emailHeaders', 'email_headers'],
243 ['nameHeaders', 'name_headers'],
244 ['preferredUsernameHeaders', 'preferred_username_headers']]
245
246 def validate(self):
247 ''' validate this idp instance '''
248 if not isinstance(self.provider['headers'], list):
249 raise errors.AnsibleFilterError("|failed headers for provider {0} "
250 "must be a list".format(self.__class__.__name__))
251
252
253 class AllowAllPasswordIdentityProvider(IdentityProviderBase):
254 """ AllowAllPasswordIdentityProvider
255
256 Attributes:
257
258 Args:
259 api_version(str): OpenShift config version
260 idp (dict): idp config dict
261
262 Raises:
263 AnsibleFilterError:
264 """
265 def __init__(self, api_version, idp):
266 super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp)
267 self._allow_additional = False
268
269
270 class DenyAllPasswordIdentityProvider(IdentityProviderBase):
271 """ DenyAllPasswordIdentityProvider
272
273 Attributes:
274
275 Args:
276 api_version(str): OpenShift config version
277 idp (dict): idp config dict
278
279 Raises:
280 AnsibleFilterError:
281 """
282 def __init__(self, api_version, idp):
283 super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp)
284 self._allow_additional = False
285
286
287 class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
288 """ HTPasswdPasswordIdentity
289
290 Attributes:
291
292 Args:
293 api_version(str): OpenShift config version
294 idp (dict): idp config dict
295
296 Raises:
297 AnsibleFilterError:
298 """
299 def __init__(self, api_version, idp):
300 super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp)
301 self._allow_additional = False
302 self._required += [['file', 'filename', 'fileName', 'file_name']]
303
304 @staticmethod
305 def get_default(key):
306 if key == 'file':
307 return '/etc/origin/htpasswd'
308 else:
309 return IdentityProviderBase.get_default(key)
310
311
312 class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
313 """ BasicAuthPasswordIdentityProvider
314
315 Attributes:
316
317 Args:
318 api_version(str): OpenShift config version
319 idp (dict): idp config dict
320
321 Raises:
322 AnsibleFilterError:
323 """
324 def __init__(self, api_version, idp):
325 super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp)
326 self._allow_additional = False
327 self._required += [['url']]
328 self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
329
330
331 class IdentityProviderOauthBase(IdentityProviderBase):
332 """ IdentityProviderOauthBase
333
334 Attributes:
335
336 Args:
337 api_version(str): OpenShift config version
338 idp (dict): idp config dict
339
340 Raises:
341 AnsibleFilterError:
342 """
343 def __init__(self, api_version, idp):
344 super(IdentityProviderOauthBase, self).__init__(api_version, idp)
345 self._allow_additional = False
346 self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
347
348 def validate(self):
349 ''' validate this idp instance '''
350 if self.challenge:
351 raise errors.AnsibleFilterError("|failed provider {0} does not "
352 "allow challenge authentication".format(self.__class__.__name__))
353
354
355 class OpenIDIdentityProvider(IdentityProviderOauthBase):
356 """ OpenIDIdentityProvider
357
358 Attributes:
359
360 Args:
361 api_version(str): OpenShift config version
362 idp (dict): idp config dict
363
364 Raises:
365 AnsibleFilterError:
366 """
367 def __init__(self, api_version, idp):
368 IdentityProviderOauthBase.__init__(self, api_version, idp)
369 self._required += [['claims'], ['urls']]
370 self._optional += [['ca'],
371 ['extraScopes'],
372 ['extraAuthorizeParameters']]
373 if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
374 pref_user = self._idp['claims'].pop('preferred_username')
375 self._idp['claims']['preferredUsername'] = pref_user
376 if 'urls' in self._idp and 'user_info' in self._idp['urls']:
377 user_info = self._idp['urls'].pop('user_info')
378 self._idp['urls']['userInfo'] = user_info
379 if 'extra_scopes' in self._idp:
380 self._idp['extraScopes'] = self._idp.pop('extra_scopes')
381 if 'extra_authorize_parameters' in self._idp:
382 self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
383
384 if 'extraAuthorizeParameters' in self._idp:
385 if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
386 val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
387 self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
388
389 def validate(self):
390 ''' validate this idp instance '''
391 IdentityProviderOauthBase.validate(self)
392 if not isinstance(self.provider['claims'], dict):
393 raise errors.AnsibleFilterError("|failed claims for provider {0} "
394 "must be a dictionary".format(self.__class__.__name__))
395
396 for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):
397 if var in self.provider and not isinstance(self.provider[var], var_type):
398 raise errors.AnsibleFilterError("|failed {1} for provider "
399 "{0} must be a {2}".format(self.__class__.__name__,
400 var,
401 var_type.__class__.__name__))
402
403 required_claims = ['id']
404 optional_claims = ['email', 'name', 'preferredUsername']
405 all_claims = required_claims + optional_claims
406
407 for claim in required_claims:
408 if claim in required_claims and claim not in self.provider['claims']:
409 raise errors.AnsibleFilterError("|failed {0} claim missing "
410 "for provider {1}".format(claim, self.__class__.__name__))
411
412 for claim in all_claims:
413 if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
414 raise errors.AnsibleFilterError("|failed {0} claims for "
415 "provider {1} must be a list".format(claim, self.__class__.__name__))
416
417 unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
418 if len(unknown_claims) > 0:
419 raise errors.AnsibleFilterError("|failed provider {0} has unknown "
420 "claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
421
422 if not isinstance(self.provider['urls'], dict):
423 raise errors.AnsibleFilterError("|failed urls for provider {0} "
424 "must be a dictionary".format(self.__class__.__name__))
425
426 required_urls = ['authorize', 'token']
427 optional_urls = ['userInfo']
428 all_urls = required_urls + optional_urls
429
430 for url in required_urls:
431 if url not in self.provider['urls']:
432 raise errors.AnsibleFilterError("|failed {0} url missing for "
433 "provider {1}".format(url, self.__class__.__name__))
434
435 unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
436 if len(unknown_urls) > 0:
437 raise errors.AnsibleFilterError("|failed provider {0} has unknown "
438 "urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
439
440
441 class GoogleIdentityProvider(IdentityProviderOauthBase):
442 """ GoogleIdentityProvider
443
444 Attributes:
445
446 Args:
447 api_version(str): OpenShift config version
448 idp (dict): idp config dict
449
450 Raises:
451 AnsibleFilterError:
452 """
453 def __init__(self, api_version, idp):
454 IdentityProviderOauthBase.__init__(self, api_version, idp)
455 self._optional += [['hostedDomain', 'hosted_domain']]
456
457
458 class GitHubIdentityProvider(IdentityProviderOauthBase):
459 """ GitHubIdentityProvider
460
461 Attributes:
462
463 Args:
464 api_version(str): OpenShift config version
465 idp (dict): idp config dict
466
467 Raises:
468 AnsibleFilterError:
469 """
470 def __init__(self, api_version, idp):
471 IdentityProviderOauthBase.__init__(self, api_version, idp)
472 self._optional += [['organizations']]
473
474
475 class FilterModule(object):
476 ''' Custom ansible filters for use by the openshift_master role'''
477
478 @staticmethod
479 def translate_idps(idps, api_version, openshift_version, deployment_type):
480 ''' Translates a list of dictionaries into a valid identityProviders config '''
481 idp_list = []
482
483 if not isinstance(idps, list):
484 raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers")
485 for idp in idps:
486 if not isinstance(idp, dict):
487 raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries")
488
489 cur_module = sys.modules[__name__]
490 idp_class = getattr(cur_module, idp['kind'], None)
491 idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)
492 idp_inst.set_provider_items()
493 idp_list.append(idp_inst)
494
495 IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)
496 return u(yaml.dump([idp.to_dict() for idp in idp_list],
497 allow_unicode=True,
498 default_flow_style=False,
499 Dumper=AnsibleDumper))
500
501 @staticmethod
502 def validate_pcs_cluster(data, masters=None):
503 ''' Validates output from "pcs status", ensuring that each master
504 provided is online.
505 Ex: data = ('...',
506 'PCSD Status:',
507 'master1.example.com: Online',
508 'master2.example.com: Online',
509 'master3.example.com: Online',
510 '...')
511 masters = ['master1.example.com',
512 'master2.example.com',
513 'master3.example.com']
514 returns True
515 '''
516 if not issubclass(type(data), string_types):
517 raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
518 if not issubclass(type(masters), list):
519 raise errors.AnsibleFilterError("|failed expects masters is a list")
520 valid = True
521 for master in masters:
522 if "{0}: Online".format(master) not in data:
523 valid = False
524 return valid
525
526 @staticmethod
527 def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
528 ''' Return certificates to synchronize based on facts. '''
529 if not issubclass(type(hostvars), dict):
530 raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
531 certs = ['admin.crt',
532 'admin.key',
533 'admin.kubeconfig',
534 'master.kubelet-client.crt',
535 'master.kubelet-client.key']
536 if bool(include_ca):
537 certs += ['ca.crt', 'ca.key', 'ca-bundle.crt']
538 if bool(include_keys):
539 certs += ['serviceaccounts.private.key',
540 'serviceaccounts.public.key']
541 if bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):
542 certs += ['master.proxy-client.crt',
543 'master.proxy-client.key']
544 if not bool(hostvars['openshift']['common']['version_gte_3_2_or_1_2']):
545 certs += ['openshift-master.crt',
546 'openshift-master.key',
547 'openshift-master.kubeconfig']
548 if bool(hostvars['openshift']['common']['version_gte_3_3_or_1_3']):
549 certs += ['service-signer.crt',
550 'service-signer.key']
551 if not bool(hostvars['openshift']['common']['version_gte_3_5_or_1_5']):
552 certs += ['openshift-registry.crt',
553 'openshift-registry.key',
554 'openshift-registry.kubeconfig',
555 'openshift-router.crt',
556 'openshift-router.key',
557 'openshift-router.kubeconfig']
558 return certs
559
560 @staticmethod
561 def oo_htpasswd_users_from_file(file_contents):
562 ''' return a dictionary of htpasswd users from htpasswd file contents '''
563 htpasswd_entries = {}
564 if not isinstance(file_contents, string_types):
565 raise errors.AnsibleFilterError("failed, expects to filter on a string")
566 for line in file_contents.splitlines():
567 user = None
568 passwd = None
569 if len(line) == 0:
570 continue
571 if ':' in line:
572 user, passwd = line.split(':', 1)
573
574 if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:
575 error_msg = "failed, expects each line to be a colon separated string representing the user and passwd"
576 raise errors.AnsibleFilterError(error_msg)
577 htpasswd_entries[user] = passwd
578 return htpasswd_entries
579
580 def filters(self):
581 ''' returns a mapping of filters to methods '''
582 return {"translate_idps": self.translate_idps,
583 "validate_pcs_cluster": self.validate_pcs_cluster,
584 "certificates_to_synchronize": self.certificates_to_synchronize,
585 "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
586
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -496,6 +496,7 @@
return u(yaml.dump([idp.to_dict() for idp in idp_list],
allow_unicode=True,
default_flow_style=False,
+ width=float("inf"),
Dumper=AnsibleDumper))
@staticmethod
| {"golden_diff": "diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py\n--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py\n+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py\n@@ -496,6 +496,7 @@\n return u(yaml.dump([idp.to_dict() for idp in idp_list],\n allow_unicode=True,\n default_flow_style=False,\n+ width=float(\"inf\"),\n Dumper=AnsibleDumper))\n \n @staticmethod\n", "issue": "Long DN string with spaces can cause incorrect YAML to be generated in master-config\n#### Description\r\nI have a configuration file where my settings for an LDAP identity provider has a long string containing spaces, when this is converted to YAML, line feeds are inserted on some of the longer DN/LDAP filter strings, this results in the master API service failing to start due to invalid config.\r\n\r\nModifying the following `yaml.dump()` to include `width=1000` for example results in a working master-config.yaml.\r\n\r\nhttps://github.com/openshift/openshift-ansible/blob/7496b1235f72bd4241e4917f50df722174bf90fa/roles/openshift_master_facts/filter_plugins/openshift_master.py#L496-L499\r\n\r\n\r\n##### Version\r\n```\r\nansible 2.2.1.0\r\natomic-openshift-utils-3.4.67-1.git.0.14a0b4d.el7.noarch\r\nopenshift-ansible-3.4.67-1.git.0.14a0b4d.el7.noarch\r\n```\r\n\r\n##### Steps To Reproduce\r\nCreate a long LDAP identity provider in openshift_master_identity_providers that has spaces in the DN. eg. `'url':'ldap://url.to.some.ldap.server/DC=blah,DC=foo,DC=bar?uid??(memberof=CN=UserGroupForPaaS,OU=Groups,OU=Unit With Spaces For Some Reason,OU=Also With - In Case,DC=blah,DC=foo,DC=bar)'`\r\n\r\n##### Expected Results\r\nmaster-config.yaml with a DN that is readable by the OpenShift master service.\r\n\r\n##### Observed Results\r\nMaster service fails to start.\nLong DN string with spaces can cause incorrect YAML to be generated in master-config\n#### Description\r\nI have a configuration file where my settings for an LDAP identity provider has a long string containing spaces, when this is converted to YAML, line feeds are inserted on some of the longer DN/LDAP filter strings, this results in the master API service failing to start due to invalid config.\r\n\r\nModifying the following `yaml.dump()` to include `width=1000` for example results in a working master-config.yaml.\r\n\r\nhttps://github.com/openshift/openshift-ansible/blob/7496b1235f72bd4241e4917f50df722174bf90fa/roles/openshift_master_facts/filter_plugins/openshift_master.py#L496-L499\r\n\r\n\r\n##### Version\r\n```\r\nansible 2.2.1.0\r\natomic-openshift-utils-3.4.67-1.git.0.14a0b4d.el7.noarch\r\nopenshift-ansible-3.4.67-1.git.0.14a0b4d.el7.noarch\r\n```\r\n\r\n##### Steps To Reproduce\r\nCreate a long LDAP identity provider in openshift_master_identity_providers that has spaces in the DN. eg. 
`'url':'ldap://url.to.some.ldap.server/DC=blah,DC=foo,DC=bar?uid??(memberof=CN=UserGroupForPaaS,OU=Groups,OU=Unit With Spaces For Some Reason,OU=Also With - In Case,DC=blah,DC=foo,DC=bar)'`\r\n\r\n##### Expected Results\r\nmaster-config.yaml with a DN that is readable by the OpenShift master service.\r\n\r\n##### Observed Results\r\nMaster service fails to start.\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# vim: expandtab:tabstop=4:shiftwidth=4\n'''\nCustom filters for use in openshift-master\n'''\nimport copy\nimport sys\n\n# pylint import-error disabled because pylint cannot find the package\n# when installed in a virtualenv\nfrom distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error\n\nfrom ansible import errors\nfrom ansible.parsing.yaml.dumper import AnsibleDumper\nfrom ansible.plugins.filter.core import to_bool as ansible_bool\n\n# ansible.compat.six goes away with Ansible 2.4\ntry:\n from ansible.compat.six import string_types, u\nexcept ImportError:\n from ansible.module_utils.six import string_types, u\n\nimport yaml\n\n\nclass IdentityProviderBase(object):\n \"\"\" IdentityProviderBase\n\n Attributes:\n name (str): Identity provider Name\n login (bool): Is this identity provider a login provider?\n challenge (bool): Is this identity provider a challenge provider?\n provider (dict): Provider specific config\n _idp (dict): internal copy of the IDP dict passed in\n _required (list): List of lists of strings for required attributes\n _optional (list): List of lists of strings for optional attributes\n _allow_additional (bool): Does this provider support attributes\n not in _required and _optional\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n # disabling this check since the number of instance attributes are\n # necessary for this class\n # pylint: disable=too-many-instance-attributes\n def __init__(self, api_version, idp):\n if api_version not in ['v1']:\n raise errors.AnsibleFilterError(\"|failed api version {0} unknown\".format(api_version))\n\n self._idp = copy.deepcopy(idp)\n\n if 'name' not in self._idp:\n raise errors.AnsibleFilterError(\"|failed identity provider missing a name\")\n\n if 'kind' not in self._idp:\n raise errors.AnsibleFilterError(\"|failed identity provider missing a kind\")\n\n self.name = self._idp.pop('name')\n self.login = ansible_bool(self._idp.pop('login', False))\n self.challenge = ansible_bool(self._idp.pop('challenge', False))\n self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))\n\n mm_keys = ('mappingMethod', 'mapping_method')\n mapping_method = None\n for key in mm_keys:\n if key in self._idp:\n mapping_method = self._idp.pop(key)\n if mapping_method is None:\n mapping_method = self.get_default('mappingMethod')\n self.mapping_method = mapping_method\n\n valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']\n if self.mapping_method not in valid_mapping_methods:\n raise errors.AnsibleFilterError(\"|failed unknown mapping method \"\n \"for provider {0}\".format(self.__class__.__name__))\n self._required = []\n self._optional = []\n self._allow_additional = True\n\n @staticmethod\n def validate_idp_list(idp_list, openshift_version, deployment_type):\n ''' validates a list of idps '''\n login_providers = [x.name for x in idp_list if x.login]\n\n multiple_logins_unsupported = False\n if len(login_providers) > 1:\n if deployment_type in ['enterprise', 'online', 
'atomic-enterprise', 'openshift-enterprise']:\n if LooseVersion(openshift_version) < LooseVersion('3.2'):\n multiple_logins_unsupported = True\n if deployment_type in ['origin']:\n if LooseVersion(openshift_version) < LooseVersion('1.2'):\n multiple_logins_unsupported = True\n if multiple_logins_unsupported:\n raise errors.AnsibleFilterError(\"|failed multiple providers are \"\n \"not allowed for login. login \"\n \"providers: {0}\".format(', '.join(login_providers)))\n\n names = [x.name for x in idp_list]\n if len(set(names)) != len(names):\n raise errors.AnsibleFilterError(\"|failed more than one provider configured with the same name\")\n\n for idp in idp_list:\n idp.validate()\n\n def validate(self):\n ''' validate an instance of this idp class '''\n pass\n\n @staticmethod\n def get_default(key):\n ''' get a default value for a given key '''\n if key == 'mappingMethod':\n return 'claim'\n else:\n return None\n\n def set_provider_item(self, items, required=False):\n ''' set a provider item based on the list of item names provided. '''\n for item in items:\n provider_key = items[0]\n if item in self._idp:\n self.provider[provider_key] = self._idp.pop(item)\n break\n else:\n default = self.get_default(provider_key)\n if default is not None:\n self.provider[provider_key] = default\n elif required:\n raise errors.AnsibleFilterError(\"|failed provider {0} missing \"\n \"required key {1}\".format(self.__class__.__name__, provider_key))\n\n def set_provider_items(self):\n ''' set the provider items for this idp '''\n for items in self._required:\n self.set_provider_item(items, True)\n for items in self._optional:\n self.set_provider_item(items)\n if self._allow_additional:\n for key in self._idp.keys():\n self.set_provider_item([key])\n else:\n if len(self._idp) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} \"\n \"contains unknown keys \"\n \"{1}\".format(self.__class__.__name__, ', '.join(self._idp.keys())))\n\n def to_dict(self):\n ''' translate this idp to a dictionary '''\n return dict(name=self.name, challenge=self.challenge,\n login=self.login, mappingMethod=self.mapping_method,\n provider=self.provider)\n\n\nclass LDAPPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" LDAPPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['attributes'], ['url'], ['insecure']]\n self._optional += [['ca'],\n ['bindDN', 'bind_dn'],\n ['bindPassword', 'bind_password']]\n\n self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))\n\n if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']:\n pref_user = self._idp['attributes'].pop('preferred_username')\n self._idp['attributes']['preferredUsername'] = pref_user\n\n def validate(self):\n ''' validate this idp instance '''\n if not isinstance(self.provider['attributes'], dict):\n raise errors.AnsibleFilterError(\"|failed attributes for provider \"\n \"{0} must be a dictionary\".format(self.__class__.__name__))\n\n attrs = ['id', 'email', 'name', 'preferredUsername']\n for attr in attrs:\n if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list):\n raise errors.AnsibleFilterError(\"|failed {0} attribute for \"\n \"provider {1} must be a list\".format(attr, 
self.__class__.__name__))\n\n unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs)\n if len(unknown_attrs) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"attributes: {1}\".format(self.__class__.__name__, ', '.join(unknown_attrs)))\n\n\nclass KeystonePasswordIdentityProvider(IdentityProviderBase):\n \"\"\" KeystoneIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['url'], ['domainName', 'domain_name']]\n self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]\n\n\nclass RequestHeaderIdentityProvider(IdentityProviderBase):\n \"\"\" RequestHeaderIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(RequestHeaderIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['headers']]\n self._optional += [['challengeURL', 'challenge_url'],\n ['loginURL', 'login_url'],\n ['clientCA', 'client_ca'],\n ['clientCommonNames', 'client_common_names'],\n ['emailHeaders', 'email_headers'],\n ['nameHeaders', 'name_headers'],\n ['preferredUsernameHeaders', 'preferred_username_headers']]\n\n def validate(self):\n ''' validate this idp instance '''\n if not isinstance(self.provider['headers'], list):\n raise errors.AnsibleFilterError(\"|failed headers for provider {0} \"\n \"must be a list\".format(self.__class__.__name__))\n\n\nclass AllowAllPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" AllowAllPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n\n\nclass DenyAllPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" DenyAllPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n\n\nclass HTPasswdPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" HTPasswdPasswordIdentity\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['file', 'filename', 'fileName', 'file_name']]\n\n @staticmethod\n def get_default(key):\n if key == 'file':\n return '/etc/origin/htpasswd'\n else:\n return IdentityProviderBase.get_default(key)\n\n\nclass BasicAuthPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" BasicAuthPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, 
idp)\n self._allow_additional = False\n self._required += [['url']]\n self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]\n\n\nclass IdentityProviderOauthBase(IdentityProviderBase):\n \"\"\" IdentityProviderOauthBase\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(IdentityProviderOauthBase, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]\n\n def validate(self):\n ''' validate this idp instance '''\n if self.challenge:\n raise errors.AnsibleFilterError(\"|failed provider {0} does not \"\n \"allow challenge authentication\".format(self.__class__.__name__))\n\n\nclass OpenIDIdentityProvider(IdentityProviderOauthBase):\n \"\"\" OpenIDIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._required += [['claims'], ['urls']]\n self._optional += [['ca'],\n ['extraScopes'],\n ['extraAuthorizeParameters']]\n if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:\n pref_user = self._idp['claims'].pop('preferred_username')\n self._idp['claims']['preferredUsername'] = pref_user\n if 'urls' in self._idp and 'user_info' in self._idp['urls']:\n user_info = self._idp['urls'].pop('user_info')\n self._idp['urls']['userInfo'] = user_info\n if 'extra_scopes' in self._idp:\n self._idp['extraScopes'] = self._idp.pop('extra_scopes')\n if 'extra_authorize_parameters' in self._idp:\n self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')\n\n if 'extraAuthorizeParameters' in self._idp:\n if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:\n val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))\n self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val\n\n def validate(self):\n ''' validate this idp instance '''\n IdentityProviderOauthBase.validate(self)\n if not isinstance(self.provider['claims'], dict):\n raise errors.AnsibleFilterError(\"|failed claims for provider {0} \"\n \"must be a dictionary\".format(self.__class__.__name__))\n\n for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):\n if var in self.provider and not isinstance(self.provider[var], var_type):\n raise errors.AnsibleFilterError(\"|failed {1} for provider \"\n \"{0} must be a {2}\".format(self.__class__.__name__,\n var,\n var_type.__class__.__name__))\n\n required_claims = ['id']\n optional_claims = ['email', 'name', 'preferredUsername']\n all_claims = required_claims + optional_claims\n\n for claim in required_claims:\n if claim in required_claims and claim not in self.provider['claims']:\n raise errors.AnsibleFilterError(\"|failed {0} claim missing \"\n \"for provider {1}\".format(claim, self.__class__.__name__))\n\n for claim in all_claims:\n if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):\n raise errors.AnsibleFilterError(\"|failed {0} claims for \"\n \"provider {1} must be a list\".format(claim, self.__class__.__name__))\n\n unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)\n if len(unknown_claims) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} 
has unknown \"\n \"claims: {1}\".format(self.__class__.__name__, ', '.join(unknown_claims)))\n\n if not isinstance(self.provider['urls'], dict):\n raise errors.AnsibleFilterError(\"|failed urls for provider {0} \"\n \"must be a dictionary\".format(self.__class__.__name__))\n\n required_urls = ['authorize', 'token']\n optional_urls = ['userInfo']\n all_urls = required_urls + optional_urls\n\n for url in required_urls:\n if url not in self.provider['urls']:\n raise errors.AnsibleFilterError(\"|failed {0} url missing for \"\n \"provider {1}\".format(url, self.__class__.__name__))\n\n unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)\n if len(unknown_urls) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"urls: {1}\".format(self.__class__.__name__, ', '.join(unknown_urls)))\n\n\nclass GoogleIdentityProvider(IdentityProviderOauthBase):\n \"\"\" GoogleIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._optional += [['hostedDomain', 'hosted_domain']]\n\n\nclass GitHubIdentityProvider(IdentityProviderOauthBase):\n \"\"\" GitHubIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._optional += [['organizations']]\n\n\nclass FilterModule(object):\n ''' Custom ansible filters for use by the openshift_master role'''\n\n @staticmethod\n def translate_idps(idps, api_version, openshift_version, deployment_type):\n ''' Translates a list of dictionaries into a valid identityProviders config '''\n idp_list = []\n\n if not isinstance(idps, list):\n raise errors.AnsibleFilterError(\"|failed expects to filter on a list of identity providers\")\n for idp in idps:\n if not isinstance(idp, dict):\n raise errors.AnsibleFilterError(\"|failed identity providers must be a list of dictionaries\")\n\n cur_module = sys.modules[__name__]\n idp_class = getattr(cur_module, idp['kind'], None)\n idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)\n idp_inst.set_provider_items()\n idp_list.append(idp_inst)\n\n IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)\n return u(yaml.dump([idp.to_dict() for idp in idp_list],\n allow_unicode=True,\n default_flow_style=False,\n Dumper=AnsibleDumper))\n\n @staticmethod\n def validate_pcs_cluster(data, masters=None):\n ''' Validates output from \"pcs status\", ensuring that each master\n provided is online.\n Ex: data = ('...',\n 'PCSD Status:',\n 'master1.example.com: Online',\n 'master2.example.com: Online',\n 'master3.example.com: Online',\n '...')\n masters = ['master1.example.com',\n 'master2.example.com',\n 'master3.example.com']\n returns True\n '''\n if not issubclass(type(data), string_types):\n raise errors.AnsibleFilterError(\"|failed expects data is a string or unicode\")\n if not issubclass(type(masters), list):\n raise errors.AnsibleFilterError(\"|failed expects masters is a list\")\n valid = True\n for master in masters:\n if \"{0}: Online\".format(master) not in data:\n valid = False\n return valid\n\n @staticmethod\n def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):\n ''' Return 
certificates to synchronize based on facts. '''\n if not issubclass(type(hostvars), dict):\n raise errors.AnsibleFilterError(\"|failed expects hostvars is a dict\")\n certs = ['admin.crt',\n 'admin.key',\n 'admin.kubeconfig',\n 'master.kubelet-client.crt',\n 'master.kubelet-client.key']\n if bool(include_ca):\n certs += ['ca.crt', 'ca.key', 'ca-bundle.crt']\n if bool(include_keys):\n certs += ['serviceaccounts.private.key',\n 'serviceaccounts.public.key']\n if bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):\n certs += ['master.proxy-client.crt',\n 'master.proxy-client.key']\n if not bool(hostvars['openshift']['common']['version_gte_3_2_or_1_2']):\n certs += ['openshift-master.crt',\n 'openshift-master.key',\n 'openshift-master.kubeconfig']\n if bool(hostvars['openshift']['common']['version_gte_3_3_or_1_3']):\n certs += ['service-signer.crt',\n 'service-signer.key']\n if not bool(hostvars['openshift']['common']['version_gte_3_5_or_1_5']):\n certs += ['openshift-registry.crt',\n 'openshift-registry.key',\n 'openshift-registry.kubeconfig',\n 'openshift-router.crt',\n 'openshift-router.key',\n 'openshift-router.kubeconfig']\n return certs\n\n @staticmethod\n def oo_htpasswd_users_from_file(file_contents):\n ''' return a dictionary of htpasswd users from htpasswd file contents '''\n htpasswd_entries = {}\n if not isinstance(file_contents, string_types):\n raise errors.AnsibleFilterError(\"failed, expects to filter on a string\")\n for line in file_contents.splitlines():\n user = None\n passwd = None\n if len(line) == 0:\n continue\n if ':' in line:\n user, passwd = line.split(':', 1)\n\n if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:\n error_msg = \"failed, expects each line to be a colon separated string representing the user and passwd\"\n raise errors.AnsibleFilterError(error_msg)\n htpasswd_entries[user] = passwd\n return htpasswd_entries\n\n def filters(self):\n ''' returns a mapping of filters to methods '''\n return {\"translate_idps\": self.translate_idps,\n \"validate_pcs_cluster\": self.validate_pcs_cluster,\n \"certificates_to_synchronize\": self.certificates_to_synchronize,\n \"oo_htpasswd_users_from_file\": self.oo_htpasswd_users_from_file}\n", "path": "roles/openshift_master_facts/filter_plugins/openshift_master.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# vim: expandtab:tabstop=4:shiftwidth=4\n'''\nCustom filters for use in openshift-master\n'''\nimport copy\nimport sys\n\n# pylint import-error disabled because pylint cannot find the package\n# when installed in a virtualenv\nfrom distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error\n\nfrom ansible import errors\nfrom ansible.parsing.yaml.dumper import AnsibleDumper\nfrom ansible.plugins.filter.core import to_bool as ansible_bool\n\n# ansible.compat.six goes away with Ansible 2.4\ntry:\n from ansible.compat.six import string_types, u\nexcept ImportError:\n from ansible.module_utils.six import string_types, u\n\nimport yaml\n\n\nclass IdentityProviderBase(object):\n \"\"\" IdentityProviderBase\n\n Attributes:\n name (str): Identity provider Name\n login (bool): Is this identity provider a login provider?\n challenge (bool): Is this identity provider a challenge provider?\n provider (dict): Provider specific config\n _idp (dict): internal copy of the IDP dict passed in\n _required (list): List of lists of strings for required attributes\n _optional (list): List of lists of strings for optional attributes\n 
_allow_additional (bool): Does this provider support attributes\n not in _required and _optional\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n # disabling this check since the number of instance attributes are\n # necessary for this class\n # pylint: disable=too-many-instance-attributes\n def __init__(self, api_version, idp):\n if api_version not in ['v1']:\n raise errors.AnsibleFilterError(\"|failed api version {0} unknown\".format(api_version))\n\n self._idp = copy.deepcopy(idp)\n\n if 'name' not in self._idp:\n raise errors.AnsibleFilterError(\"|failed identity provider missing a name\")\n\n if 'kind' not in self._idp:\n raise errors.AnsibleFilterError(\"|failed identity provider missing a kind\")\n\n self.name = self._idp.pop('name')\n self.login = ansible_bool(self._idp.pop('login', False))\n self.challenge = ansible_bool(self._idp.pop('challenge', False))\n self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))\n\n mm_keys = ('mappingMethod', 'mapping_method')\n mapping_method = None\n for key in mm_keys:\n if key in self._idp:\n mapping_method = self._idp.pop(key)\n if mapping_method is None:\n mapping_method = self.get_default('mappingMethod')\n self.mapping_method = mapping_method\n\n valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']\n if self.mapping_method not in valid_mapping_methods:\n raise errors.AnsibleFilterError(\"|failed unknown mapping method \"\n \"for provider {0}\".format(self.__class__.__name__))\n self._required = []\n self._optional = []\n self._allow_additional = True\n\n @staticmethod\n def validate_idp_list(idp_list, openshift_version, deployment_type):\n ''' validates a list of idps '''\n login_providers = [x.name for x in idp_list if x.login]\n\n multiple_logins_unsupported = False\n if len(login_providers) > 1:\n if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:\n if LooseVersion(openshift_version) < LooseVersion('3.2'):\n multiple_logins_unsupported = True\n if deployment_type in ['origin']:\n if LooseVersion(openshift_version) < LooseVersion('1.2'):\n multiple_logins_unsupported = True\n if multiple_logins_unsupported:\n raise errors.AnsibleFilterError(\"|failed multiple providers are \"\n \"not allowed for login. login \"\n \"providers: {0}\".format(', '.join(login_providers)))\n\n names = [x.name for x in idp_list]\n if len(set(names)) != len(names):\n raise errors.AnsibleFilterError(\"|failed more than one provider configured with the same name\")\n\n for idp in idp_list:\n idp.validate()\n\n def validate(self):\n ''' validate an instance of this idp class '''\n pass\n\n @staticmethod\n def get_default(key):\n ''' get a default value for a given key '''\n if key == 'mappingMethod':\n return 'claim'\n else:\n return None\n\n def set_provider_item(self, items, required=False):\n ''' set a provider item based on the list of item names provided. 
'''\n for item in items:\n provider_key = items[0]\n if item in self._idp:\n self.provider[provider_key] = self._idp.pop(item)\n break\n else:\n default = self.get_default(provider_key)\n if default is not None:\n self.provider[provider_key] = default\n elif required:\n raise errors.AnsibleFilterError(\"|failed provider {0} missing \"\n \"required key {1}\".format(self.__class__.__name__, provider_key))\n\n def set_provider_items(self):\n ''' set the provider items for this idp '''\n for items in self._required:\n self.set_provider_item(items, True)\n for items in self._optional:\n self.set_provider_item(items)\n if self._allow_additional:\n for key in self._idp.keys():\n self.set_provider_item([key])\n else:\n if len(self._idp) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} \"\n \"contains unknown keys \"\n \"{1}\".format(self.__class__.__name__, ', '.join(self._idp.keys())))\n\n def to_dict(self):\n ''' translate this idp to a dictionary '''\n return dict(name=self.name, challenge=self.challenge,\n login=self.login, mappingMethod=self.mapping_method,\n provider=self.provider)\n\n\nclass LDAPPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" LDAPPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['attributes'], ['url'], ['insecure']]\n self._optional += [['ca'],\n ['bindDN', 'bind_dn'],\n ['bindPassword', 'bind_password']]\n\n self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))\n\n if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']:\n pref_user = self._idp['attributes'].pop('preferred_username')\n self._idp['attributes']['preferredUsername'] = pref_user\n\n def validate(self):\n ''' validate this idp instance '''\n if not isinstance(self.provider['attributes'], dict):\n raise errors.AnsibleFilterError(\"|failed attributes for provider \"\n \"{0} must be a dictionary\".format(self.__class__.__name__))\n\n attrs = ['id', 'email', 'name', 'preferredUsername']\n for attr in attrs:\n if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list):\n raise errors.AnsibleFilterError(\"|failed {0} attribute for \"\n \"provider {1} must be a list\".format(attr, self.__class__.__name__))\n\n unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs)\n if len(unknown_attrs) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"attributes: {1}\".format(self.__class__.__name__, ', '.join(unknown_attrs)))\n\n\nclass KeystonePasswordIdentityProvider(IdentityProviderBase):\n \"\"\" KeystoneIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['url'], ['domainName', 'domain_name']]\n self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]\n\n\nclass RequestHeaderIdentityProvider(IdentityProviderBase):\n \"\"\" RequestHeaderIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def 
__init__(self, api_version, idp):\n super(RequestHeaderIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['headers']]\n self._optional += [['challengeURL', 'challenge_url'],\n ['loginURL', 'login_url'],\n ['clientCA', 'client_ca'],\n ['clientCommonNames', 'client_common_names'],\n ['emailHeaders', 'email_headers'],\n ['nameHeaders', 'name_headers'],\n ['preferredUsernameHeaders', 'preferred_username_headers']]\n\n def validate(self):\n ''' validate this idp instance '''\n if not isinstance(self.provider['headers'], list):\n raise errors.AnsibleFilterError(\"|failed headers for provider {0} \"\n \"must be a list\".format(self.__class__.__name__))\n\n\nclass AllowAllPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" AllowAllPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n\n\nclass DenyAllPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" DenyAllPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n\n\nclass HTPasswdPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" HTPasswdPasswordIdentity\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['file', 'filename', 'fileName', 'file_name']]\n\n @staticmethod\n def get_default(key):\n if key == 'file':\n return '/etc/origin/htpasswd'\n else:\n return IdentityProviderBase.get_default(key)\n\n\nclass BasicAuthPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" BasicAuthPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['url']]\n self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]\n\n\nclass IdentityProviderOauthBase(IdentityProviderBase):\n \"\"\" IdentityProviderOauthBase\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(IdentityProviderOauthBase, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]\n\n def validate(self):\n ''' validate this idp instance '''\n if self.challenge:\n raise errors.AnsibleFilterError(\"|failed provider {0} does not \"\n \"allow challenge authentication\".format(self.__class__.__name__))\n\n\nclass OpenIDIdentityProvider(IdentityProviderOauthBase):\n \"\"\" OpenIDIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n 
\"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._required += [['claims'], ['urls']]\n self._optional += [['ca'],\n ['extraScopes'],\n ['extraAuthorizeParameters']]\n if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:\n pref_user = self._idp['claims'].pop('preferred_username')\n self._idp['claims']['preferredUsername'] = pref_user\n if 'urls' in self._idp and 'user_info' in self._idp['urls']:\n user_info = self._idp['urls'].pop('user_info')\n self._idp['urls']['userInfo'] = user_info\n if 'extra_scopes' in self._idp:\n self._idp['extraScopes'] = self._idp.pop('extra_scopes')\n if 'extra_authorize_parameters' in self._idp:\n self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')\n\n if 'extraAuthorizeParameters' in self._idp:\n if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:\n val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))\n self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val\n\n def validate(self):\n ''' validate this idp instance '''\n IdentityProviderOauthBase.validate(self)\n if not isinstance(self.provider['claims'], dict):\n raise errors.AnsibleFilterError(\"|failed claims for provider {0} \"\n \"must be a dictionary\".format(self.__class__.__name__))\n\n for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):\n if var in self.provider and not isinstance(self.provider[var], var_type):\n raise errors.AnsibleFilterError(\"|failed {1} for provider \"\n \"{0} must be a {2}\".format(self.__class__.__name__,\n var,\n var_type.__class__.__name__))\n\n required_claims = ['id']\n optional_claims = ['email', 'name', 'preferredUsername']\n all_claims = required_claims + optional_claims\n\n for claim in required_claims:\n if claim in required_claims and claim not in self.provider['claims']:\n raise errors.AnsibleFilterError(\"|failed {0} claim missing \"\n \"for provider {1}\".format(claim, self.__class__.__name__))\n\n for claim in all_claims:\n if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):\n raise errors.AnsibleFilterError(\"|failed {0} claims for \"\n \"provider {1} must be a list\".format(claim, self.__class__.__name__))\n\n unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)\n if len(unknown_claims) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"claims: {1}\".format(self.__class__.__name__, ', '.join(unknown_claims)))\n\n if not isinstance(self.provider['urls'], dict):\n raise errors.AnsibleFilterError(\"|failed urls for provider {0} \"\n \"must be a dictionary\".format(self.__class__.__name__))\n\n required_urls = ['authorize', 'token']\n optional_urls = ['userInfo']\n all_urls = required_urls + optional_urls\n\n for url in required_urls:\n if url not in self.provider['urls']:\n raise errors.AnsibleFilterError(\"|failed {0} url missing for \"\n \"provider {1}\".format(url, self.__class__.__name__))\n\n unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)\n if len(unknown_urls) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"urls: {1}\".format(self.__class__.__name__, ', '.join(unknown_urls)))\n\n\nclass GoogleIdentityProvider(IdentityProviderOauthBase):\n \"\"\" GoogleIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n 
\"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._optional += [['hostedDomain', 'hosted_domain']]\n\n\nclass GitHubIdentityProvider(IdentityProviderOauthBase):\n \"\"\" GitHubIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._optional += [['organizations']]\n\n\nclass FilterModule(object):\n ''' Custom ansible filters for use by the openshift_master role'''\n\n @staticmethod\n def translate_idps(idps, api_version, openshift_version, deployment_type):\n ''' Translates a list of dictionaries into a valid identityProviders config '''\n idp_list = []\n\n if not isinstance(idps, list):\n raise errors.AnsibleFilterError(\"|failed expects to filter on a list of identity providers\")\n for idp in idps:\n if not isinstance(idp, dict):\n raise errors.AnsibleFilterError(\"|failed identity providers must be a list of dictionaries\")\n\n cur_module = sys.modules[__name__]\n idp_class = getattr(cur_module, idp['kind'], None)\n idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)\n idp_inst.set_provider_items()\n idp_list.append(idp_inst)\n\n IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)\n return u(yaml.dump([idp.to_dict() for idp in idp_list],\n allow_unicode=True,\n default_flow_style=False,\n width=float(\"inf\"),\n Dumper=AnsibleDumper))\n\n @staticmethod\n def validate_pcs_cluster(data, masters=None):\n ''' Validates output from \"pcs status\", ensuring that each master\n provided is online.\n Ex: data = ('...',\n 'PCSD Status:',\n 'master1.example.com: Online',\n 'master2.example.com: Online',\n 'master3.example.com: Online',\n '...')\n masters = ['master1.example.com',\n 'master2.example.com',\n 'master3.example.com']\n returns True\n '''\n if not issubclass(type(data), string_types):\n raise errors.AnsibleFilterError(\"|failed expects data is a string or unicode\")\n if not issubclass(type(masters), list):\n raise errors.AnsibleFilterError(\"|failed expects masters is a list\")\n valid = True\n for master in masters:\n if \"{0}: Online\".format(master) not in data:\n valid = False\n return valid\n\n @staticmethod\n def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):\n ''' Return certificates to synchronize based on facts. 
'''\n if not issubclass(type(hostvars), dict):\n raise errors.AnsibleFilterError(\"|failed expects hostvars is a dict\")\n certs = ['admin.crt',\n 'admin.key',\n 'admin.kubeconfig',\n 'master.kubelet-client.crt',\n 'master.kubelet-client.key']\n if bool(include_ca):\n certs += ['ca.crt', 'ca.key', 'ca-bundle.crt']\n if bool(include_keys):\n certs += ['serviceaccounts.private.key',\n 'serviceaccounts.public.key']\n if bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):\n certs += ['master.proxy-client.crt',\n 'master.proxy-client.key']\n if not bool(hostvars['openshift']['common']['version_gte_3_2_or_1_2']):\n certs += ['openshift-master.crt',\n 'openshift-master.key',\n 'openshift-master.kubeconfig']\n if bool(hostvars['openshift']['common']['version_gte_3_3_or_1_3']):\n certs += ['service-signer.crt',\n 'service-signer.key']\n if not bool(hostvars['openshift']['common']['version_gte_3_5_or_1_5']):\n certs += ['openshift-registry.crt',\n 'openshift-registry.key',\n 'openshift-registry.kubeconfig',\n 'openshift-router.crt',\n 'openshift-router.key',\n 'openshift-router.kubeconfig']\n return certs\n\n @staticmethod\n def oo_htpasswd_users_from_file(file_contents):\n ''' return a dictionary of htpasswd users from htpasswd file contents '''\n htpasswd_entries = {}\n if not isinstance(file_contents, string_types):\n raise errors.AnsibleFilterError(\"failed, expects to filter on a string\")\n for line in file_contents.splitlines():\n user = None\n passwd = None\n if len(line) == 0:\n continue\n if ':' in line:\n user, passwd = line.split(':', 1)\n\n if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:\n error_msg = \"failed, expects each line to be a colon separated string representing the user and passwd\"\n raise errors.AnsibleFilterError(error_msg)\n htpasswd_entries[user] = passwd\n return htpasswd_entries\n\n def filters(self):\n ''' returns a mapping of filters to methods '''\n return {\"translate_idps\": self.translate_idps,\n \"validate_pcs_cluster\": self.validate_pcs_cluster,\n \"certificates_to_synchronize\": self.certificates_to_synchronize,\n \"oo_htpasswd_users_from_file\": self.oo_htpasswd_users_from_file}\n", "path": "roles/openshift_master_facts/filter_plugins/openshift_master.py"}]} |
gh_patches_debug_1419 | rasdani/github-patches | git_diff | beeware__toga-1078 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
toga-winforms\windows.py openFileDialog needs an "s"
**Describe the bug**
When using multiselect=True in winforms, the returned value is a single file, because
if dialog.ShowDialog() == WinForms.DialogResult.OK:
return dialog.FileName
should be
if dialog.ShowDialog() == WinForms.DialogResult.OK:
return dialog.FileName**s**
**To Reproduce**
fname = self.main_window.open_file_dialog(
title="Open the file",
multiselect=True
)
**Expected behavior**
returns a list of files (fname), which is what happens on a Mac. On Windows it is the name of the first file as a single string. With the suggested fix it returns the selection, but it still needs to be coerced into a list.
**Environment:**
- Operating System: Mac OS 10.15.6 & Windows 10
- Python version: 3.8
- Software versions:
- Briefcase: 0.3.3
- Toga: 0.3.0 dev23
--- END ISSUE ---
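For orientation before the code listing, here is a minimal sketch of the direction the issue suggests, assuming the module's existing `WinForms` import and the standard WinForms `OpenFileDialog` API, where `FileNames` is the multi-selection counterpart of `FileName`; the signature and the cancel behaviour are illustrative assumptions, not the project's actual patch:

```python
# Illustrative sketch only - parameter names and cancel handling are assumptions.
def open_file_dialog(self, title, initial_directory=None, file_types=None, multiselect=False):
    dialog = WinForms.OpenFileDialog()
    dialog.Title = title
    dialog.Multiselect = multiselect
    if dialog.ShowDialog() == WinForms.DialogResult.OK:
        if multiselect:
            # FileNames (note the trailing "s") is a string[]; coerce it into a
            # plain Python list so callers get the same shape as on macOS.
            return [str(name) for name in dialog.FileNames]
        return dialog.FileName
    raise ValueError("No filename provided in the open file dialog")
```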
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/winforms/toga_winforms/window.py`
Content:
```
1 from toga import GROUP_BREAK, SECTION_BREAK
2
3 from .libs import Size, WinForms
4
5
6 class WinFormsViewport:
7 def __init__(self, native, frame):
8 self.native = native
9 self.frame = frame
10 self.baseline_dpi = 96
11
12 @property
13 def width(self):
14 # Treat `native=None` as a 0x0 viewport
15 if self.native is None:
16 return 0
17 return self.native.ClientSize.Width
18
19 @property
20 def height(self):
21 if self.native is None:
22 return 0
23 # Subtract any vertical shift of the frame. This is to allow
24 # for toolbars, or any other viewport-level decoration.
25 return self.native.ClientSize.Height - self.frame.vertical_shift
26
27 @property
28 def dpi(self):
29 if self.native is None:
30 return self.baseline_dpi
31 return self.native.CreateGraphics().DpiX
32
33
34 class Window:
35 def __init__(self, interface):
36 self.interface = interface
37 self.interface._impl = self
38 self.create()
39
40 def create(self):
41 self.native = WinForms.Form(self)
42 self.native.ClientSize = Size(*self.interface._size)
43 self.native.interface = self.interface
44 self.native.Resize += self.winforms_resize
45 self.toolbar_native = None
46 self.toolbar_items = None
47
48 def create_toolbar(self):
49 self.toolbar_native = WinForms.ToolStrip()
50 for cmd in self.interface.toolbar:
51 if cmd == GROUP_BREAK:
52 item = WinForms.ToolStripSeparator()
53 elif cmd == SECTION_BREAK:
54 item = WinForms.ToolStripSeparator()
55 else:
56 if cmd.icon is not None:
57 native_icon = cmd.icon._impl.native
58 item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())
59 else:
60 item = WinForms.ToolStripMenuItem(cmd.label)
61 item.Click += cmd._impl.as_handler()
62 cmd._impl.native.append(item)
63 self.toolbar_native.Items.Add(item)
64
65 def set_position(self, position):
66 pass
67
68 def set_size(self, size):
69 self.native.ClientSize = Size(*self.interface._size)
70
71 def set_app(self, app):
72 if app is None:
73 return
74 icon_impl = app.interface.icon._impl
75 if icon_impl is None:
76 return
77 self.native.Icon = icon_impl.native
78
79 @property
80 def vertical_shift(self):
81 # vertical shift is the toolbar height or 0
82 result = 0
83 try:
84 result += self.native.interface._impl.toolbar_native.Height
85 except AttributeError:
86 pass
87 try:
88 result += self.native.interface._impl.native.MainMenuStrip.Height
89 except AttributeError:
90 pass
91 return result
92
93 def set_content(self, widget):
94 if self.toolbar_native:
95 self.native.Controls.Add(self.toolbar_native)
96 # Create the lookup table of menu items,
97 # then force the creation of the menus.
98 self.native.Controls.Add(widget.native)
99
100 # Set the widget's viewport to be based on the window's content.
101 widget.viewport = WinFormsViewport(native=self.native, frame=self)
102 widget.frame = self
103
104 # Add all children to the content widget.
105 for child in widget.interface.children:
106 child._impl.container = widget
107
108 def set_title(self, title):
109 self.native.Text = title
110
111 def show(self):
112 # The first render of the content will establish the
113 # minimum possible content size; use that to enforce
114 # a minimum window size.
115 TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight
116 # Now that the content is visible, we can do our initial hinting,
117 # and use that as the basis for setting the minimum window size.
118 self.interface.content._impl.rehint()
119 self.interface.content.style.layout(
120 self.interface.content,
121 WinFormsViewport(native=None, frame=None),
122 )
123 self.native.MinimumSize = Size(
124 int(self.interface.content.layout.width),
125 int(self.interface.content.layout.height) + TITLEBAR_HEIGHT
126 )
127 self.interface.content.refresh()
128
129 self.native.Show()
130
131 def winforms_FormClosing(self, event, handler):
132 if self.interface.app.on_exit:
133 self.interface.app.on_exit(self.interface.app)
134
135 def set_full_screen(self, is_full_screen):
136 self.interface.factory.not_implemented('Window.set_full_screen()')
137
138 def on_close(self):
139 pass
140
141 def close(self):
142 self.native.Close()
143
144 def winforms_resize(self, sender, args):
145 if self.interface.content:
146 # Re-layout the content
147 self.interface.content.refresh()
148
149 def info_dialog(self, title, message):
150 return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)
151
152 def question_dialog(self, title, message):
153 result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)
154 return result
155
156 def confirm_dialog(self, title, message):
157 result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)
158 # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel
159 return True if result == WinForms.DialogResult.OK else False
160
161 def error_dialog(self, title, message):
162 return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,
163 WinForms.MessageBoxIcon.Error)
164
165 def stack_trace_dialog(self, title, message, content, retry=False):
166 pass
167
168 def save_file_dialog(self, title, suggested_filename, file_types):
169 dialog = WinForms.SaveFileDialog()
170 dialog.Title = title
171 if suggested_filename is not None:
172 dialog.FileName = suggested_filename
173 if file_types is not None:
174 dialog.Filter = self.build_filter(file_types)
175 if dialog.ShowDialog() == WinForms.DialogResult.OK:
176 return dialog.FileName
177 else:
178 raise ValueError("No filename provided in the save file dialog")
179
180 def open_file_dialog(self, title, initial_directory, file_types, multiselect):
181 dialog = WinForms.OpenFileDialog()
182 dialog.Title = title
183 if initial_directory is not None:
184 dialog.InitialDirectory = initial_directory
185 if file_types is not None:
186 dialog.Filter = self.build_filter(file_types)
187 if multiselect:
188 dialog.Multiselect = True
189 if dialog.ShowDialog() == WinForms.DialogResult.OK:
190 return dialog.FileName
191 else:
192 raise ValueError("No filename provided in the open file dialog")
193
194 def select_folder_dialog(self, title, initial_directory, multiselect):
195 dialog = WinForms.FolderBrowserDialog()
196 dialog.Title = title
197 if initial_directory is not None:
198 dialog.InitialDirectory = initial_directory
199
200 if dialog.ShowDialog() == WinForms.DialogResult.OK:
201 return [dialog.SelectedPath]
202 else:
203 raise ValueError("No folder provided in the select folder dialog")
204
205 def build_filter(self, file_types):
206 file_string = "{0} files (*.{0})|*.{0}"
207 return '|'.join([file_string.format(ext) for ext in file_types]) + \
208 "|All files (*.*)|*.*"
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py
--- a/src/winforms/toga_winforms/window.py
+++ b/src/winforms/toga_winforms/window.py
@@ -187,7 +187,7 @@
if multiselect:
dialog.Multiselect = True
if dialog.ShowDialog() == WinForms.DialogResult.OK:
- return dialog.FileName
+ return dialog.FileNames if multiselect else dialog.FileName
else:
raise ValueError("No filename provided in the open file dialog")
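With that change the caller-side snippet from the report gets a multi-item result on Windows as well. As the reporter notes, `FileNames` is a .NET string array, so code that needs a genuine Python list can still coerce it (sketch reusing the report's own call):

```python
fname = self.main_window.open_file_dialog(
    title="Open the file",
    multiselect=True
)
paths = list(fname)  # FileNames is a .NET string[]; wrap in list() if a Python list is required
```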
| {"golden_diff": "diff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py\n--- a/src/winforms/toga_winforms/window.py\n+++ b/src/winforms/toga_winforms/window.py\n@@ -187,7 +187,7 @@\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n- return dialog.FileName\n+ return dialog.FileNames if multiselect else dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n", "issue": "toga-winforms\\windows.py openFileDialog needs an \"s\"\n**Describe the bug**\r\nWhen using multiselect = True in winforms the returned value is a single file because \r\n\r\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\r\n return dialog.FileName\r\n\r\nshould be \r\n\r\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\r\n return dialog.FileName**s**\r\n\r\n \r\n\r\n**To Reproduce**\r\n\r\n fname = self.main_window.open_file_dialog(\r\n title=\"Open the file\",\r\n multiselect=True\r\n )\r\n\r\n\r\n**Expected behavior**\r\nreturns a list of files (fname) which is true on a Mac. On Windows it is the name of the first file as a single string . With suggested fix return selection but it still needs to be coerced into a list.\r\n\r\n\r\n**Environment:**\r\n - Operating System: Mac OS 10.15.6 & Windows 10\r\n - Python version: 3.8\r\n - Software versions:\r\n - Briefcase: 0.3.3\r\n - Toga: 0.3.0 dev23\r\n\n", "before_files": [{"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. 
This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self.create()\n\n def create(self):\n self.native = WinForms.Form(self)\n self.native.ClientSize = Size(*self.interface._size)\n self.native.interface = self.interface\n self.native.Resize += self.winforms_resize\n self.toolbar_native = None\n self.toolbar_items = None\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def set_position(self, position):\n pass\n\n def set_size(self, size):\n self.native.ClientSize = Size(*self.interface._size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n self.interface.content.refresh()\n\n self.native.Show()\n\n def winforms_FormClosing(self, event, handler):\n if self.interface.app.on_exit:\n self.interface.app.on_exit(self.interface.app)\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def on_close(self):\n pass\n\n def close(self):\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n 
self.interface.content.refresh()\n\n def info_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)\n\n def question_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)\n return result\n\n def confirm_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)\n # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel\n return True if result == WinForms.DialogResult.OK else False\n\n def error_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error)\n\n def stack_trace_dialog(self, title, message, content, retry=False):\n pass\n\n def save_file_dialog(self, title, suggested_filename, file_types):\n dialog = WinForms.SaveFileDialog()\n dialog.Title = title\n if suggested_filename is not None:\n dialog.FileName = suggested_filename\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the save file dialog\")\n\n def open_file_dialog(self, title, initial_directory, file_types, multiselect):\n dialog = WinForms.OpenFileDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n\n def select_folder_dialog(self, title, initial_directory, multiselect):\n dialog = WinForms.FolderBrowserDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return [dialog.SelectedPath]\n else:\n raise ValueError(\"No folder provided in the select folder dialog\")\n\n def build_filter(self, file_types):\n file_string = \"{0} files (*.{0})|*.{0}\"\n return '|'.join([file_string.format(ext) for ext in file_types]) + \\\n \"|All files (*.*)|*.*\"\n", "path": "src/winforms/toga_winforms/window.py"}], "after_files": [{"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. 
This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self.create()\n\n def create(self):\n self.native = WinForms.Form(self)\n self.native.ClientSize = Size(*self.interface._size)\n self.native.interface = self.interface\n self.native.Resize += self.winforms_resize\n self.toolbar_native = None\n self.toolbar_items = None\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def set_position(self, position):\n pass\n\n def set_size(self, size):\n self.native.ClientSize = Size(*self.interface._size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n self.interface.content.refresh()\n\n self.native.Show()\n\n def winforms_FormClosing(self, event, handler):\n if self.interface.app.on_exit:\n self.interface.app.on_exit(self.interface.app)\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def on_close(self):\n pass\n\n def close(self):\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n 
self.interface.content.refresh()\n\n def info_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)\n\n def question_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)\n return result\n\n def confirm_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)\n # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel\n return True if result == WinForms.DialogResult.OK else False\n\n def error_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error)\n\n def stack_trace_dialog(self, title, message, content, retry=False):\n pass\n\n def save_file_dialog(self, title, suggested_filename, file_types):\n dialog = WinForms.SaveFileDialog()\n dialog.Title = title\n if suggested_filename is not None:\n dialog.FileName = suggested_filename\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the save file dialog\")\n\n def open_file_dialog(self, title, initial_directory, file_types, multiselect):\n dialog = WinForms.OpenFileDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileNames if multiselect else dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n\n def select_folder_dialog(self, title, initial_directory, multiselect):\n dialog = WinForms.FolderBrowserDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return [dialog.SelectedPath]\n else:\n raise ValueError(\"No folder provided in the select folder dialog\")\n\n def build_filter(self, file_types):\n file_string = \"{0} files (*.{0})|*.{0}\"\n return '|'.join([file_string.format(ext) for ext in file_types]) + \\\n \"|All files (*.*)|*.*\"\n", "path": "src/winforms/toga_winforms/window.py"}]} |
gh_patches_debug_1420 | rasdani/github-patches | git_diff | facebookresearch__hydra-2677 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Fix failing tests
Several tests are broken on main
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import os
3
4 from omegaconf import DictConfig
5
6 import hydra
7
8
9 @hydra.main(version_base=None)
10 def my_app(_cfg: DictConfig) -> None:
11 print(f"Working directory : {os.getcwd()}")
12 print(f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}")
13
14
15 if __name__ == "__main__":
16 my_app()
17
```
--- END FILES ---
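The file above runs fine, so the obvious suspect (assuming the failing checks are the repository's lint/format gates rather than functional tests) is the second `print` call, whose line runs to roughly 96 characters and exceeds the usual 79/88-character limits. A black-style wrapping of that call, the shape a fix would likely take, is:

```python
print(
    f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}"
)
```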
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
--- a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
+++ b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
@@ -9,7 +9,9 @@
@hydra.main(version_base=None)
def my_app(_cfg: DictConfig) -> None:
print(f"Working directory : {os.getcwd()}")
- print(f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}")
+ print(
+ f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}"
+ )
if __name__ == "__main__":
| {"golden_diff": "diff --git a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n--- a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n+++ b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n@@ -9,7 +9,9 @@\n @hydra.main(version_base=None)\n def my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n- print(f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\")\n+ print(\n+ f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\"\n+ )\n \n \n if __name__ == \"__main__\":\n", "issue": "[Bug] Fix failing tests\nSeveral tests are broken on main\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\n\nfrom omegaconf import DictConfig\n\nimport hydra\n\n\[email protected](version_base=None)\ndef my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n print(f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\")\n\n\nif __name__ == \"__main__\":\n my_app()\n", "path": "examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\n\nfrom omegaconf import DictConfig\n\nimport hydra\n\n\[email protected](version_base=None)\ndef my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n print(\n f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\"\n )\n\n\nif __name__ == \"__main__\":\n my_app()\n", "path": "examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py"}]} |
gh_patches_debug_1421 | rasdani/github-patches | git_diff | Pycord-Development__pycord-888 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
case_insensitive not working properly in 2.0.0b1
### Summary
Commands will only be accepted if they are full lowercase
### Reproduction Steps
Implement a command whose name is not all lowercase
Add case_insensitive=True to the Bot
The command is only accessible when typed in full lowercase

Tested the same setup on both 2.0.0b1 and 1.7.3:
1.7.3 accepted every casing of the command, while 2.0.0b1 only accepts the lowercase form
### Minimal Reproducible Code
```python
from discord.ext import commands
comand_prefix = "-"
bot_token = open("Bot_Token.txt").readline()
bot = commands.Bot(command_prefix=comand_prefix, case_insensitive=True)
class a(commands.Cog, name="Cog Name"):
@commands.command()
async def Test(self, ctx):
print("test")
bot.add_cog(a(bot))
bot.run(bot_token)
# -> Accepts "-test" but not "-Test"
```
### Expected Results
Commands are accepted case-insensitively
### Actual Results
Commands are only accepted when fully lowercase; any other casing results in a CommandNotFound error
### Intents
None
### System Information
- Python v3.10.0-final
- py-cord v2.0.0-beta
- py-cord pkg_resources: v2.0.0b1
- aiohttp v3.7.4.post0
- system info: Windows 10 10.0.19043
--- END ISSUE ---
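Background on the mechanism at play: in discord.py-derived libraries, `case_insensitive=True` works by keeping prefixed commands in a mapping that casefolds keys on both registration and lookup, so a command registered as `Test` can also be found via `test` or `TEST`. The sketch below illustrates the idea only; the class name and usage are illustrative, not Pycord's real internals:

```python
class CaseInsensitiveDict(dict):
    # Toy mapping that ignores key case (illustration, not Pycord's implementation).
    def __setitem__(self, key, value):
        super().__setitem__(key.casefold(), value)

    def __getitem__(self, key):
        return super().__getitem__(key.casefold())

    def get(self, key, default=None):
        return super().get(key.casefold(), default)

    def __contains__(self, key):
        return super().__contains__(key.casefold())


table = CaseInsensitiveDict()
table["Test"] = "handler"
assert table.get("Test") == "handler"  # any casing resolves
assert table.get("test") == "handler"
```

The reported behaviour is what you get when the lookup in `get_context` resolves the invoker against a mapping that is not the case-insensitive one.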
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord/ext/commands/bot.py`
Content:
```
1 """
2 The MIT License (MIT)
3
4 Copyright (c) 2015-2021 Rapptz
5 Copyright (c) 2021-present Pycord Development
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 and/or sell copies of the Software, and to permit persons to whom the
12 Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice shall be included in
15 all copies or substantial portions of the Software.
16
17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 DEALINGS IN THE SOFTWARE.
24 """
25
26 from __future__ import annotations
27
28
29 import asyncio
30 import collections
31 import collections.abc
32 import inspect
33 import importlib.util
34 import sys
35 import traceback
36 import types
37 from typing import Any, Callable, Mapping, List, Dict, TYPE_CHECKING, Optional, TypeVar, Type, Union
38
39 import discord
40
41 from .core import GroupMixin
42 from .view import StringView
43 from .context import Context
44 from . import errors
45 from .help import HelpCommand, DefaultHelpCommand
46 from .cog import Cog
47
48 if TYPE_CHECKING:
49 import importlib.machinery
50
51 from discord.message import Message
52 from ._types import (
53 Check,
54 CoroFunc,
55 )
56
57 __all__ = (
58 'when_mentioned',
59 'when_mentioned_or',
60 'Bot',
61 'AutoShardedBot',
62 )
63
64 MISSING: Any = discord.utils.MISSING
65
66 T = TypeVar('T')
67 CFT = TypeVar('CFT', bound='CoroFunc')
68 CXT = TypeVar('CXT', bound='Context')
69
70 def when_mentioned(bot: Union[Bot, AutoShardedBot], msg: Message) -> List[str]:
71 """A callable that implements a command prefix equivalent to being mentioned.
72
73 These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
74 """
75 # bot.user will never be None when this is called
76 return [f'<@{bot.user.id}> ', f'<@!{bot.user.id}> '] # type: ignore
77
78 def when_mentioned_or(*prefixes: str) -> Callable[[Union[Bot, AutoShardedBot], Message], List[str]]:
79 """A callable that implements when mentioned or other prefixes provided.
80
81 These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
82
83 Example
84 --------
85
86 .. code-block:: python3
87
88 bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
89
90
91 .. note::
92
93 This callable returns another callable, so if this is done inside a custom
94 callable, you must call the returned callable, for example:
95
96 .. code-block:: python3
97
98 async def get_prefix(bot, message):
99 extras = await prefixes_for(message.guild) # returns a list
100 return commands.when_mentioned_or(*extras)(bot, message)
101
102
103 See Also
104 ----------
105 :func:`.when_mentioned`
106 """
107 def inner(bot, msg):
108 r = list(prefixes)
109 r = when_mentioned(bot, msg) + r
110 return r
111
112 return inner
113
114 def _is_submodule(parent: str, child: str) -> bool:
115 return parent == child or child.startswith(parent + ".")
116
117 class _DefaultRepr:
118 def __repr__(self):
119 return '<default-help-command>'
120
121 _default = _DefaultRepr()
122
123 class BotBase(GroupMixin, discord.cog.CogMixin):
124 _supports_prefixed_commands = True
125 def __init__(self, command_prefix=when_mentioned, help_command=_default, **options):
126 super().__init__(**options)
127 self.command_prefix = command_prefix
128 self._help_command = None
129 self.strip_after_prefix = options.get('strip_after_prefix', False)
130
131 if help_command is _default:
132 self.help_command = DefaultHelpCommand()
133 else:
134 self.help_command = help_command
135
136 @discord.utils.copy_doc(discord.Client.close)
137 async def close(self) -> None:
138 for extension in tuple(self.__extensions):
139 try:
140 self.unload_extension(extension)
141 except Exception:
142 pass
143
144 for cog in tuple(self.__cogs):
145 try:
146 self.remove_cog(cog)
147 except Exception:
148 pass
149
150 await super().close() # type: ignore
151
152 async def on_command_error(self, context: Context, exception: errors.CommandError) -> None:
153 """|coro|
154
155 The default command error handler provided by the bot.
156
157 By default this prints to :data:`sys.stderr` however it could be
158 overridden to have a different implementation.
159
160 This only fires if you do not specify any listeners for command error.
161 """
162 if self.extra_events.get('on_command_error', None):
163 return
164
165 command = context.command
166 if command and command.has_error_handler():
167 return
168
169 cog = context.cog
170 if cog and cog.has_error_handler():
171 return
172
173 print(f'Ignoring exception in command {context.command}:', file=sys.stderr)
174 traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
175
176 async def can_run(self, ctx: Context, *, call_once: bool = False) -> bool:
177 data = self._check_once if call_once else self._checks
178
179 if len(data) == 0:
180 return True
181
182 # type-checker doesn't distinguish between functions and methods
183 return await discord.utils.async_all(f(ctx) for f in data) # type: ignore
184 # help command stuff
185
186 @property
187 def help_command(self) -> Optional[HelpCommand]:
188 return self._help_command
189
190 @help_command.setter
191 def help_command(self, value: Optional[HelpCommand]) -> None:
192 if value is not None:
193 if not isinstance(value, HelpCommand):
194 raise TypeError('help_command must be a subclass of HelpCommand')
195 if self._help_command is not None:
196 self._help_command._remove_from_bot(self)
197 self._help_command = value
198 value._add_to_bot(self)
199 elif self._help_command is not None:
200 self._help_command._remove_from_bot(self)
201 self._help_command = None
202 else:
203 self._help_command = None
204
205 # command processing
206
207 async def get_prefix(self, message: Message) -> Union[List[str], str]:
208 """|coro|
209
210 Retrieves the prefix the bot is listening to
211 with the message as a context.
212
213 Parameters
214 -----------
215 message: :class:`discord.Message`
216 The message context to get the prefix of.
217
218 Returns
219 --------
220 Union[List[:class:`str`], :class:`str`]
221 A list of prefixes or a single prefix that the bot is
222 listening for.
223 """
224 prefix = ret = self.command_prefix
225 if callable(prefix):
226 ret = await discord.utils.maybe_coroutine(prefix, self, message)
227
228 if not isinstance(ret, str):
229 try:
230 ret = list(ret)
231 except TypeError:
232 # It's possible that a generator raised this exception. Don't
233 # replace it with our own error if that's the case.
234 if isinstance(ret, collections.abc.Iterable):
235 raise
236
237 raise TypeError("command_prefix must be plain string, iterable of strings, or callable "
238 f"returning either of these, not {ret.__class__.__name__}")
239
240 if not ret:
241 raise ValueError("Iterable command_prefix must contain at least one prefix")
242
243 return ret
244
245 async def get_context(self, message: Message, *, cls: Type[CXT] = Context) -> CXT:
246 r"""|coro|
247
248 Returns the invocation context from the message.
249
250 This is a more low-level counter-part for :meth:`.process_commands`
251 to allow users more fine grained control over the processing.
252
253 The returned context is not guaranteed to be a valid invocation
254 context, :attr:`.Context.valid` must be checked to make sure it is.
255 If the context is not valid then it is not a valid candidate to be
256 invoked under :meth:`~.Bot.invoke`.
257
258 Parameters
259 -----------
260 message: :class:`discord.Message`
261 The message to get the invocation context from.
262 cls
263 The factory class that will be used to create the context.
264 By default, this is :class:`.Context`. Should a custom
265 class be provided, it must be similar enough to :class:`.Context`\'s
266 interface.
267
268 Returns
269 --------
270 :class:`.Context`
271 The invocation context. The type of this can change via the
272 ``cls`` parameter.
273 """
274
275 view = StringView(message.content)
276 ctx = cls(prefix=None, view=view, bot=self, message=message)
277
278 if message.author.id == self.user.id: # type: ignore
279 return ctx
280
281 prefix = await self.get_prefix(message)
282 invoked_prefix = prefix
283
284 if isinstance(prefix, str):
285 if not view.skip_string(prefix):
286 return ctx
287 else:
288 try:
289 # if the context class' __init__ consumes something from the view this
290 # will be wrong. That seems unreasonable though.
291 if message.content.startswith(tuple(prefix)):
292 invoked_prefix = discord.utils.find(view.skip_string, prefix)
293 else:
294 return ctx
295
296 except TypeError:
297 if not isinstance(prefix, list):
298 raise TypeError("get_prefix must return either a string or a list of string, "
299 f"not {prefix.__class__.__name__}")
300
301 # It's possible a bad command_prefix got us here.
302 for value in prefix:
303 if not isinstance(value, str):
304 raise TypeError("Iterable command_prefix or list returned from get_prefix must "
305 f"contain only strings, not {value.__class__.__name__}")
306
307 # Getting here shouldn't happen
308 raise
309
310 if self.strip_after_prefix:
311 view.skip_ws()
312
313 invoker = view.get_word()
314 ctx.invoked_with = invoker
315 # type-checker fails to narrow invoked_prefix type.
316 ctx.prefix = invoked_prefix # type: ignore
317 ctx.command = self.all_commands.get(invoker)
318 return ctx
319
320 async def invoke(self, ctx: Context) -> None:
321 """|coro|
322
323 Invokes the command given under the invocation context and
324 handles all the internal event dispatch mechanisms.
325
326 Parameters
327 -----------
328 ctx: :class:`.Context`
329 The invocation context to invoke.
330 """
331 if ctx.command is not None:
332 self.dispatch('command', ctx)
333 try:
334 if await self.can_run(ctx, call_once=True):
335 await ctx.command.invoke(ctx)
336 else:
337 raise errors.CheckFailure('The global check once functions failed.')
338 except errors.CommandError as exc:
339 await ctx.command.dispatch_error(ctx, exc)
340 else:
341 self.dispatch('command_completion', ctx)
342 elif ctx.invoked_with:
343 exc = errors.CommandNotFound(f'Command "{ctx.invoked_with}" is not found')
344 self.dispatch('command_error', ctx, exc)
345
346 async def process_commands(self, message: Message) -> None:
347 """|coro|
348
349 This function processes the commands that have been registered
350 to the bot and other groups. Without this coroutine, none of the
351 commands will be triggered.
352
353 By default, this coroutine is called inside the :func:`.on_message`
354 event. If you choose to override the :func:`.on_message` event, then
355 you should invoke this coroutine as well.
356
357 This is built using other low level tools, and is equivalent to a
358 call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.
359
360 This also checks if the message's author is a bot and doesn't
361 call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.
362
363 Parameters
364 -----------
365 message: :class:`discord.Message`
366 The message to process commands for.
367 """
368 if message.author.bot:
369 return
370
371 ctx = await self.get_context(message)
372 await self.invoke(ctx)
373
374 async def on_message(self, message):
375 await self.process_commands(message)
376
377
378 class Bot(BotBase, discord.Bot):
379 """Represents a discord bot.
380
381 This class is a subclass of :class:`discord.Bot` and as a result
382 anything that you can do with a :class:`discord.Bot` you can do with
383 this bot.
384
385 This class also subclasses :class:`.GroupMixin` to provide the functionality
386 to manage commands.
387
388 Attributes
389 -----------
390 command_prefix
391 The command prefix is what the message content must contain initially
392 to have a command invoked. This prefix could either be a string to
393 indicate what the prefix should be, or a callable that takes in the bot
394 as its first parameter and :class:`discord.Message` as its second
395 parameter and returns the prefix. This is to facilitate "dynamic"
396 command prefixes. This callable can be either a regular function or
397 a coroutine.
398
399 An empty string as the prefix always matches, enabling prefix-less
400 command invocation. While this may be useful in DMs it should be avoided
401 in servers, as it's likely to cause performance issues and unintended
402 command invocations.
403
404 The command prefix could also be an iterable of strings indicating that
405 multiple checks for the prefix should be used and the first one to
406 match will be the invocation prefix. You can get this prefix via
407 :attr:`.Context.prefix`. To avoid confusion empty iterables are not
408 allowed.
409
410 .. note::
411
412 When passing multiple prefixes be careful to not pass a prefix
413 that matches a longer prefix occurring later in the sequence. For
414 example, if the command prefix is ``('!', '!?')`` the ``'!?'``
415 prefix will never be matched to any message as the previous one
416 matches messages starting with ``!?``. This is especially important
417 when passing an empty string, it should always be last as no prefix
418 after it will be matched.
419 case_insensitive: :class:`bool`
420 Whether the commands should be case insensitive. Defaults to ``False``. This
421 attribute does not carry over to groups. You must set it to every group if
422 you require group commands to be case insensitive as well.
423 help_command: Optional[:class:`.HelpCommand`]
424 The help command implementation to use. This can be dynamically
425 set at runtime. To remove the help command pass ``None``. For more
426 information on implementing a help command, see :ref:`ext_commands_help_command`.
427 strip_after_prefix: :class:`bool`
428 Whether to strip whitespace characters after encountering the command
429 prefix. This allows for ``! hello`` and ``!hello`` to both work if
430 the ``command_prefix`` is set to ``!``. Defaults to ``False``.
431
432 .. versionadded:: 1.7
433 """
434 pass
435
436 class AutoShardedBot(BotBase, discord.AutoShardedBot):
437 """This is similar to :class:`.Bot` except that it is inherited from
438 :class:`discord.AutoShardedBot` instead.
439 """
440 pass
441
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/discord/ext/commands/bot.py b/discord/ext/commands/bot.py
--- a/discord/ext/commands/bot.py
+++ b/discord/ext/commands/bot.py
@@ -314,7 +314,7 @@
ctx.invoked_with = invoker
# type-checker fails to narrow invoked_prefix type.
ctx.prefix = invoked_prefix # type: ignore
- ctx.command = self.all_commands.get(invoker)
+ ctx.command = self.prefixed_commands.get(invoker)
return ctx
async def invoke(self, ctx: Context) -> None:
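The one-line fix makes `get_context` resolve the invoker against `self.prefixed_commands`, the table that the 2.0 split between application and prefixed commands actually populates for text commands (and, judging by the report and this change, the one that honours `case_insensitive`), rather than `self.all_commands`; with it, `-test` and `-Test` both reach the same command.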
| {"golden_diff": "diff --git a/discord/ext/commands/bot.py b/discord/ext/commands/bot.py\n--- a/discord/ext/commands/bot.py\n+++ b/discord/ext/commands/bot.py\n@@ -314,7 +314,7 @@\n ctx.invoked_with = invoker\n # type-checker fails to narrow invoked_prefix type.\n ctx.prefix = invoked_prefix # type: ignore\n- ctx.command = self.all_commands.get(invoker)\n+ ctx.command = self.prefixed_commands.get(invoker)\n return ctx\n \n async def invoke(self, ctx: Context) -> None:\n", "issue": "case_insensitive not working properly in 2.0.0b1\n### Summary\r\n\r\nCommands will only be accepted if they are full lowercase\r\n\r\n### Reproduction Steps\r\n\r\nImplement command with not only lowercase letters\r\nAdd case_insensitive=True\r\nCommand is only accessible with full lowercase \r\n\r\nTested the same setup in 2.0.0b1 and 1.7.3\r\n1.7.3 accepted all inputs as Command, 2.0.0b1 only accepts lowercase commands\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\nfrom discord.ext import commands\r\n\r\ncomand_prefix = \"-\"\r\nbot_token = open(\"Bot_Token.txt\").readline()\r\n\r\nbot = commands.Bot(command_prefix=comand_prefix, case_insensitive=True)\r\n\r\nclass a(commands.Cog, name=\"Cog Name\"):\r\n\r\n @commands.command()\r\n async def Test(self, ctx):\r\n print(\"test\")\r\n\r\nbot.add_cog(a(bot))\r\nbot.run(bot_token)\r\n\r\n# -> Accepts \"-test\" but not \"-Test\"\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nAccept commands case insensitive\r\n\r\n### Actual Results\r\n\r\nAccepts commands only when lowercase, otherwise CommandNotFound error\r\n\r\n### Intents\r\n\r\nNone\r\n\r\n### System Information\r\n\r\n- Python v3.10.0-final\r\n- py-cord v2.0.0-beta\r\n - py-cord pkg_resources: v2.0.0b1\r\n- aiohttp v3.7.4.post0\r\n- system info: Windows 10 10.0.19043\r\n\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\n\nimport asyncio\nimport collections\nimport collections.abc\nimport inspect\nimport importlib.util\nimport sys\nimport traceback\nimport types\nfrom typing import Any, Callable, Mapping, List, Dict, TYPE_CHECKING, Optional, TypeVar, Type, Union\n\nimport discord\n\nfrom .core import GroupMixin\nfrom .view import StringView\nfrom .context import Context\nfrom . 
import errors\nfrom .help import HelpCommand, DefaultHelpCommand\nfrom .cog import Cog\n\nif TYPE_CHECKING:\n import importlib.machinery\n\n from discord.message import Message\n from ._types import (\n Check,\n CoroFunc,\n )\n\n__all__ = (\n 'when_mentioned',\n 'when_mentioned_or',\n 'Bot',\n 'AutoShardedBot',\n)\n\nMISSING: Any = discord.utils.MISSING\n\nT = TypeVar('T')\nCFT = TypeVar('CFT', bound='CoroFunc')\nCXT = TypeVar('CXT', bound='Context')\n\ndef when_mentioned(bot: Union[Bot, AutoShardedBot], msg: Message) -> List[str]:\n \"\"\"A callable that implements a command prefix equivalent to being mentioned.\n\n These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.\n \"\"\"\n # bot.user will never be None when this is called\n return [f'<@{bot.user.id}> ', f'<@!{bot.user.id}> '] # type: ignore\n\ndef when_mentioned_or(*prefixes: str) -> Callable[[Union[Bot, AutoShardedBot], Message], List[str]]:\n \"\"\"A callable that implements when mentioned or other prefixes provided.\n\n These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.\n\n Example\n --------\n\n .. code-block:: python3\n\n bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))\n\n\n .. note::\n\n This callable returns another callable, so if this is done inside a custom\n callable, you must call the returned callable, for example:\n\n .. code-block:: python3\n\n async def get_prefix(bot, message):\n extras = await prefixes_for(message.guild) # returns a list\n return commands.when_mentioned_or(*extras)(bot, message)\n\n\n See Also\n ----------\n :func:`.when_mentioned`\n \"\"\"\n def inner(bot, msg):\n r = list(prefixes)\n r = when_mentioned(bot, msg) + r\n return r\n\n return inner\n\ndef _is_submodule(parent: str, child: str) -> bool:\n return parent == child or child.startswith(parent + \".\")\n\nclass _DefaultRepr:\n def __repr__(self):\n return '<default-help-command>'\n\n_default = _DefaultRepr()\n\nclass BotBase(GroupMixin, discord.cog.CogMixin):\n _supports_prefixed_commands = True\n def __init__(self, command_prefix=when_mentioned, help_command=_default, **options):\n super().__init__(**options)\n self.command_prefix = command_prefix\n self._help_command = None\n self.strip_after_prefix = options.get('strip_after_prefix', False)\n\n if help_command is _default:\n self.help_command = DefaultHelpCommand()\n else:\n self.help_command = help_command\n\n @discord.utils.copy_doc(discord.Client.close)\n async def close(self) -> None:\n for extension in tuple(self.__extensions):\n try:\n self.unload_extension(extension)\n except Exception:\n pass\n\n for cog in tuple(self.__cogs):\n try:\n self.remove_cog(cog)\n except Exception:\n pass\n\n await super().close() # type: ignore\n\n async def on_command_error(self, context: Context, exception: errors.CommandError) -> None:\n \"\"\"|coro|\n\n The default command error handler provided by the bot.\n\n By default this prints to :data:`sys.stderr` however it could be\n overridden to have a different implementation.\n\n This only fires if you do not specify any listeners for command error.\n \"\"\"\n if self.extra_events.get('on_command_error', None):\n return\n\n command = context.command\n if command and command.has_error_handler():\n return\n\n cog = context.cog\n if cog and cog.has_error_handler():\n return\n\n print(f'Ignoring exception in command {context.command}:', file=sys.stderr)\n traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)\n\n async def can_run(self, ctx: 
Context, *, call_once: bool = False) -> bool:\n data = self._check_once if call_once else self._checks\n\n if len(data) == 0:\n return True\n\n # type-checker doesn't distinguish between functions and methods\n return await discord.utils.async_all(f(ctx) for f in data) # type: ignore\n # help command stuff\n\n @property\n def help_command(self) -> Optional[HelpCommand]:\n return self._help_command\n\n @help_command.setter\n def help_command(self, value: Optional[HelpCommand]) -> None:\n if value is not None:\n if not isinstance(value, HelpCommand):\n raise TypeError('help_command must be a subclass of HelpCommand')\n if self._help_command is not None:\n self._help_command._remove_from_bot(self)\n self._help_command = value\n value._add_to_bot(self)\n elif self._help_command is not None:\n self._help_command._remove_from_bot(self)\n self._help_command = None\n else:\n self._help_command = None\n\n # command processing\n\n async def get_prefix(self, message: Message) -> Union[List[str], str]:\n \"\"\"|coro|\n\n Retrieves the prefix the bot is listening to\n with the message as a context.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message context to get the prefix of.\n\n Returns\n --------\n Union[List[:class:`str`], :class:`str`]\n A list of prefixes or a single prefix that the bot is\n listening for.\n \"\"\"\n prefix = ret = self.command_prefix\n if callable(prefix):\n ret = await discord.utils.maybe_coroutine(prefix, self, message)\n\n if not isinstance(ret, str):\n try:\n ret = list(ret)\n except TypeError:\n # It's possible that a generator raised this exception. Don't\n # replace it with our own error if that's the case.\n if isinstance(ret, collections.abc.Iterable):\n raise\n\n raise TypeError(\"command_prefix must be plain string, iterable of strings, or callable \"\n f\"returning either of these, not {ret.__class__.__name__}\")\n\n if not ret:\n raise ValueError(\"Iterable command_prefix must contain at least one prefix\")\n\n return ret\n\n async def get_context(self, message: Message, *, cls: Type[CXT] = Context) -> CXT:\n r\"\"\"|coro|\n\n Returns the invocation context from the message.\n\n This is a more low-level counter-part for :meth:`.process_commands`\n to allow users more fine grained control over the processing.\n\n The returned context is not guaranteed to be a valid invocation\n context, :attr:`.Context.valid` must be checked to make sure it is.\n If the context is not valid then it is not a valid candidate to be\n invoked under :meth:`~.Bot.invoke`.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message to get the invocation context from.\n cls\n The factory class that will be used to create the context.\n By default, this is :class:`.Context`. Should a custom\n class be provided, it must be similar enough to :class:`.Context`\\'s\n interface.\n\n Returns\n --------\n :class:`.Context`\n The invocation context. The type of this can change via the\n ``cls`` parameter.\n \"\"\"\n\n view = StringView(message.content)\n ctx = cls(prefix=None, view=view, bot=self, message=message)\n\n if message.author.id == self.user.id: # type: ignore\n return ctx\n\n prefix = await self.get_prefix(message)\n invoked_prefix = prefix\n\n if isinstance(prefix, str):\n if not view.skip_string(prefix):\n return ctx\n else:\n try:\n # if the context class' __init__ consumes something from the view this\n # will be wrong. 
That seems unreasonable though.\n if message.content.startswith(tuple(prefix)):\n invoked_prefix = discord.utils.find(view.skip_string, prefix)\n else:\n return ctx\n\n except TypeError:\n if not isinstance(prefix, list):\n raise TypeError(\"get_prefix must return either a string or a list of string, \"\n f\"not {prefix.__class__.__name__}\")\n\n # It's possible a bad command_prefix got us here.\n for value in prefix:\n if not isinstance(value, str):\n raise TypeError(\"Iterable command_prefix or list returned from get_prefix must \"\n f\"contain only strings, not {value.__class__.__name__}\")\n\n # Getting here shouldn't happen\n raise\n\n if self.strip_after_prefix:\n view.skip_ws()\n\n invoker = view.get_word()\n ctx.invoked_with = invoker\n # type-checker fails to narrow invoked_prefix type.\n ctx.prefix = invoked_prefix # type: ignore\n ctx.command = self.all_commands.get(invoker)\n return ctx\n\n async def invoke(self, ctx: Context) -> None:\n \"\"\"|coro|\n\n Invokes the command given under the invocation context and\n handles all the internal event dispatch mechanisms.\n\n Parameters\n -----------\n ctx: :class:`.Context`\n The invocation context to invoke.\n \"\"\"\n if ctx.command is not None:\n self.dispatch('command', ctx)\n try:\n if await self.can_run(ctx, call_once=True):\n await ctx.command.invoke(ctx)\n else:\n raise errors.CheckFailure('The global check once functions failed.')\n except errors.CommandError as exc:\n await ctx.command.dispatch_error(ctx, exc)\n else:\n self.dispatch('command_completion', ctx)\n elif ctx.invoked_with:\n exc = errors.CommandNotFound(f'Command \"{ctx.invoked_with}\" is not found')\n self.dispatch('command_error', ctx, exc)\n\n async def process_commands(self, message: Message) -> None:\n \"\"\"|coro|\n\n This function processes the commands that have been registered\n to the bot and other groups. Without this coroutine, none of the\n commands will be triggered.\n\n By default, this coroutine is called inside the :func:`.on_message`\n event. If you choose to override the :func:`.on_message` event, then\n you should invoke this coroutine as well.\n\n This is built using other low level tools, and is equivalent to a\n call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.\n\n This also checks if the message's author is a bot and doesn't\n call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message to process commands for.\n \"\"\"\n if message.author.bot:\n return\n\n ctx = await self.get_context(message)\n await self.invoke(ctx)\n\n async def on_message(self, message):\n await self.process_commands(message)\n\n\nclass Bot(BotBase, discord.Bot):\n \"\"\"Represents a discord bot.\n\n This class is a subclass of :class:`discord.Bot` and as a result\n anything that you can do with a :class:`discord.Bot` you can do with\n this bot.\n\n This class also subclasses :class:`.GroupMixin` to provide the functionality\n to manage commands.\n\n Attributes\n -----------\n command_prefix\n The command prefix is what the message content must contain initially\n to have a command invoked. This prefix could either be a string to\n indicate what the prefix should be, or a callable that takes in the bot\n as its first parameter and :class:`discord.Message` as its second\n parameter and returns the prefix. This is to facilitate \"dynamic\"\n command prefixes. 
This callable can be either a regular function or\n a coroutine.\n\n An empty string as the prefix always matches, enabling prefix-less\n command invocation. While this may be useful in DMs it should be avoided\n in servers, as it's likely to cause performance issues and unintended\n command invocations.\n\n The command prefix could also be an iterable of strings indicating that\n multiple checks for the prefix should be used and the first one to\n match will be the invocation prefix. You can get this prefix via\n :attr:`.Context.prefix`. To avoid confusion empty iterables are not\n allowed.\n\n .. note::\n\n When passing multiple prefixes be careful to not pass a prefix\n that matches a longer prefix occurring later in the sequence. For\n example, if the command prefix is ``('!', '!?')`` the ``'!?'``\n prefix will never be matched to any message as the previous one\n matches messages starting with ``!?``. This is especially important\n when passing an empty string, it should always be last as no prefix\n after it will be matched.\n case_insensitive: :class:`bool`\n Whether the commands should be case insensitive. Defaults to ``False``. This\n attribute does not carry over to groups. You must set it to every group if\n you require group commands to be case insensitive as well.\n help_command: Optional[:class:`.HelpCommand`]\n The help command implementation to use. This can be dynamically\n set at runtime. To remove the help command pass ``None``. For more\n information on implementing a help command, see :ref:`ext_commands_help_command`.\n strip_after_prefix: :class:`bool`\n Whether to strip whitespace characters after encountering the command\n prefix. This allows for ``! hello`` and ``!hello`` to both work if\n the ``command_prefix`` is set to ``!``. Defaults to ``False``.\n\n .. versionadded:: 1.7\n \"\"\"\n pass\n\nclass AutoShardedBot(BotBase, discord.AutoShardedBot):\n \"\"\"This is similar to :class:`.Bot` except that it is inherited from\n :class:`discord.AutoShardedBot` instead.\n \"\"\"\n pass\n", "path": "discord/ext/commands/bot.py"}], "after_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\n\nimport asyncio\nimport collections\nimport collections.abc\nimport inspect\nimport importlib.util\nimport sys\nimport traceback\nimport types\nfrom typing import Any, Callable, Mapping, List, Dict, TYPE_CHECKING, Optional, TypeVar, Type, Union\n\nimport discord\n\nfrom .core import GroupMixin\nfrom .view import StringView\nfrom .context import Context\nfrom . import errors\nfrom .help import HelpCommand, DefaultHelpCommand\nfrom .cog import Cog\n\nif TYPE_CHECKING:\n import importlib.machinery\n\n from discord.message import Message\n from ._types import (\n Check,\n CoroFunc,\n )\n\n__all__ = (\n 'when_mentioned',\n 'when_mentioned_or',\n 'Bot',\n 'AutoShardedBot',\n)\n\nMISSING: Any = discord.utils.MISSING\n\nT = TypeVar('T')\nCFT = TypeVar('CFT', bound='CoroFunc')\nCXT = TypeVar('CXT', bound='Context')\n\ndef when_mentioned(bot: Union[Bot, AutoShardedBot], msg: Message) -> List[str]:\n \"\"\"A callable that implements a command prefix equivalent to being mentioned.\n\n These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.\n \"\"\"\n # bot.user will never be None when this is called\n return [f'<@{bot.user.id}> ', f'<@!{bot.user.id}> '] # type: ignore\n\ndef when_mentioned_or(*prefixes: str) -> Callable[[Union[Bot, AutoShardedBot], Message], List[str]]:\n \"\"\"A callable that implements when mentioned or other prefixes provided.\n\n These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.\n\n Example\n --------\n\n .. code-block:: python3\n\n bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))\n\n\n .. note::\n\n This callable returns another callable, so if this is done inside a custom\n callable, you must call the returned callable, for example:\n\n .. 
code-block:: python3\n\n async def get_prefix(bot, message):\n extras = await prefixes_for(message.guild) # returns a list\n return commands.when_mentioned_or(*extras)(bot, message)\n\n\n See Also\n ----------\n :func:`.when_mentioned`\n \"\"\"\n def inner(bot, msg):\n r = list(prefixes)\n r = when_mentioned(bot, msg) + r\n return r\n\n return inner\n\ndef _is_submodule(parent: str, child: str) -> bool:\n return parent == child or child.startswith(parent + \".\")\n\nclass _DefaultRepr:\n def __repr__(self):\n return '<default-help-command>'\n\n_default = _DefaultRepr()\n\nclass BotBase(GroupMixin, discord.cog.CogMixin):\n _supports_prefixed_commands = True\n def __init__(self, command_prefix=when_mentioned, help_command=_default, **options):\n super().__init__(**options)\n self.command_prefix = command_prefix\n self._help_command = None\n self.strip_after_prefix = options.get('strip_after_prefix', False)\n\n if help_command is _default:\n self.help_command = DefaultHelpCommand()\n else:\n self.help_command = help_command\n\n @discord.utils.copy_doc(discord.Client.close)\n async def close(self) -> None:\n for extension in tuple(self.__extensions):\n try:\n self.unload_extension(extension)\n except Exception:\n pass\n\n for cog in tuple(self.__cogs):\n try:\n self.remove_cog(cog)\n except Exception:\n pass\n\n await super().close() # type: ignore\n\n async def on_command_error(self, context: Context, exception: errors.CommandError) -> None:\n \"\"\"|coro|\n\n The default command error handler provided by the bot.\n\n By default this prints to :data:`sys.stderr` however it could be\n overridden to have a different implementation.\n\n This only fires if you do not specify any listeners for command error.\n \"\"\"\n if self.extra_events.get('on_command_error', None):\n return\n\n command = context.command\n if command and command.has_error_handler():\n return\n\n cog = context.cog\n if cog and cog.has_error_handler():\n return\n\n print(f'Ignoring exception in command {context.command}:', file=sys.stderr)\n traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)\n\n async def can_run(self, ctx: Context, *, call_once: bool = False) -> bool:\n data = self._check_once if call_once else self._checks\n\n if len(data) == 0:\n return True\n\n # type-checker doesn't distinguish between functions and methods\n return await discord.utils.async_all(f(ctx) for f in data) # type: ignore\n # help command stuff\n\n @property\n def help_command(self) -> Optional[HelpCommand]:\n return self._help_command\n\n @help_command.setter\n def help_command(self, value: Optional[HelpCommand]) -> None:\n if value is not None:\n if not isinstance(value, HelpCommand):\n raise TypeError('help_command must be a subclass of HelpCommand')\n if self._help_command is not None:\n self._help_command._remove_from_bot(self)\n self._help_command = value\n value._add_to_bot(self)\n elif self._help_command is not None:\n self._help_command._remove_from_bot(self)\n self._help_command = None\n else:\n self._help_command = None\n\n # command processing\n\n async def get_prefix(self, message: Message) -> Union[List[str], str]:\n \"\"\"|coro|\n\n Retrieves the prefix the bot is listening to\n with the message as a context.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message context to get the prefix of.\n\n Returns\n --------\n Union[List[:class:`str`], :class:`str`]\n A list of prefixes or a single prefix that the bot is\n listening for.\n \"\"\"\n prefix = ret = 
self.command_prefix\n if callable(prefix):\n ret = await discord.utils.maybe_coroutine(prefix, self, message)\n\n if not isinstance(ret, str):\n try:\n ret = list(ret)\n except TypeError:\n # It's possible that a generator raised this exception. Don't\n # replace it with our own error if that's the case.\n if isinstance(ret, collections.abc.Iterable):\n raise\n\n raise TypeError(\"command_prefix must be plain string, iterable of strings, or callable \"\n f\"returning either of these, not {ret.__class__.__name__}\")\n\n if not ret:\n raise ValueError(\"Iterable command_prefix must contain at least one prefix\")\n\n return ret\n\n async def get_context(self, message: Message, *, cls: Type[CXT] = Context) -> CXT:\n r\"\"\"|coro|\n\n Returns the invocation context from the message.\n\n This is a more low-level counter-part for :meth:`.process_commands`\n to allow users more fine grained control over the processing.\n\n The returned context is not guaranteed to be a valid invocation\n context, :attr:`.Context.valid` must be checked to make sure it is.\n If the context is not valid then it is not a valid candidate to be\n invoked under :meth:`~.Bot.invoke`.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message to get the invocation context from.\n cls\n The factory class that will be used to create the context.\n By default, this is :class:`.Context`. Should a custom\n class be provided, it must be similar enough to :class:`.Context`\\'s\n interface.\n\n Returns\n --------\n :class:`.Context`\n The invocation context. The type of this can change via the\n ``cls`` parameter.\n \"\"\"\n\n view = StringView(message.content)\n ctx = cls(prefix=None, view=view, bot=self, message=message)\n\n if message.author.id == self.user.id: # type: ignore\n return ctx\n\n prefix = await self.get_prefix(message)\n invoked_prefix = prefix\n\n if isinstance(prefix, str):\n if not view.skip_string(prefix):\n return ctx\n else:\n try:\n # if the context class' __init__ consumes something from the view this\n # will be wrong. 
That seems unreasonable though.\n if message.content.startswith(tuple(prefix)):\n invoked_prefix = discord.utils.find(view.skip_string, prefix)\n else:\n return ctx\n\n except TypeError:\n if not isinstance(prefix, list):\n raise TypeError(\"get_prefix must return either a string or a list of string, \"\n f\"not {prefix.__class__.__name__}\")\n\n # It's possible a bad command_prefix got us here.\n for value in prefix:\n if not isinstance(value, str):\n raise TypeError(\"Iterable command_prefix or list returned from get_prefix must \"\n f\"contain only strings, not {value.__class__.__name__}\")\n\n # Getting here shouldn't happen\n raise\n\n if self.strip_after_prefix:\n view.skip_ws()\n\n invoker = view.get_word()\n ctx.invoked_with = invoker\n # type-checker fails to narrow invoked_prefix type.\n ctx.prefix = invoked_prefix # type: ignore\n ctx.command = self.prefixed_commands.get(invoker)\n return ctx\n\n async def invoke(self, ctx: Context) -> None:\n \"\"\"|coro|\n\n Invokes the command given under the invocation context and\n handles all the internal event dispatch mechanisms.\n\n Parameters\n -----------\n ctx: :class:`.Context`\n The invocation context to invoke.\n \"\"\"\n if ctx.command is not None:\n self.dispatch('command', ctx)\n try:\n if await self.can_run(ctx, call_once=True):\n await ctx.command.invoke(ctx)\n else:\n raise errors.CheckFailure('The global check once functions failed.')\n except errors.CommandError as exc:\n await ctx.command.dispatch_error(ctx, exc)\n else:\n self.dispatch('command_completion', ctx)\n elif ctx.invoked_with:\n exc = errors.CommandNotFound(f'Command \"{ctx.invoked_with}\" is not found')\n self.dispatch('command_error', ctx, exc)\n\n async def process_commands(self, message: Message) -> None:\n \"\"\"|coro|\n\n This function processes the commands that have been registered\n to the bot and other groups. Without this coroutine, none of the\n commands will be triggered.\n\n By default, this coroutine is called inside the :func:`.on_message`\n event. If you choose to override the :func:`.on_message` event, then\n you should invoke this coroutine as well.\n\n This is built using other low level tools, and is equivalent to a\n call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.\n\n This also checks if the message's author is a bot and doesn't\n call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message to process commands for.\n \"\"\"\n if message.author.bot:\n return\n\n ctx = await self.get_context(message)\n await self.invoke(ctx)\n\n async def on_message(self, message):\n await self.process_commands(message)\n\n\nclass Bot(BotBase, discord.Bot):\n \"\"\"Represents a discord bot.\n\n This class is a subclass of :class:`discord.Bot` and as a result\n anything that you can do with a :class:`discord.Bot` you can do with\n this bot.\n\n This class also subclasses :class:`.GroupMixin` to provide the functionality\n to manage commands.\n\n Attributes\n -----------\n command_prefix\n The command prefix is what the message content must contain initially\n to have a command invoked. This prefix could either be a string to\n indicate what the prefix should be, or a callable that takes in the bot\n as its first parameter and :class:`discord.Message` as its second\n parameter and returns the prefix. This is to facilitate \"dynamic\"\n command prefixes. 
This callable can be either a regular function or\n a coroutine.\n\n An empty string as the prefix always matches, enabling prefix-less\n command invocation. While this may be useful in DMs it should be avoided\n in servers, as it's likely to cause performance issues and unintended\n command invocations.\n\n The command prefix could also be an iterable of strings indicating that\n multiple checks for the prefix should be used and the first one to\n match will be the invocation prefix. You can get this prefix via\n :attr:`.Context.prefix`. To avoid confusion empty iterables are not\n allowed.\n\n .. note::\n\n When passing multiple prefixes be careful to not pass a prefix\n that matches a longer prefix occurring later in the sequence. For\n example, if the command prefix is ``('!', '!?')`` the ``'!?'``\n prefix will never be matched to any message as the previous one\n matches messages starting with ``!?``. This is especially important\n when passing an empty string, it should always be last as no prefix\n after it will be matched.\n case_insensitive: :class:`bool`\n Whether the commands should be case insensitive. Defaults to ``False``. This\n attribute does not carry over to groups. You must set it to every group if\n you require group commands to be case insensitive as well.\n help_command: Optional[:class:`.HelpCommand`]\n The help command implementation to use. This can be dynamically\n set at runtime. To remove the help command pass ``None``. For more\n information on implementing a help command, see :ref:`ext_commands_help_command`.\n strip_after_prefix: :class:`bool`\n Whether to strip whitespace characters after encountering the command\n prefix. This allows for ``! hello`` and ``!hello`` to both work if\n the ``command_prefix`` is set to ``!``. Defaults to ``False``.\n\n .. versionadded:: 1.7\n \"\"\"\n pass\n\nclass AutoShardedBot(BotBase, discord.AutoShardedBot):\n \"\"\"This is similar to :class:`.Bot` except that it is inherited from\n :class:`discord.AutoShardedBot` instead.\n \"\"\"\n pass\n", "path": "discord/ext/commands/bot.py"}]} |
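The escaped `bot.py` sources above revolve around prefix resolution (`get_prefix`, `when_mentioned_or`). Below is a minimal, self-contained sketch of that dynamic-prefix pattern; `prefixes_for` and `PREFIX_TABLE` are hypothetical stand-ins for per-guild storage, while the `commands` calls follow the API quoted in the record.

```python
import discord
from discord.ext import commands

PREFIX_TABLE = {}  # hypothetical per-guild prefix storage


async def prefixes_for(guild):
    # Illustrative stand-in for a real database lookup; DMs fall back to "!".
    return PREFIX_TABLE.get(getattr(guild, "id", None), ["!"])


async def get_prefix(bot, message):
    # Combine the stored prefixes with the bot mention, as in the quoted docstring example.
    extras = await prefixes_for(message.guild)
    return commands.when_mentioned_or(*extras)(bot, message)


bot = commands.Bot(command_prefix=get_prefix, intents=discord.Intents.default())
```

An empty-string prefix would also match every message, which is why the quoted docstring warns against using it outside DMs.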
gh_patches_debug_1422 | rasdani/github-patches | git_diff | django__channels-1886 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve BaseMiddleware class docstring
The class docstring of [BaseMiddleware](https://github.com/django/channels/blob/master/channels/middleware.py#L3) should be updated to explain that `__call__` (and `__init__`) must be overridden when implementing a middleware.
> Base class for implementing ASGI middleware. Inherit from this and
> override the setup() method if you want to do things before you
> get to.
Also the purpose of the middlewares could be added to the docstring: they add additional information to the scope, with a reference to the ASGI specification or the channels documentation (consumers scope section). This would help new users understand the purpose of the middlewares, what they can expect from them and what not.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `channels/middleware.py`
Content:
```
1 class BaseMiddleware:
2 """
3 Base class for implementing ASGI middleware. Inherit from this and
4 override the setup() method if you want to do things before you
5 get to.
6
7 Note that subclasses of this are not self-safe; don't store state on
8 the instance, as it serves multiple application instances. Instead, use
9 scope.
10 """
11
12 def __init__(self, inner):
13 """
14 Middleware constructor - just takes inner application.
15 """
16 self.inner = inner
17
18 async def __call__(self, scope, receive, send):
19 """
20 ASGI application; can insert things into the scope and run asynchronous
21 code.
22 """
23 # Copy scope to stop changes going upstream
24 scope = dict(scope)
25 # Run the inner application along with the scope
26 return await self.inner(scope, receive, send)
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/channels/middleware.py b/channels/middleware.py
--- a/channels/middleware.py
+++ b/channels/middleware.py
@@ -1,8 +1,6 @@
class BaseMiddleware:
"""
- Base class for implementing ASGI middleware. Inherit from this and
- override the setup() method if you want to do things before you
- get to.
+ Base class for implementing ASGI middleware.
Note that subclasses of this are not self-safe; don't store state on
the instance, as it serves multiple application instances. Instead, use
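For illustration, a minimal sketch of the kind of subclass the clarified docstring points at: override `__init__` for configuration and `__call__` to enrich the scope before delegating to the wrapped application. The class name, the `prefix_key` option and the `path_prefix` scope key are hypothetical; only `BaseMiddleware`, `self.inner` and the ASGI call signature come from the file above.

```python
from channels.middleware import BaseMiddleware


class PathPrefixMiddleware(BaseMiddleware):
    """Hypothetical middleware that exposes the first path segment via the scope."""

    def __init__(self, inner, prefix_key="path_prefix"):
        super().__init__(inner)           # stores the wrapped application as self.inner
        self.prefix_key = prefix_key      # configuration only; no per-connection state here

    async def __call__(self, scope, receive, send):
        # Copy the scope so the extra key does not propagate upstream.
        scope = dict(scope)
        segments = scope.get("path", "/").strip("/").split("/")
        scope[self.prefix_key] = segments[0] or None
        # Delegate to the wrapped ASGI application.
        return await self.inner(scope, receive, send)
```

A consumer behind this middleware could then read `self.scope["path_prefix"]`, which is exactly the scope-enrichment pattern the issue asks the docstring to spell out.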
| {"golden_diff": "diff --git a/channels/middleware.py b/channels/middleware.py\n--- a/channels/middleware.py\n+++ b/channels/middleware.py\n@@ -1,8 +1,6 @@\n class BaseMiddleware:\n \"\"\"\n- Base class for implementing ASGI middleware. Inherit from this and\n- override the setup() method if you want to do things before you\n- get to.\n+ Base class for implementing ASGI middleware.\n \n Note that subclasses of this are not self-safe; don't store state on\n the instance, as it serves multiple application instances. Instead, use\n", "issue": "Improve BaseMiddleware class docstring\nThe class docstring of [BaseMiddleware](https://github.com/django/channels/blob/master/channels/middleware.py#L3) should be updated to explain that `__call__` (and `__init__`) must be changed for the middleware.\r\n\r\n> Base class for implementing ASGI middleware. Inherit from this and\r\n> override the setup() method if you want to do things before you\r\n> get to.\r\n\r\nAlso the purpose of the middlewares could also added to the docstring that they add additional information's in the scope with a reference to the ASGI reference or channels documentation (consumers scope section). This would help to understand the purpose of this to new users, what they can expect from the middlewares and what not.\n", "before_files": [{"content": "class BaseMiddleware:\n \"\"\"\n Base class for implementing ASGI middleware. Inherit from this and\n override the setup() method if you want to do things before you\n get to.\n\n Note that subclasses of this are not self-safe; don't store state on\n the instance, as it serves multiple application instances. Instead, use\n scope.\n \"\"\"\n\n def __init__(self, inner):\n \"\"\"\n Middleware constructor - just takes inner application.\n \"\"\"\n self.inner = inner\n\n async def __call__(self, scope, receive, send):\n \"\"\"\n ASGI application; can insert things into the scope and run asynchronous\n code.\n \"\"\"\n # Copy scope to stop changes going upstream\n scope = dict(scope)\n # Run the inner application along with the scope\n return await self.inner(scope, receive, send)\n", "path": "channels/middleware.py"}], "after_files": [{"content": "class BaseMiddleware:\n \"\"\"\n Base class for implementing ASGI middleware.\n\n Note that subclasses of this are not self-safe; don't store state on\n the instance, as it serves multiple application instances. Instead, use\n scope.\n \"\"\"\n\n def __init__(self, inner):\n \"\"\"\n Middleware constructor - just takes inner application.\n \"\"\"\n self.inner = inner\n\n async def __call__(self, scope, receive, send):\n \"\"\"\n ASGI application; can insert things into the scope and run asynchronous\n code.\n \"\"\"\n # Copy scope to stop changes going upstream\n scope = dict(scope)\n # Run the inner application along with the scope\n return await self.inner(scope, receive, send)\n", "path": "channels/middleware.py"}]} |
gh_patches_debug_1423 | rasdani/github-patches | git_diff | cocotb__cocotb-745 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Waiting on an event that has already fired will hang forever
We just need to check whether the event has already fired and, if so, return a NullTrigger()
[Need to modify this function](https://github.com/potentialventures/cocotb/blob/0bb751d5bb80f75e7a03284284f0d46caa209ee4/cocotb/triggers.py#L402)
```python
def wait(self):
"""This can be yielded to block this coroutine
until another wakes it"""
+ if self.fired:
+ return NullTrigger()
+
return _Event(self)
```
Originally reported by @stuarthodgson
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cocotb/triggers.py`
Content:
```
1 ''' Copyright (c) 2013 Potential Ventures Ltd
2 Copyright (c) 2013 SolarFlare Communications Inc
3 All rights reserved.
4
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7 * Redistributions of source code must retain the above copyright
8 notice, this list of conditions and the following disclaimer.
9 * Redistributions in binary form must reproduce the above copyright
10 notice, this list of conditions and the following disclaimer in the
11 documentation and/or other materials provided with the distribution.
12 * Neither the name of Potential Ventures Ltd,
13 SolarFlare Communications Inc nor the
14 names of its contributors may be used to endorse or promote products
15 derived from this software without specific prior written permission.
16
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
27
28 """
29 A collections of triggers which a testbench can 'yield'
30 """
31 import os
32 import weakref
33
34 # For autodocumentation don't need the extension modules
35 if "SPHINX_BUILD" in os.environ:
36 simulator = None
37 else:
38 import simulator
39 from cocotb.log import SimLog
40 from cocotb.result import raise_error
41 from cocotb.utils import get_sim_steps, get_time_from_sim_steps
42
43
44 class TriggerException(Exception):
45 pass
46
47
48 class Trigger(object):
49 """Base class to derive from"""
50 def __init__(self):
51 self.log = SimLog("cocotb.%s" % (self.__class__.__name__), id(self))
52 self.signal = None
53 self.primed = False
54
55 def prime(self, *args):
56 self.primed = True
57
58 def unprime(self):
59 """Remove any pending callbacks if necessary"""
60 self.primed = False
61
62 def __del__(self):
63 """Ensure if a trigger drops out of scope we remove any pending
64 callbacks"""
65 self.unprime()
66
67 def __str__(self):
68 return self.__class__.__name__
69
70
71 class PythonTrigger(Trigger):
72 """Python triggers don't use GPI at all
73
74 For example notification of coroutine completion etc
75
76 TODO:
77 Still need to implement unprime
78 """
79 pass
80
81
82 class GPITrigger(Trigger):
83 """
84 Base Trigger class for GPI triggers
85
86 Consumes simulation time
87 """
88 def __init__(self):
89 Trigger.__init__(self)
90
91 # Required to ensure documentation can build
92 # if simulator is not None:
93 # self.cbhdl = simulator.create_callback(self)
94 # else:
95 self.cbhdl = 0
96
97 def unprime(self):
98 """Disable a primed trigger, can be reprimed"""
99 if self.cbhdl != 0:
100 simulator.deregister_callback(self.cbhdl)
101 self.cbhdl = 0
102 Trigger.unprime(self)
103
104 def __del__(self):
105 """Remove knowledge of the trigger"""
106 if self.cbhdl != 0:
107 self.unprime()
108 Trigger.__del__(self)
109
110
111 class Timer(GPITrigger):
112 """
113 Execution will resume when the specified time period expires
114
115 Consumes simulation time
116 """
117 def __init__(self, time_ps, units=None):
118 GPITrigger.__init__(self)
119 self.sim_steps = get_sim_steps(time_ps, units)
120
121 def prime(self, callback):
122 """Register for a timed callback"""
123 if self.cbhdl == 0:
124 self.cbhdl = simulator.register_timed_callback(self.sim_steps,
125 callback, self)
126 if self.cbhdl == 0:
127 raise_error(self, "Unable set up %s Trigger" % (str(self)))
128 Trigger.prime(self)
129
130 def __str__(self):
131 return self.__class__.__name__ + "(%1.2fps)" % get_time_from_sim_steps(self.sim_steps,units='ps')
132
133 class _ReadOnly(GPITrigger):
134 """
135 Execution will resume when the readonly portion of the sim cycles is
136 readched
137 """
138 def __init__(self):
139 GPITrigger.__init__(self)
140
141 def prime(self, callback):
142 if self.cbhdl == 0:
143 self.cbhdl = simulator.register_readonly_callback(callback, self)
144 if self.cbhdl == 0:
145 raise_error(self, "Unable set up %s Trigger" % (str(self)))
146 Trigger.prime(self)
147
148 def __str__(self):
149 return self.__class__.__name__ + "(readonly)"
150
151 _ro = _ReadOnly()
152
153
154 def ReadOnly():
155 return _ro
156
157
158 class _ReadWrite(GPITrigger):
159 """
160 Execution will resume when the readwrite portion of the sim cycles is
161 reached
162 """
163 def __init__(self):
164 GPITrigger.__init__(self)
165
166 def prime(self, callback):
167 if self.cbhdl == 0:
168 # import pdb
169 # pdb.set_trace()
170 self.cbhdl = simulator.register_rwsynch_callback(callback, self)
171 if self.cbhdl == 0:
172 raise_error(self, "Unable set up %s Trigger" % (str(self)))
173 Trigger.prime(self)
174
175 def __str__(self):
176 return self.__class__.__name__ + "(readwritesync)"
177
178 _rw = _ReadWrite()
179
180
181 def ReadWrite():
182 return _rw
183
184
185 class _NextTimeStep(GPITrigger):
186 """
187 Execution will resume when the next time step is started
188 """
189 def __init__(self):
190 GPITrigger.__init__(self)
191
192 def prime(self, callback):
193 if self.cbhdl == 0:
194 self.cbhdl = simulator.register_nextstep_callback(callback, self)
195 if self.cbhdl == 0:
196 raise_error(self, "Unable set up %s Trigger" % (str(self)))
197 Trigger.prime(self)
198
199 def __str__(self):
200 return self.__class__.__name__ + "(nexttimestep)"
201
202 _nxts = _NextTimeStep()
203
204
205 def NextTimeStep():
206 return _nxts
207
208
209 class _EdgeBase(GPITrigger):
210 """
211 Execution will resume when an edge occurs on the provided signal
212 """
213 @classmethod
214 @property
215 def _edge_type(self):
216 """
217 The edge type, as understood by the C code. Must be set in subclasses
218 """
219 raise NotImplementedError
220
221 # Ensure that each signal has at most one edge trigger per edge type.
222 # Using a weak dictionary ensures we don't create a reference cycle
223 _instances = weakref.WeakValueDictionary()
224
225 def __new__(cls, signal):
226 # find the existing instance, if possible - else create a new one
227 key = (signal, cls._edge_type)
228 try:
229 return cls._instances[key]
230 except KeyError:
231 instance = super(_EdgeBase, cls).__new__(cls)
232 cls._instances[key] = instance
233 return instance
234
235 def __init__(self, signal):
236 super(_EdgeBase, self).__init__()
237 self.signal = signal
238
239 def prime(self, callback):
240 """Register notification of a value change via a callback"""
241 if self.cbhdl == 0:
242 self.cbhdl = simulator.register_value_change_callback(
243 self.signal._handle, callback, type(self)._edge_type, self
244 )
245 if self.cbhdl == 0:
246 raise_error(self, "Unable set up %s Trigger" % (str(self)))
247 super(_EdgeBase, self).prime()
248
249 def __str__(self):
250 return self.__class__.__name__ + "(%s)" % self.signal._name
251
252
253 class RisingEdge(_EdgeBase):
254 """ Triggers on the rising edge of the provided signal """
255 _edge_type = 1
256
257
258 class FallingEdge(_EdgeBase):
259 """ Triggers on the falling edge of the provided signal """
260 _edge_type = 2
261
262
263 class Edge(_EdgeBase):
264 """ Triggers on either edge in a signal """
265 _edge_type = 3
266
267
268 class ClockCycles(GPITrigger):
269 """
270 Execution will resume after N rising edges or N falling edges
271 """
272 def __init__(self, signal, num_cycles, rising=True):
273 super(ClockCycles, self).__init__()
274 self.signal = signal
275 self.num_cycles = num_cycles
276 if rising is True:
277 self._rising = 1
278 else:
279 self._rising = 2
280
281 def prime(self, callback):
282 self._callback = callback
283
284 def _check(obj):
285 self.unprime()
286
287 if self.signal.value:
288 self.num_cycles -= 1
289
290 if self.num_cycles <= 0:
291 self._callback(self)
292 return
293
294 self.cbhdl = simulator.register_value_change_callback(self.signal.
295 _handle,
296 _check,
297 self._rising,
298 self)
299 if self.cbhdl == 0:
300 raise_error(self, "Unable set up %s Trigger" % (str(self)))
301
302 self.cbhdl = simulator.register_value_change_callback(self.signal.
303 _handle,
304 _check,
305 self._rising,
306 self)
307 if self.cbhdl == 0:
308 raise_error(self, "Unable set up %s Trigger" % (str(self)))
309 Trigger.prime(self)
310
311 def __str__(self):
312 return self.__class__.__name__ + "(%s)" % self.signal._name
313
314
315 class Combine(PythonTrigger):
316 """
317 Combines multiple triggers together. Coroutine will continue when all
318 triggers have fired
319 """
320
321 def __init__(self, *args):
322 PythonTrigger.__init__(self)
323 self._triggers = args
324 # TODO: check that trigger is an iterable containing
325 # only Trigger objects
326 try:
327 for trigger in self._triggers:
328 if not isinstance(trigger, Trigger):
329 raise TriggerException("All combined triggers must be "
330 "instances of Trigger! Got: %s" %
331 trigger.__class__.__name__)
332 except Exception:
333 raise TriggerException("%s requires a list of Trigger objects" %
334 self.__class__.__name__)
335
336 def prime(self, callback):
337 self._callback = callback
338 self._fired = []
339 for trigger in self._triggers:
340 trigger.prime(self._check_all_fired)
341 Trigger.prime(self)
342
343 def _check_all_fired(self, trigger):
344 self._fired.append(trigger)
345 if self._fired == self._triggers:
346 self._callback(self)
347
348 def unprime(self):
349 for trigger in self._triggers:
350 trigger.unprime()
351
352
353 class _Event(PythonTrigger):
354 """
355 Unique instance used by the Event object.
356
357 One created for each attempt to wait on the event so that the scheduler
358 can maintain a dictionary of indexing each individual coroutine
359
360 FIXME: This will leak - need to use peers to ensure everything is removed
361 """
362 def __init__(self, parent):
363 PythonTrigger.__init__(self)
364 self.parent = parent
365
366 def prime(self, callback):
367 self._callback = callback
368 self.parent.prime(callback, self)
369 Trigger.prime(self)
370
371 def __call__(self):
372 self._callback(self)
373
374
375 class Event(PythonTrigger):
376 """
377 Event to permit synchronisation between two coroutines
378 """
379 def __init__(self, name=""):
380 PythonTrigger.__init__(self)
381 self._pending = []
382 self.name = name
383 self.fired = False
384 self.data = None
385
386 def prime(self, callback, trigger):
387 self._pending.append(trigger)
388 Trigger.prime(self)
389
390 def set(self, data=None):
391 """Wake up any coroutines blocked on this event"""
392 self.fired = True
393 self.data = data
394
395 p = self._pending[:]
396
397 self._pending = []
398
399 for trigger in p:
400 trigger()
401
402 def wait(self):
403 """This can be yielded to block this coroutine
404 until another wakes it"""
405 return _Event(self)
406
407 def clear(self):
408 """Clear this event that's fired.
409
410 Subsequent calls to wait will block until set() is called again"""
411 self.fired = False
412
413 def __str__(self):
414 return self.__class__.__name__ + "(%s)" % self.name
415
416
417 class _Lock(PythonTrigger):
418 """
419 Unique instance used by the Lock object.
420
421 One created for each attempt to acquire the Lock so that the scheduler
422 can maintain a dictionary of indexing each individual coroutine
423
424 FIXME: This will leak - need to use peers to ensure everything is removed
425 """
426 def __init__(self, parent):
427 PythonTrigger.__init__(self)
428 self.parent = parent
429
430 def prime(self, callback):
431 self._callback = callback
432 self.parent.prime(callback, self)
433 Trigger.prime(self)
434
435 def __call__(self):
436 self._callback(self)
437
438
439 class Lock(PythonTrigger):
440 """
441 Lock primitive (not re-entrant)
442 """
443
444 def __init__(self, name=""):
445 PythonTrigger.__init__(self)
446 self._pending_unprimed = []
447 self._pending_primed = []
448 self.name = name
449 self.locked = False
450
451 def prime(self, callback, trigger):
452 Trigger.prime(self)
453
454 self._pending_unprimed.remove(trigger)
455
456 if not self.locked:
457 self.locked = True
458 callback(trigger)
459 else:
460 self._pending_primed.append(trigger)
461
462 def acquire(self):
463 """This can be yielded to block until the lock is acquired"""
464 trig = _Lock(self)
465 self._pending_unprimed.append(trig)
466 return trig
467
468 def release(self):
469
470 if not self.locked:
471 raise_error(self, "Attempt to release an unacquired Lock %s" %
472 (str(self)))
473
474 self.locked = False
475
476 # nobody waiting for this lock
477 if not self._pending_primed:
478 return
479
480 trigger = self._pending_primed.pop(0)
481 self.locked = True
482 trigger()
483
484 def __str__(self):
485 return "%s(%s) [%s waiting]" % (str(self.__class__.__name__),
486 self.name,
487 len(self._pending_primed))
488
489 def __nonzero__(self):
490 """Provide boolean of a Lock"""
491 return self.locked
492
493 __bool__ = __nonzero__
494
495
496 class NullTrigger(Trigger):
497 """
498 Trigger for internal interfacing use call the callback as soon
499 as it is primed and then remove it's self from the scheduler
500 """
501 def __init__(self, name=""):
502 Trigger.__init__(self)
503 self._callback = None
504 self.name = name
505
506 def prime(self, callback):
507 callback(self)
508
509
510 class Join(PythonTrigger):
511 """
512 Join a coroutine, firing when it exits
513 """
514 # Ensure that each coroutine has at most one join trigger.
515 # Using a weak dictionary ensures we don't create a reference cycle
516 _instances = weakref.WeakValueDictionary()
517
518 def __new__(cls, coroutine):
519 # find the existing instance, if possible - else create a new one
520 try:
521 return cls._instances[coroutine]
522 except KeyError:
523 instance = super(Join, cls).__new__(cls)
524 cls._instances[coroutine] = instance
525 return instance
526
527 def __init__(self, coroutine):
528 super(Join, self).__init__()
529 self._coroutine = coroutine
530 self.pass_retval = True
531
532 @property
533 def retval(self):
534 return self._coroutine.retval
535
536 def prime(self, callback):
537 if self._coroutine._finished:
538 callback(self)
539 else:
540 super(Join, self).prime(callback)
541
542 def __str__(self):
543 return self.__class__.__name__ + "(%s)" % self._coroutine.__name__
544
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cocotb/triggers.py b/cocotb/triggers.py
--- a/cocotb/triggers.py
+++ b/cocotb/triggers.py
@@ -401,7 +401,13 @@
def wait(self):
"""This can be yielded to block this coroutine
- until another wakes it"""
+ until another wakes it
+
+ If the Event has already been fired, this returns NullTrigger()
+ To reset the event (and enable the use of wait() again), clear() should be called
+ """
+ if self.fired:
+ return NullTrigger()
return _Event(self)
def clear(self):
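To see why the patch matters, here is a small generator-style test sketch against the patched `Event`: the event fires long before anything waits on it, and with the `self.fired` check the late `wait()` resumes immediately instead of blocking forever. The test and coroutine names are made up; `Event`, `Timer`, `cocotb.fork` and the yield-based coroutine API match the era of the file shown above.

```python
import cocotb
from cocotb.triggers import Event, Timer


@cocotb.coroutine
def producer(evt):
    yield Timer(10, units="ns")
    evt.set("payload")             # fires the event early


@cocotb.test()
def wait_after_fire(dut):
    """A late wait() on an already-fired Event must not hang."""
    evt = Event("done")
    cocotb.fork(producer(evt))
    yield Timer(100, units="ns")   # the event has fired by now
    yield evt.wait()               # patched: returns NullTrigger, resumes at once
    assert evt.data == "payload"
```

A second `yield evt.wait()` after `evt.clear()` would block again until the next `set()`, which is the behaviour the updated docstring in the golden diff spells out.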
| {"golden_diff": "diff --git a/cocotb/triggers.py b/cocotb/triggers.py\n--- a/cocotb/triggers.py\n+++ b/cocotb/triggers.py\n@@ -401,7 +401,13 @@\n \n def wait(self):\n \"\"\"This can be yielded to block this coroutine\n- until another wakes it\"\"\"\n+ until another wakes it\n+\n+ If the Event has already been fired, this returns NullTrigger()\n+ To reset the event (and enable the use of wait() again), clear() should be called\n+ \"\"\"\n+ if self.fired:\n+ return NullTrigger()\n return _Event(self)\n \n def clear(self):\n", "issue": "Waiting on an event that has already fired will hang forever\nWe just need to check that if we have already fired then return a NullTrigger()\r\n\r\n[Need to modify this function](https://github.com/potentialventures/cocotb/blob/0bb751d5bb80f75e7a03284284f0d46caa209ee4/cocotb/triggers.py#L402)\r\n\r\n```python\r\n def wait(self):\r\n \"\"\"This can be yielded to block this coroutine\r\n until another wakes it\"\"\"\r\n+ if self.fired:\r\n+ return NullTrigger()\r\n+\r\n return _Event(self)\r\n```\r\n\r\nOriginally reported by @stuarthodgson \n", "before_files": [{"content": "''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
'''\n\n\"\"\"\n A collections of triggers which a testbench can 'yield'\n\"\"\"\nimport os\nimport weakref\n\n# For autodocumentation don't need the extension modules\nif \"SPHINX_BUILD\" in os.environ:\n simulator = None\nelse:\n import simulator\nfrom cocotb.log import SimLog\nfrom cocotb.result import raise_error\nfrom cocotb.utils import get_sim_steps, get_time_from_sim_steps\n\n\nclass TriggerException(Exception):\n pass\n\n\nclass Trigger(object):\n \"\"\"Base class to derive from\"\"\"\n def __init__(self):\n self.log = SimLog(\"cocotb.%s\" % (self.__class__.__name__), id(self))\n self.signal = None\n self.primed = False\n\n def prime(self, *args):\n self.primed = True\n\n def unprime(self):\n \"\"\"Remove any pending callbacks if necessary\"\"\"\n self.primed = False\n\n def __del__(self):\n \"\"\"Ensure if a trigger drops out of scope we remove any pending\n callbacks\"\"\"\n self.unprime()\n\n def __str__(self):\n return self.__class__.__name__\n\n\nclass PythonTrigger(Trigger):\n \"\"\"Python triggers don't use GPI at all\n\n For example notification of coroutine completion etc\n\n TODO:\n Still need to implement unprime\n \"\"\"\n pass\n\n\nclass GPITrigger(Trigger):\n \"\"\"\n Base Trigger class for GPI triggers\n\n Consumes simulation time\n \"\"\"\n def __init__(self):\n Trigger.__init__(self)\n\n # Required to ensure documentation can build\n # if simulator is not None:\n # self.cbhdl = simulator.create_callback(self)\n # else:\n self.cbhdl = 0\n\n def unprime(self):\n \"\"\"Disable a primed trigger, can be reprimed\"\"\"\n if self.cbhdl != 0:\n simulator.deregister_callback(self.cbhdl)\n self.cbhdl = 0\n Trigger.unprime(self)\n\n def __del__(self):\n \"\"\"Remove knowledge of the trigger\"\"\"\n if self.cbhdl != 0:\n self.unprime()\n Trigger.__del__(self)\n\n\nclass Timer(GPITrigger):\n \"\"\"\n Execution will resume when the specified time period expires\n\n Consumes simulation time\n \"\"\"\n def __init__(self, time_ps, units=None):\n GPITrigger.__init__(self)\n self.sim_steps = get_sim_steps(time_ps, units)\n\n def prime(self, callback):\n \"\"\"Register for a timed callback\"\"\"\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_timed_callback(self.sim_steps,\n callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%1.2fps)\" % get_time_from_sim_steps(self.sim_steps,units='ps')\n\nclass _ReadOnly(GPITrigger):\n \"\"\"\n Execution will resume when the readonly portion of the sim cycles is\n readched\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_readonly_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(readonly)\"\n\n_ro = _ReadOnly()\n\n\ndef ReadOnly():\n return _ro\n\n\nclass _ReadWrite(GPITrigger):\n \"\"\"\n Execution will resume when the readwrite portion of the sim cycles is\n reached\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n # import pdb\n # pdb.set_trace()\n self.cbhdl = simulator.register_rwsynch_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(readwritesync)\"\n\n_rw = 
_ReadWrite()\n\n\ndef ReadWrite():\n return _rw\n\n\nclass _NextTimeStep(GPITrigger):\n \"\"\"\n Execution will resume when the next time step is started\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_nextstep_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(nexttimestep)\"\n\n_nxts = _NextTimeStep()\n\n\ndef NextTimeStep():\n return _nxts\n\n\nclass _EdgeBase(GPITrigger):\n \"\"\"\n Execution will resume when an edge occurs on the provided signal\n \"\"\"\n @classmethod\n @property\n def _edge_type(self):\n \"\"\"\n The edge type, as understood by the C code. Must be set in subclasses\n \"\"\"\n raise NotImplementedError\n\n # Ensure that each signal has at most one edge trigger per edge type.\n # Using a weak dictionary ensures we don't create a reference cycle\n _instances = weakref.WeakValueDictionary()\n\n def __new__(cls, signal):\n # find the existing instance, if possible - else create a new one\n key = (signal, cls._edge_type)\n try:\n return cls._instances[key]\n except KeyError:\n instance = super(_EdgeBase, cls).__new__(cls)\n cls._instances[key] = instance\n return instance\n\n def __init__(self, signal):\n super(_EdgeBase, self).__init__()\n self.signal = signal\n\n def prime(self, callback):\n \"\"\"Register notification of a value change via a callback\"\"\"\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_value_change_callback(\n self.signal._handle, callback, type(self)._edge_type, self\n )\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n super(_EdgeBase, self).prime()\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.signal._name\n\n\nclass RisingEdge(_EdgeBase):\n \"\"\" Triggers on the rising edge of the provided signal \"\"\"\n _edge_type = 1\n\n\nclass FallingEdge(_EdgeBase):\n \"\"\" Triggers on the falling edge of the provided signal \"\"\"\n _edge_type = 2\n\n\nclass Edge(_EdgeBase):\n \"\"\" Triggers on either edge in a signal \"\"\"\n _edge_type = 3\n\n\nclass ClockCycles(GPITrigger):\n \"\"\"\n Execution will resume after N rising edges or N falling edges\n \"\"\"\n def __init__(self, signal, num_cycles, rising=True):\n super(ClockCycles, self).__init__()\n self.signal = signal\n self.num_cycles = num_cycles\n if rising is True:\n self._rising = 1\n else:\n self._rising = 2\n\n def prime(self, callback):\n self._callback = callback\n\n def _check(obj):\n self.unprime()\n\n if self.signal.value:\n self.num_cycles -= 1\n\n if self.num_cycles <= 0:\n self._callback(self)\n return\n\n self.cbhdl = simulator.register_value_change_callback(self.signal.\n _handle,\n _check,\n self._rising,\n self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n\n self.cbhdl = simulator.register_value_change_callback(self.signal.\n _handle,\n _check,\n self._rising,\n self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.signal._name\n\n\nclass Combine(PythonTrigger):\n \"\"\"\n Combines multiple triggers together. 
Coroutine will continue when all\n triggers have fired\n \"\"\"\n\n def __init__(self, *args):\n PythonTrigger.__init__(self)\n self._triggers = args\n # TODO: check that trigger is an iterable containing\n # only Trigger objects\n try:\n for trigger in self._triggers:\n if not isinstance(trigger, Trigger):\n raise TriggerException(\"All combined triggers must be \"\n \"instances of Trigger! Got: %s\" %\n trigger.__class__.__name__)\n except Exception:\n raise TriggerException(\"%s requires a list of Trigger objects\" %\n self.__class__.__name__)\n\n def prime(self, callback):\n self._callback = callback\n self._fired = []\n for trigger in self._triggers:\n trigger.prime(self._check_all_fired)\n Trigger.prime(self)\n\n def _check_all_fired(self, trigger):\n self._fired.append(trigger)\n if self._fired == self._triggers:\n self._callback(self)\n\n def unprime(self):\n for trigger in self._triggers:\n trigger.unprime()\n\n\nclass _Event(PythonTrigger):\n \"\"\"\n Unique instance used by the Event object.\n\n One created for each attempt to wait on the event so that the scheduler\n can maintain a dictionary of indexing each individual coroutine\n\n FIXME: This will leak - need to use peers to ensure everything is removed\n \"\"\"\n def __init__(self, parent):\n PythonTrigger.__init__(self)\n self.parent = parent\n\n def prime(self, callback):\n self._callback = callback\n self.parent.prime(callback, self)\n Trigger.prime(self)\n\n def __call__(self):\n self._callback(self)\n\n\nclass Event(PythonTrigger):\n \"\"\"\n Event to permit synchronisation between two coroutines\n \"\"\"\n def __init__(self, name=\"\"):\n PythonTrigger.__init__(self)\n self._pending = []\n self.name = name\n self.fired = False\n self.data = None\n\n def prime(self, callback, trigger):\n self._pending.append(trigger)\n Trigger.prime(self)\n\n def set(self, data=None):\n \"\"\"Wake up any coroutines blocked on this event\"\"\"\n self.fired = True\n self.data = data\n\n p = self._pending[:]\n\n self._pending = []\n\n for trigger in p:\n trigger()\n\n def wait(self):\n \"\"\"This can be yielded to block this coroutine\n until another wakes it\"\"\"\n return _Event(self)\n\n def clear(self):\n \"\"\"Clear this event that's fired.\n\n Subsequent calls to wait will block until set() is called again\"\"\"\n self.fired = False\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.name\n\n\nclass _Lock(PythonTrigger):\n \"\"\"\n Unique instance used by the Lock object.\n\n One created for each attempt to acquire the Lock so that the scheduler\n can maintain a dictionary of indexing each individual coroutine\n\n FIXME: This will leak - need to use peers to ensure everything is removed\n \"\"\"\n def __init__(self, parent):\n PythonTrigger.__init__(self)\n self.parent = parent\n\n def prime(self, callback):\n self._callback = callback\n self.parent.prime(callback, self)\n Trigger.prime(self)\n\n def __call__(self):\n self._callback(self)\n\n\nclass Lock(PythonTrigger):\n \"\"\"\n Lock primitive (not re-entrant)\n \"\"\"\n\n def __init__(self, name=\"\"):\n PythonTrigger.__init__(self)\n self._pending_unprimed = []\n self._pending_primed = []\n self.name = name\n self.locked = False\n\n def prime(self, callback, trigger):\n Trigger.prime(self)\n\n self._pending_unprimed.remove(trigger)\n\n if not self.locked:\n self.locked = True\n callback(trigger)\n else:\n self._pending_primed.append(trigger)\n\n def acquire(self):\n \"\"\"This can be yielded to block until the lock is acquired\"\"\"\n trig = _Lock(self)\n 
self._pending_unprimed.append(trig)\n return trig\n\n def release(self):\n\n if not self.locked:\n raise_error(self, \"Attempt to release an unacquired Lock %s\" %\n (str(self)))\n\n self.locked = False\n\n # nobody waiting for this lock\n if not self._pending_primed:\n return\n\n trigger = self._pending_primed.pop(0)\n self.locked = True\n trigger()\n\n def __str__(self):\n return \"%s(%s) [%s waiting]\" % (str(self.__class__.__name__),\n self.name,\n len(self._pending_primed))\n\n def __nonzero__(self):\n \"\"\"Provide boolean of a Lock\"\"\"\n return self.locked\n\n __bool__ = __nonzero__\n\n\nclass NullTrigger(Trigger):\n \"\"\"\n Trigger for internal interfacing use call the callback as soon\n as it is primed and then remove it's self from the scheduler\n \"\"\"\n def __init__(self, name=\"\"):\n Trigger.__init__(self)\n self._callback = None\n self.name = name\n\n def prime(self, callback):\n callback(self)\n\n\nclass Join(PythonTrigger):\n \"\"\"\n Join a coroutine, firing when it exits\n \"\"\"\n # Ensure that each coroutine has at most one join trigger.\n # Using a weak dictionary ensures we don't create a reference cycle\n _instances = weakref.WeakValueDictionary()\n\n def __new__(cls, coroutine):\n # find the existing instance, if possible - else create a new one\n try:\n return cls._instances[coroutine]\n except KeyError:\n instance = super(Join, cls).__new__(cls)\n cls._instances[coroutine] = instance\n return instance\n\n def __init__(self, coroutine):\n super(Join, self).__init__()\n self._coroutine = coroutine\n self.pass_retval = True\n\n @property\n def retval(self):\n return self._coroutine.retval\n\n def prime(self, callback):\n if self._coroutine._finished:\n callback(self)\n else:\n super(Join, self).prime(callback)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self._coroutine.__name__\n", "path": "cocotb/triggers.py"}], "after_files": [{"content": "''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
'''\n\n\"\"\"\n A collections of triggers which a testbench can 'yield'\n\"\"\"\nimport os\nimport weakref\n\n# For autodocumentation don't need the extension modules\nif \"SPHINX_BUILD\" in os.environ:\n simulator = None\nelse:\n import simulator\nfrom cocotb.log import SimLog\nfrom cocotb.result import raise_error\nfrom cocotb.utils import get_sim_steps, get_time_from_sim_steps\n\n\nclass TriggerException(Exception):\n pass\n\n\nclass Trigger(object):\n \"\"\"Base class to derive from\"\"\"\n def __init__(self):\n self.log = SimLog(\"cocotb.%s\" % (self.__class__.__name__), id(self))\n self.signal = None\n self.primed = False\n\n def prime(self, *args):\n self.primed = True\n\n def unprime(self):\n \"\"\"Remove any pending callbacks if necessary\"\"\"\n self.primed = False\n\n def __del__(self):\n \"\"\"Ensure if a trigger drops out of scope we remove any pending\n callbacks\"\"\"\n self.unprime()\n\n def __str__(self):\n return self.__class__.__name__\n\n\nclass PythonTrigger(Trigger):\n \"\"\"Python triggers don't use GPI at all\n\n For example notification of coroutine completion etc\n\n TODO:\n Still need to implement unprime\n \"\"\"\n pass\n\n\nclass GPITrigger(Trigger):\n \"\"\"\n Base Trigger class for GPI triggers\n\n Consumes simulation time\n \"\"\"\n def __init__(self):\n Trigger.__init__(self)\n\n # Required to ensure documentation can build\n # if simulator is not None:\n # self.cbhdl = simulator.create_callback(self)\n # else:\n self.cbhdl = 0\n\n def unprime(self):\n \"\"\"Disable a primed trigger, can be reprimed\"\"\"\n if self.cbhdl != 0:\n simulator.deregister_callback(self.cbhdl)\n self.cbhdl = 0\n Trigger.unprime(self)\n\n def __del__(self):\n \"\"\"Remove knowledge of the trigger\"\"\"\n if self.cbhdl != 0:\n self.unprime()\n Trigger.__del__(self)\n\n\nclass Timer(GPITrigger):\n \"\"\"\n Execution will resume when the specified time period expires\n\n Consumes simulation time\n \"\"\"\n def __init__(self, time_ps, units=None):\n GPITrigger.__init__(self)\n self.sim_steps = get_sim_steps(time_ps, units)\n\n def prime(self, callback):\n \"\"\"Register for a timed callback\"\"\"\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_timed_callback(self.sim_steps,\n callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%1.2fps)\" % get_time_from_sim_steps(self.sim_steps,units='ps')\n\nclass _ReadOnly(GPITrigger):\n \"\"\"\n Execution will resume when the readonly portion of the sim cycles is\n readched\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_readonly_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(readonly)\"\n\n_ro = _ReadOnly()\n\n\ndef ReadOnly():\n return _ro\n\n\nclass _ReadWrite(GPITrigger):\n \"\"\"\n Execution will resume when the readwrite portion of the sim cycles is\n reached\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n # import pdb\n # pdb.set_trace()\n self.cbhdl = simulator.register_rwsynch_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(readwritesync)\"\n\n_rw = 
_ReadWrite()\n\n\ndef ReadWrite():\n return _rw\n\n\nclass _NextTimeStep(GPITrigger):\n \"\"\"\n Execution will resume when the next time step is started\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_nextstep_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(nexttimestep)\"\n\n_nxts = _NextTimeStep()\n\n\ndef NextTimeStep():\n return _nxts\n\n\nclass _EdgeBase(GPITrigger):\n \"\"\"\n Execution will resume when an edge occurs on the provided signal\n \"\"\"\n @classmethod\n @property\n def _edge_type(self):\n \"\"\"\n The edge type, as understood by the C code. Must be set in subclasses\n \"\"\"\n raise NotImplementedError\n\n # Ensure that each signal has at most one edge trigger per edge type.\n # Using a weak dictionary ensures we don't create a reference cycle\n _instances = weakref.WeakValueDictionary()\n\n def __new__(cls, signal):\n # find the existing instance, if possible - else create a new one\n key = (signal, cls._edge_type)\n try:\n return cls._instances[key]\n except KeyError:\n instance = super(_EdgeBase, cls).__new__(cls)\n cls._instances[key] = instance\n return instance\n\n def __init__(self, signal):\n super(_EdgeBase, self).__init__()\n self.signal = signal\n\n def prime(self, callback):\n \"\"\"Register notification of a value change via a callback\"\"\"\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_value_change_callback(\n self.signal._handle, callback, type(self)._edge_type, self\n )\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n super(_EdgeBase, self).prime()\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.signal._name\n\n\nclass RisingEdge(_EdgeBase):\n \"\"\" Triggers on the rising edge of the provided signal \"\"\"\n _edge_type = 1\n\n\nclass FallingEdge(_EdgeBase):\n \"\"\" Triggers on the falling edge of the provided signal \"\"\"\n _edge_type = 2\n\n\nclass Edge(_EdgeBase):\n \"\"\" Triggers on either edge in a signal \"\"\"\n _edge_type = 3\n\n\nclass ClockCycles(GPITrigger):\n \"\"\"\n Execution will resume after N rising edges or N falling edges\n \"\"\"\n def __init__(self, signal, num_cycles, rising=True):\n super(ClockCycles, self).__init__()\n self.signal = signal\n self.num_cycles = num_cycles\n if rising is True:\n self._rising = 1\n else:\n self._rising = 2\n\n def prime(self, callback):\n self._callback = callback\n\n def _check(obj):\n self.unprime()\n\n if self.signal.value:\n self.num_cycles -= 1\n\n if self.num_cycles <= 0:\n self._callback(self)\n return\n\n self.cbhdl = simulator.register_value_change_callback(self.signal.\n _handle,\n _check,\n self._rising,\n self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n\n self.cbhdl = simulator.register_value_change_callback(self.signal.\n _handle,\n _check,\n self._rising,\n self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.signal._name\n\n\nclass Combine(PythonTrigger):\n \"\"\"\n Combines multiple triggers together. 
Coroutine will continue when all\n triggers have fired\n \"\"\"\n\n def __init__(self, *args):\n PythonTrigger.__init__(self)\n self._triggers = args\n # TODO: check that trigger is an iterable containing\n # only Trigger objects\n try:\n for trigger in self._triggers:\n if not isinstance(trigger, Trigger):\n raise TriggerException(\"All combined triggers must be \"\n \"instances of Trigger! Got: %s\" %\n trigger.__class__.__name__)\n except Exception:\n raise TriggerException(\"%s requires a list of Trigger objects\" %\n self.__class__.__name__)\n\n def prime(self, callback):\n self._callback = callback\n self._fired = []\n for trigger in self._triggers:\n trigger.prime(self._check_all_fired)\n Trigger.prime(self)\n\n def _check_all_fired(self, trigger):\n self._fired.append(trigger)\n if self._fired == self._triggers:\n self._callback(self)\n\n def unprime(self):\n for trigger in self._triggers:\n trigger.unprime()\n\n\nclass _Event(PythonTrigger):\n \"\"\"\n Unique instance used by the Event object.\n\n One created for each attempt to wait on the event so that the scheduler\n can maintain a dictionary of indexing each individual coroutine\n\n FIXME: This will leak - need to use peers to ensure everything is removed\n \"\"\"\n def __init__(self, parent):\n PythonTrigger.__init__(self)\n self.parent = parent\n\n def prime(self, callback):\n self._callback = callback\n self.parent.prime(callback, self)\n Trigger.prime(self)\n\n def __call__(self):\n self._callback(self)\n\n\nclass Event(PythonTrigger):\n \"\"\"\n Event to permit synchronisation between two coroutines\n \"\"\"\n def __init__(self, name=\"\"):\n PythonTrigger.__init__(self)\n self._pending = []\n self.name = name\n self.fired = False\n self.data = None\n\n def prime(self, callback, trigger):\n self._pending.append(trigger)\n Trigger.prime(self)\n\n def set(self, data=None):\n \"\"\"Wake up any coroutines blocked on this event\"\"\"\n self.fired = True\n self.data = data\n\n p = self._pending[:]\n\n self._pending = []\n\n for trigger in p:\n trigger()\n\n def wait(self):\n \"\"\"This can be yielded to block this coroutine\n until another wakes it\n\n If the Event has already been fired, this returns NullTrigger()\n To reset the event (and enable the use of wait() again), clear() should be called\n \"\"\"\n if self.fired:\n return NullTrigger()\n return _Event(self)\n\n def clear(self):\n \"\"\"Clear this event that's fired.\n\n Subsequent calls to wait will block until set() is called again\"\"\"\n self.fired = False\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.name\n\n\nclass _Lock(PythonTrigger):\n \"\"\"\n Unique instance used by the Lock object.\n\n One created for each attempt to acquire the Lock so that the scheduler\n can maintain a dictionary of indexing each individual coroutine\n\n FIXME: This will leak - need to use peers to ensure everything is removed\n \"\"\"\n def __init__(self, parent):\n PythonTrigger.__init__(self)\n self.parent = parent\n\n def prime(self, callback):\n self._callback = callback\n self.parent.prime(callback, self)\n Trigger.prime(self)\n\n def __call__(self):\n self._callback(self)\n\n\nclass Lock(PythonTrigger):\n \"\"\"\n Lock primitive (not re-entrant)\n \"\"\"\n\n def __init__(self, name=\"\"):\n PythonTrigger.__init__(self)\n self._pending_unprimed = []\n self._pending_primed = []\n self.name = name\n self.locked = False\n\n def prime(self, callback, trigger):\n Trigger.prime(self)\n\n self._pending_unprimed.remove(trigger)\n\n if not self.locked:\n 
self.locked = True\n callback(trigger)\n else:\n self._pending_primed.append(trigger)\n\n def acquire(self):\n \"\"\"This can be yielded to block until the lock is acquired\"\"\"\n trig = _Lock(self)\n self._pending_unprimed.append(trig)\n return trig\n\n def release(self):\n\n if not self.locked:\n raise_error(self, \"Attempt to release an unacquired Lock %s\" %\n (str(self)))\n\n self.locked = False\n\n # nobody waiting for this lock\n if not self._pending_primed:\n return\n\n trigger = self._pending_primed.pop(0)\n self.locked = True\n trigger()\n\n def __str__(self):\n return \"%s(%s) [%s waiting]\" % (str(self.__class__.__name__),\n self.name,\n len(self._pending_primed))\n\n def __nonzero__(self):\n \"\"\"Provide boolean of a Lock\"\"\"\n return self.locked\n\n __bool__ = __nonzero__\n\n\nclass NullTrigger(Trigger):\n \"\"\"\n Trigger for internal interfacing use call the callback as soon\n as it is primed and then remove it's self from the scheduler\n \"\"\"\n def __init__(self, name=\"\"):\n Trigger.__init__(self)\n self._callback = None\n self.name = name\n\n def prime(self, callback):\n callback(self)\n\n\nclass Join(PythonTrigger):\n \"\"\"\n Join a coroutine, firing when it exits\n \"\"\"\n # Ensure that each coroutine has at most one join trigger.\n # Using a weak dictionary ensures we don't create a reference cycle\n _instances = weakref.WeakValueDictionary()\n\n def __new__(cls, coroutine):\n # find the existing instance, if possible - else create a new one\n try:\n return cls._instances[coroutine]\n except KeyError:\n instance = super(Join, cls).__new__(cls)\n cls._instances[coroutine] = instance\n return instance\n\n def __init__(self, coroutine):\n super(Join, self).__init__()\n self._coroutine = coroutine\n self.pass_retval = True\n\n @property\n def retval(self):\n return self._coroutine.retval\n\n def prime(self, callback):\n if self._coroutine._finished:\n callback(self)\n else:\n super(Join, self).prime(callback)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self._coroutine.__name__\n", "path": "cocotb/triggers.py"}]} |
gh_patches_debug_1424 | rasdani/github-patches | git_diff | e-valuation__EvaP-728 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Warning in courses with a small number of participants
In courses with 5 or fewer participants, a warning should be shown above the course's questionnaire:
_This course has only a small number of participants. Please remember that your comments will be visible for the responsible person and the contributors you're evaluating. If two or more people evaluate the course, the results of all voting questions will also be published._
--- END ISSUE ---
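The requested change reduces to a boolean threshold computed in the view plus a notice in `student_vote.html` keyed on that flag. A minimal standalone sketch of the threshold logic only (the helper name `build_template_data` is hypothetical and not part of EvaP; the template side is not covered here):

```python
def build_template_data(num_participants: int) -> dict:
    # Warn when five or fewer students can vote, as requested in the issue.
    return {"participants_warning": num_participants <= 5}


assert build_template_data(5)["participants_warning"] is True
assert build_template_data(6)["participants_warning"] is False
print("warning flag set only for courses with at most 5 participants")
```

Keeping the comparison in the view keeps the cutoff testable in one place instead of burying it in template logic.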
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/student/views.py`
Content:
```
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied
3 from django.db import transaction
4 from django.shortcuts import get_object_or_404, redirect, render
5 from django.utils.translation import ugettext as _
6
7 from evap.evaluation.auth import participant_required
8 from evap.evaluation.models import Course, Semester
9 from evap.evaluation.tools import STUDENT_STATES_ORDERED
10
11 from evap.student.forms import QuestionsForm
12 from evap.student.tools import make_form_identifier
13
14 from collections import OrderedDict
15
16 @participant_required
17 def index(request):
18 # retrieve all courses, where the user is a participant and that are not new
19 courses = list(set(Course.objects.filter(participants=request.user).exclude(state="new")))
20 voted_courses = list(set(Course.objects.filter(voters=request.user)))
21 due_courses = list(set(Course.objects.filter(participants=request.user, state='inEvaluation').exclude(voters=request.user)))
22
23 sorter = lambda course: (list(STUDENT_STATES_ORDERED.keys()).index(course.student_state), course.vote_end_date, course.name)
24 courses.sort(key=sorter)
25
26 semesters = Semester.objects.all()
27 semester_list = [dict(semester_name=semester.name, id=semester.id, courses=[course for course in courses if course.semester_id == semester.id]) for semester in semesters]
28
29 template_data = dict(
30 semester_list=semester_list,
31 voted_courses=voted_courses,
32 due_courses=due_courses,
33 can_download_grades=request.user.can_download_grades,
34 )
35 return render(request, "student_index.html", template_data)
36
37
38 def vote_preview(request, course):
39 """
40 Renders a preview of the voting page for the given course.
41 Not used by the student app itself, but by staff and contributor.
42 """
43 form_groups = helper_create_voting_form_groups(request, course.contributions.all())
44 course_form_group = form_groups.pop(course.general_contribution)
45 contributor_form_groups = list((contribution.contributor, contribution.label, form_group, False) for contribution, form_group in form_groups.items())
46
47 template_data = dict(
48 errors_exist=False,
49 course_form_group=course_form_group,
50 contributor_form_groups=contributor_form_groups,
51 course=course,
52 preview=True)
53 return render(request, "student_vote.html", template_data)
54
55
56 @participant_required
57 def vote(request, course_id):
58 # retrieve course and make sure that the user is allowed to vote
59 course = get_object_or_404(Course, id=course_id)
60 if not course.can_user_vote(request.user):
61 raise PermissionDenied
62
63 # prevent a user from voting on themselves.
64 contributions_to_vote_on = course.contributions.exclude(contributor=request.user).all()
65 form_groups = helper_create_voting_form_groups(request, contributions_to_vote_on)
66
67 if not all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()):
68 errors_exist = any(helper_has_errors(form_group) for form_group in form_groups.values())
69
70 course_form_group = form_groups.pop(course.general_contribution)
71
72 contributor_form_groups = list((contribution.contributor, contribution.label, form_group, helper_has_errors(form_group)) for contribution, form_group in form_groups.items())
73
74 template_data = dict(
75 errors_exist=errors_exist,
76 course_form_group=course_form_group,
77 contributor_form_groups=contributor_form_groups,
78 course=course,
79 preview=False)
80 return render(request, "student_vote.html", template_data)
81
82 # all forms are valid, begin vote operation
83 with transaction.atomic():
84 for contribution, form_group in form_groups.items():
85 for questionnaire_form in form_group:
86 questionnaire = questionnaire_form.questionnaire
87 for question in questionnaire.question_set.all():
88 identifier = make_form_identifier(contribution, questionnaire, question)
89 value = questionnaire_form.cleaned_data.get(identifier)
90
91 if question.is_text_question:
92 if value:
93 question.answer_class.objects.create(
94 contribution=contribution,
95 question=question,
96 answer=value)
97 else:
98 if value != 6:
99 answer_counter, created = question.answer_class.objects.get_or_create(contribution=contribution, question=question, answer=value)
100 answer_counter.add_vote()
101 answer_counter.save()
102
103 # remember that the user voted already
104 course.voters.add(request.user)
105
106 course.was_evaluated(request)
107
108 messages.success(request, _("Your vote was recorded."))
109 return redirect('student:index')
110
111
112 def helper_create_form_group(request, contribution):
113 return list(QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in contribution.questionnaires.all())
114
115 def helper_create_voting_form_groups(request, contributions):
116 form_groups = OrderedDict()
117 for contribution in contributions:
118 form_groups[contribution] = helper_create_form_group(request, contribution)
119 return form_groups
120
121 def helper_has_errors(form_group):
122 return any(form.errors for form in form_group)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/student/views.py b/evap/student/views.py
--- a/evap/student/views.py
+++ b/evap/student/views.py
@@ -76,6 +76,7 @@
course_form_group=course_form_group,
contributor_form_groups=contributor_form_groups,
course=course,
+ participants_warning=course.num_participants <= 5,
preview=False)
return render(request, "student_vote.html", template_data)
| {"golden_diff": "diff --git a/evap/student/views.py b/evap/student/views.py\n--- a/evap/student/views.py\n+++ b/evap/student/views.py\n@@ -76,6 +76,7 @@\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n+ participants_warning=course.num_participants <= 5,\n preview=False)\n return render(request, \"student_vote.html\", template_data)\n", "issue": "Warning in courses with small number of participants\nIn courses with 5 or less participants a warning should be shown above the course's questionnaire:\n\n_This course has only a small number of participants. Please remember that your comments will be visible for the responsible person and the contributors you're evaluating. If two or more people evaluate the course, the results of all voting questions will also be published._\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import participant_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import STUDENT_STATES_ORDERED\n\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom collections import OrderedDict\n\n@participant_required\ndef index(request):\n # retrieve all courses, where the user is a participant and that are not new\n courses = list(set(Course.objects.filter(participants=request.user).exclude(state=\"new\")))\n voted_courses = list(set(Course.objects.filter(voters=request.user)))\n due_courses = list(set(Course.objects.filter(participants=request.user, state='inEvaluation').exclude(voters=request.user)))\n\n sorter = lambda course: (list(STUDENT_STATES_ORDERED.keys()).index(course.student_state), course.vote_end_date, course.name)\n courses.sort(key=sorter)\n\n semesters = Semester.objects.all()\n semester_list = [dict(semester_name=semester.name, id=semester.id, courses=[course for course in courses if course.semester_id == semester.id]) for semester in semesters]\n\n template_data = dict(\n semester_list=semester_list,\n voted_courses=voted_courses,\n due_courses=due_courses,\n can_download_grades=request.user.can_download_grades,\n )\n return render(request, \"student_index.html\", template_data)\n\n\ndef vote_preview(request, course):\n \"\"\"\n Renders a preview of the voting page for the given course.\n Not used by the student app itself, but by staff and contributor.\n \"\"\"\n form_groups = helper_create_voting_form_groups(request, course.contributions.all())\n course_form_group = form_groups.pop(course.general_contribution)\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, False) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=False,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=True)\n return render(request, \"student_vote.html\", template_data)\n\n\n@participant_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # prevent a user from voting on themselves.\n contributions_to_vote_on = 
course.contributions.exclude(contributor=request.user).all()\n form_groups = helper_create_voting_form_groups(request, contributions_to_vote_on)\n\n if not all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()):\n errors_exist = any(helper_has_errors(form_group) for form_group in form_groups.values())\n\n course_form_group = form_groups.pop(course.general_contribution)\n\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, helper_has_errors(form_group)) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=errors_exist,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=False)\n return render(request, \"student_vote.html\", template_data)\n\n # all forms are valid, begin vote operation\n with transaction.atomic():\n for contribution, form_group in form_groups.items():\n for questionnaire_form in form_group:\n questionnaire = questionnaire_form.questionnaire\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = questionnaire_form.cleaned_data.get(identifier)\n\n if question.is_text_question:\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n else:\n if value != 6:\n answer_counter, created = question.answer_class.objects.get_or_create(contribution=contribution, question=question, answer=value)\n answer_counter.add_vote()\n answer_counter.save()\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n course.was_evaluated(request)\n\n messages.success(request, _(\"Your vote was recorded.\"))\n return redirect('student:index')\n\n\ndef helper_create_form_group(request, contribution):\n return list(QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in contribution.questionnaires.all())\n\ndef helper_create_voting_form_groups(request, contributions):\n form_groups = OrderedDict()\n for contribution in contributions:\n form_groups[contribution] = helper_create_form_group(request, contribution)\n return form_groups\n\ndef helper_has_errors(form_group):\n return any(form.errors for form in form_group)\n", "path": "evap/student/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import participant_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import STUDENT_STATES_ORDERED\n\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom collections import OrderedDict\n\n@participant_required\ndef index(request):\n # retrieve all courses, where the user is a participant and that are not new\n courses = list(set(Course.objects.filter(participants=request.user).exclude(state=\"new\")))\n voted_courses = list(set(Course.objects.filter(voters=request.user)))\n due_courses = list(set(Course.objects.filter(participants=request.user, state='inEvaluation').exclude(voters=request.user)))\n\n sorter = lambda course: (list(STUDENT_STATES_ORDERED.keys()).index(course.student_state), course.vote_end_date, course.name)\n courses.sort(key=sorter)\n\n semesters = 
Semester.objects.all()\n semester_list = [dict(semester_name=semester.name, id=semester.id, courses=[course for course in courses if course.semester_id == semester.id]) for semester in semesters]\n\n template_data = dict(\n semester_list=semester_list,\n voted_courses=voted_courses,\n due_courses=due_courses,\n can_download_grades=request.user.can_download_grades,\n )\n return render(request, \"student_index.html\", template_data)\n\n\ndef vote_preview(request, course):\n \"\"\"\n Renders a preview of the voting page for the given course.\n Not used by the student app itself, but by staff and contributor.\n \"\"\"\n form_groups = helper_create_voting_form_groups(request, course.contributions.all())\n course_form_group = form_groups.pop(course.general_contribution)\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, False) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=False,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=True)\n return render(request, \"student_vote.html\", template_data)\n\n\n@participant_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # prevent a user from voting on themselves.\n contributions_to_vote_on = course.contributions.exclude(contributor=request.user).all()\n form_groups = helper_create_voting_form_groups(request, contributions_to_vote_on)\n\n if not all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()):\n errors_exist = any(helper_has_errors(form_group) for form_group in form_groups.values())\n\n course_form_group = form_groups.pop(course.general_contribution)\n\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, helper_has_errors(form_group)) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=errors_exist,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n participants_warning=course.num_participants <= 5,\n preview=False)\n return render(request, \"student_vote.html\", template_data)\n\n # all forms are valid, begin vote operation\n with transaction.atomic():\n for contribution, form_group in form_groups.items():\n for questionnaire_form in form_group:\n questionnaire = questionnaire_form.questionnaire\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = questionnaire_form.cleaned_data.get(identifier)\n\n if question.is_text_question:\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n else:\n if value != 6:\n answer_counter, created = question.answer_class.objects.get_or_create(contribution=contribution, question=question, answer=value)\n answer_counter.add_vote()\n answer_counter.save()\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n course.was_evaluated(request)\n\n messages.success(request, _(\"Your vote was recorded.\"))\n return redirect('student:index')\n\n\ndef helper_create_form_group(request, contribution):\n return list(QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in 
contribution.questionnaires.all())\n\ndef helper_create_voting_form_groups(request, contributions):\n form_groups = OrderedDict()\n for contribution in contributions:\n form_groups[contribution] = helper_create_form_group(request, contribution)\n return form_groups\n\ndef helper_has_errors(form_group):\n return any(form.errors for form in form_group)\n", "path": "evap/student/views.py"}]} |
gh_patches_debug_1425 | rasdani/github-patches | git_diff | WordPress__openverse-api-723 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Swagger/ReDoc page raises an error
## Description
<!-- Concisely describe the bug. Compare your experience with what you expected to happen. -->
<!-- For example: "I clicked the 'submit' button and instead of seeing a thank you message, I saw a blank page." -->
While deploying [v2.5.2](https://github.com/WordPress/openverse-api/releases/tag/v2.5.2) to staging, we noticed that the API documentation page failed to render and caused this error:
```
[2022-05-25 17:02:32,253 - django.request - 241][ERROR] Internal Server Error: /v1/
Traceback (most recent call last):
File "/venv/lib/python3.10/site-packages/drf_yasg/openapi.py", line 110, in __getattr__
return self[make_swagger_name(item)]
KeyError: 'name'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/venv/lib/python3.10/site-packages/django/core/handlers/exception.py", line 55, in inner
response = get_response(request)
File "/venv/lib/python3.10/site-packages/django/core/handlers/base.py", line 197, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/venv/lib/python3.10/site-packages/sentry_sdk/integrations/django/views.py", line 67, in sentry_wrapped_callback
return callback(request, *args, **kwargs)
File "/venv/lib/python3.10/site-packages/drf_yasg/views.py", line 34, in _wrapped_view_func
response = view_func(request, *args, **kwargs)
File "/venv/lib/python3.10/site-packages/django/utils/decorators.py", line 133, in _wrapped_view
response = view_func(request, *args, **kwargs)
File "/venv/lib/python3.10/site-packages/django/views/decorators/vary.py", line 21, in inner_func
response = func(*args, **kwargs)
File "/venv/lib/python3.10/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "/venv/lib/python3.10/site-packages/django/views/generic/base.py", line 84, in view
return self.dispatch(request, *args, **kwargs)
File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 509, in dispatch
response = self.handle_exception(exc)
File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 469, in handle_exception
self.raise_uncaught_exception(exc)
File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
raise exc
File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 506, in dispatch
response = handler(request, *args, **kwargs)
File "/venv/lib/python3.10/site-packages/drf_yasg/views.py", line 94, in get
schema = generator.get_schema(request, self.public)
File "/venv/lib/python3.10/site-packages/drf_yasg/generators.py", line 246, in get_schema
paths, prefix = self.get_paths(endpoints, components, request, public)
File "/venv/lib/python3.10/site-packages/drf_yasg/generators.py", line 404, in get_paths
operation = self.get_operation(view, path, prefix, method, components, request)
File "/venv/lib/python3.10/site-packages/drf_yasg/generators.py", line 446, in get_operation
operation = view_inspector.get_operation(operation_keys)
File "/api/catalog/custom_auto_schema.py", line 14, in get_operation
query = self.get_query_parameters()
File "/venv/lib/python3.10/site-packages/drf_yasg/inspectors/view.py", line 298, in get_query_parameters
if len(set(param_list_to_odict(natural_parameters)) & set(param_list_to_odict(serializer_parameters))) != 0:
File "/venv/lib/python3.10/site-packages/drf_yasg/utils.py", line 266, in param_list_to_odict
result = OrderedDict(((param.name, param.in_), param) for param in parameters)
File "/venv/lib/python3.10/site-packages/drf_yasg/utils.py", line 266, in <genexpr>
result = OrderedDict(((param.name, param.in_), param) for param in parameters)
File "/venv/lib/python3.10/site-packages/drf_yasg/openapi.py", line 113, in __getattr__
raise AttributeError("object of class " + type(self).__name__ + " has no attribute " + item)
AttributeError: object of class Parameter has no attribute name
```
Here's the error the page presents:
```
Something went wrong...
Error downloading http://localhost:8000/v1/?format=openapi HTTP ERROR 500
Stack trace
s/<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:44:26651
read/</<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:95:36080
s/<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:44:26651
read/</<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:95:35658
ReDoc Version: 2.0.0-rc.40
Commit: 17b9873
```
## Reproduction
<!-- Provide detailed steps to reproduce the bug. -->
1. `git checkout v2.5.2`
2. `just build`
3. `just recreate && just init`
4. Visit localhost:8000 and observe error
## Additional context
<!-- Add any other context about the problem here; or delete the section entirely. -->
Sentry issue: https://sentry.io/share/issue/83044216200d47538f3733a16df46adc/
## Resolution
<!-- Replace the [ ] with [x] to check the box. -->
- [ ] 🙋 I would be interested in resolving this bug.
--- END ISSUE ---
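The traceback bottoms out in drf_yasg's `param_list_to_odict`, which keys every query parameter by `(name, in_)`, so any `Parameter`-style object built without a `name` entry raises exactly this `AttributeError`. The class below is a simplified stand-in for drf_yasg's dict-backed `Parameter` (an assumption, not the real implementation), but it reproduces the failure mode in isolation:

```python
from collections import OrderedDict


class FakeParameter(dict):
    """Simplified stand-in for drf_yasg's dict-backed Parameter (an assumption)."""

    def __getattr__(self, item):
        try:
            return self[item]
        except KeyError:
            raise AttributeError(
                "object of class " + type(self).__name__ + " has no attribute " + item
            )


def param_list_to_odict(parameters):
    # drf_yasg builds this mapping to detect duplicate (name, location) pairs.
    return OrderedDict(((p.name, p.in_), p) for p in parameters)


ok = FakeParameter(name="page", in_="query")
broken = FakeParameter(in_="query")  # constructed without a name

print(list(param_list_to_odict([ok])))  # [('page', 'query')]
try:
    param_list_to_odict([ok, broken])
except AttributeError as exc:
    print("comparable failure to the report:", exc)
```

Keeping duplicate or malformed entries out of that merge, for example by not emitting separate pagination parameters when the request serializer already declares them, avoids the crash.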
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/catalog/custom_auto_schema.py`
Content:
```
1 from drf_yasg import openapi
2 from drf_yasg.inspectors import SwaggerAutoSchema
3 from drf_yasg.utils import filter_none, force_real_str
4
5
6 class CustomAutoSchema(SwaggerAutoSchema):
7 def get_operation(self, operation_keys=None):
8 operation_keys = operation_keys or self.operation_keys
9
10 consumes = self.get_consumes()
11 produces = self.get_produces()
12
13 body = self.get_request_body_parameters(consumes)
14 query = self.get_query_parameters()
15 parameters = body + query
16 parameters = filter_none(parameters)
17 parameters = self.add_manual_parameters(parameters)
18
19 operation_id = self.get_operation_id(operation_keys)
20 summary, description = self.get_summary_and_description()
21 security = self.get_security()
22 assert security is None or isinstance(
23 security, list
24 ), "security must be a list of security requirement objects"
25 deprecated = self.is_deprecated()
26 tags = self.get_tags(operation_keys)
27
28 responses = self.get_responses()
29
30 return openapi.Operation(
31 operation_id=operation_id,
32 description=force_real_str(description),
33 summary=force_real_str(summary),
34 responses=responses,
35 parameters=parameters,
36 consumes=consumes,
37 produces=produces,
38 tags=tags,
39 security=security,
40 deprecated=deprecated,
41 **{"x-code-samples": self.overrides.get("code_examples")}
42 )
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/api/catalog/custom_auto_schema.py b/api/catalog/custom_auto_schema.py
--- a/api/catalog/custom_auto_schema.py
+++ b/api/catalog/custom_auto_schema.py
@@ -4,6 +4,14 @@
class CustomAutoSchema(SwaggerAutoSchema):
+ def get_pagination_parameters(self):
+ """
+ Since the pagination params are a part of the ``MediaSearchRequestSerializer``,
+ they need not be added again as pagination params.
+ """
+
+ return []
+
def get_operation(self, operation_keys=None):
operation_keys = operation_keys or self.operation_keys
| {"golden_diff": "diff --git a/api/catalog/custom_auto_schema.py b/api/catalog/custom_auto_schema.py\n--- a/api/catalog/custom_auto_schema.py\n+++ b/api/catalog/custom_auto_schema.py\n@@ -4,6 +4,14 @@\n \n \n class CustomAutoSchema(SwaggerAutoSchema):\n+ def get_pagination_parameters(self):\n+ \"\"\"\n+ Since the pagination params are a part of the ``MediaSearchRequestSerializer``,\n+ they need not be added again as pagination params.\n+ \"\"\"\n+\n+ return []\n+\n def get_operation(self, operation_keys=None):\n operation_keys = operation_keys or self.operation_keys\n", "issue": "Swagger/ReDoc page raises an error\n## Description\n<!-- Concisely describe the bug. Compare your experience with what you expected to happen. -->\n<!-- For example: \"I clicked the 'submit' button and instead of seeing a thank you message, I saw a blank page.\" -->\nWhile deploying [v2.5.2](https://github.com/WordPress/openverse-api/releases/tag/v2.5.2) to staging, we noticed that the API documentation page failed to render and caused this error:\n\n```\n[2022-05-25 17:02:32,253 - django.request - 241][ERROR] Internal Server Error: /v1/\nTraceback (most recent call last):\n File \"/venv/lib/python3.10/site-packages/drf_yasg/openapi.py\", line 110, in __getattr__\n return self[make_swagger_name(item)]\nKeyError: 'name'\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/venv/lib/python3.10/site-packages/django/core/handlers/exception.py\", line 55, in inner\n response = get_response(request)\n File \"/venv/lib/python3.10/site-packages/django/core/handlers/base.py\", line 197, in _get_response\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\n File \"/venv/lib/python3.10/site-packages/sentry_sdk/integrations/django/views.py\", line 67, in sentry_wrapped_callback\n return callback(request, *args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/views.py\", line 34, in _wrapped_view_func\n response = view_func(request, *args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/django/utils/decorators.py\", line 133, in _wrapped_view\n response = view_func(request, *args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/django/views/decorators/vary.py\", line 21, in inner_func\n response = func(*args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/django/views/decorators/csrf.py\", line 54, in wrapped_view\n return view_func(*args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/django/views/generic/base.py\", line 84, in view\n return self.dispatch(request, *args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/rest_framework/views.py\", line 509, in dispatch\n response = self.handle_exception(exc)\n File \"/venv/lib/python3.10/site-packages/rest_framework/views.py\", line 469, in handle_exception\n self.raise_uncaught_exception(exc)\n File \"/venv/lib/python3.10/site-packages/rest_framework/views.py\", line 480, in raise_uncaught_exception\n raise exc\n File \"/venv/lib/python3.10/site-packages/rest_framework/views.py\", line 506, in dispatch\n response = handler(request, *args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/views.py\", line 94, in get\n schema = generator.get_schema(request, self.public)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/generators.py\", line 246, in get_schema\n paths, prefix = self.get_paths(endpoints, components, request, public)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/generators.py\", line 404, in get_paths\n 
operation = self.get_operation(view, path, prefix, method, components, request)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/generators.py\", line 446, in get_operation\n operation = view_inspector.get_operation(operation_keys)\n File \"/api/catalog/custom_auto_schema.py\", line 14, in get_operation\n query = self.get_query_parameters()\n File \"/venv/lib/python3.10/site-packages/drf_yasg/inspectors/view.py\", line 298, in get_query_parameters\n if len(set(param_list_to_odict(natural_parameters)) & set(param_list_to_odict(serializer_parameters))) != 0:\n File \"/venv/lib/python3.10/site-packages/drf_yasg/utils.py\", line 266, in param_list_to_odict\n result = OrderedDict(((param.name, param.in_), param) for param in parameters)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/utils.py\", line 266, in <genexpr>\n result = OrderedDict(((param.name, param.in_), param) for param in parameters)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/openapi.py\", line 113, in __getattr__\n raise AttributeError(\"object of class \" + type(self).__name__ + \" has no attribute \" + item)\nAttributeError: object of class Parameter has no attribute name\n```\n\nHere's the error the page presents:\n```\nSomething went wrong...\nError downloading http://localhost:8000/v1/?format=openapi HTTP ERROR 500\n\nStack trace\n\ns/<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:44:26651\nread/</<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:95:36080\n\n\ns/<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:44:26651\nread/</<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:95:35658\n\nReDoc Version: 2.0.0-rc.40\nCommit: 17b9873\n```\n\n## Reproduction\n<!-- Provide detailed steps to reproduce the bug. -->\n1. `git checkout v2.5.2`\n2. `just build`\n3. `just recreate && just init`\n4. Visit localhost:8000 and observe error\n\n## Additional context\n<!-- Add any other context about the problem here; or delete the section entirely. -->\nSentry issue: https://sentry.io/share/issue/83044216200d47538f3733a16df46adc/\n\n## Resolution\n<!-- Replace the [ ] with [x] to check the box. 
-->\n- [ ] \ud83d\ude4b I would be interested in resolving this bug.\n\n", "before_files": [{"content": "from drf_yasg import openapi\nfrom drf_yasg.inspectors import SwaggerAutoSchema\nfrom drf_yasg.utils import filter_none, force_real_str\n\n\nclass CustomAutoSchema(SwaggerAutoSchema):\n def get_operation(self, operation_keys=None):\n operation_keys = operation_keys or self.operation_keys\n\n consumes = self.get_consumes()\n produces = self.get_produces()\n\n body = self.get_request_body_parameters(consumes)\n query = self.get_query_parameters()\n parameters = body + query\n parameters = filter_none(parameters)\n parameters = self.add_manual_parameters(parameters)\n\n operation_id = self.get_operation_id(operation_keys)\n summary, description = self.get_summary_and_description()\n security = self.get_security()\n assert security is None or isinstance(\n security, list\n ), \"security must be a list of security requirement objects\"\n deprecated = self.is_deprecated()\n tags = self.get_tags(operation_keys)\n\n responses = self.get_responses()\n\n return openapi.Operation(\n operation_id=operation_id,\n description=force_real_str(description),\n summary=force_real_str(summary),\n responses=responses,\n parameters=parameters,\n consumes=consumes,\n produces=produces,\n tags=tags,\n security=security,\n deprecated=deprecated,\n **{\"x-code-samples\": self.overrides.get(\"code_examples\")}\n )\n", "path": "api/catalog/custom_auto_schema.py"}], "after_files": [{"content": "from drf_yasg import openapi\nfrom drf_yasg.inspectors import SwaggerAutoSchema\nfrom drf_yasg.utils import filter_none, force_real_str\n\n\nclass CustomAutoSchema(SwaggerAutoSchema):\n def get_pagination_parameters(self):\n \"\"\"\n Since the pagination params are a part of the ``MediaSearchRequestSerializer``,\n they need not be added again as pagination params.\n \"\"\"\n\n return []\n\n def get_operation(self, operation_keys=None):\n operation_keys = operation_keys or self.operation_keys\n\n consumes = self.get_consumes()\n produces = self.get_produces()\n\n body = self.get_request_body_parameters(consumes)\n query = self.get_query_parameters()\n parameters = body + query\n parameters = filter_none(parameters)\n parameters = self.add_manual_parameters(parameters)\n\n operation_id = self.get_operation_id(operation_keys)\n summary, description = self.get_summary_and_description()\n security = self.get_security()\n assert security is None or isinstance(\n security, list\n ), \"security must be a list of security requirement objects\"\n deprecated = self.is_deprecated()\n tags = self.get_tags(operation_keys)\n\n responses = self.get_responses()\n\n return openapi.Operation(\n operation_id=operation_id,\n description=force_real_str(description),\n summary=force_real_str(summary),\n responses=responses,\n parameters=parameters,\n consumes=consumes,\n produces=produces,\n tags=tags,\n security=security,\n deprecated=deprecated,\n **{\"x-code-samples\": self.overrides.get(\"code_examples\")}\n )\n", "path": "api/catalog/custom_auto_schema.py"}]} |
gh_patches_debug_1426 | rasdani/github-patches | git_diff | xorbitsai__inference-758 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: vllm bug
### Describe the bug
KeyError: [address=172.22.227.26:33767, pid=20969] 'stream'
### To Reproduce
To help us to reproduce this bug, please provide information below:
```
Traceback (most recent call last):
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/api/restful_api.py", line 824, in create_chat_completion
data = await model.chat(prompt, system_prompt, chat_history, kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/context.py", line 227, in send
return self._process_result_message(result)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/context.py", line 102, in _process_result_message
raise message.as_instanceof_cause()
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/pool.py", line 657, in send
result = await self._run_coro(message.message_id, coro)
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/pool.py", line 368, in _run_coro
return await coro
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/api.py", line 306, in __on_receive__
return await super().__on_receive__(message) # type: ignore
^^^^^^^^^^^^^^^^^
File "xoscar/core.pyx", line 558, in __on_receive__
raise ex
File "xoscar/core.pyx", line 520, in xoscar.core._BaseActor.__on_receive__
async with self._lock:
^^^^^^^^^^^^^^^^^
File "xoscar/core.pyx", line 521, in xoscar.core._BaseActor.__on_receive__
with debug_async_timeout('actor_lock_timeout',
^^^^^^^^^^^^^^^^^
File "xoscar/core.pyx", line 526, in xoscar.core._BaseActor.__on_receive__
result = await result
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/utils.py", line 33, in wrapped
ret = await func(*args, **kwargs)
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py", line 77, in wrapped_func
ret = await fn(self, *args, **kwargs)
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py", line 272, in chat
return await self._call_async_wrapper(_async_wrapper)
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py", line 223, in _call_async_wrapper
return await asyncio.create_task(_wrapper())
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py", line 268, in _async_wrapper
await getattr(self._model, "async_chat")(prompt, *args, **kwargs)
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/model/llm/vllm/core.py", line 348, in async_chat
stream = sanitized["stream"]
^^^^^^^^^^^^^^^^^
KeyError: [address=172.22.227.26:33767, pid=20969] 'stream'
```
1. Your Python version.
2. The version of xinference you use.
3. Versions of crucial packages.
4. Full stack of the error.
5. Minimized code to reproduce the error.
### Expected behavior
A clear and concise description of what you expected to happen.
### Additional context
Add any other context about the problem here.
--- END ISSUE ---
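The traceback ends in `async_chat`, where `sanitized["stream"]` is read unconditionally even though a chat request may omit `stream` from its generate config entirely. Below is a stripped-down sketch of the defensive lookup that avoids the KeyError; the `_sanitize_chat_config` stand-in is an assumption for illustration, not the real method (which also injects stop tokens from the prompt style):

```python
from typing import Dict, Optional


def _sanitize_chat_config(generate_config: Optional[Dict] = None) -> Dict:
    # Simplified stand-in: just normalises None to an empty dict.
    return dict(generate_config or {})


cfg = _sanitize_chat_config({"max_tokens": 512})  # caller never mentioned "stream"

# cfg["stream"] would raise KeyError('stream'), which is what surfaced through
# xoscar in the report; reading it with .get() falls back to non-streaming.
stream = cfg.get("stream", None)
print("streaming enabled:", bool(stream))  # streaming enabled: False
```

Treating a missing `stream` key as falsy mirrors `_sanitize_generate_config` in the listing above, which already defaults `stream` to `None`.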
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xinference/model/llm/vllm/core.py`
Content:
```
1 # Copyright 2022-2023 XProbe Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import time
17 import uuid
18 from typing import TYPE_CHECKING, AsyncGenerator, Dict, List, Optional, TypedDict, Union
19
20 from ....constants import XINFERENCE_DISABLE_VLLM
21 from ....types import (
22 ChatCompletion,
23 ChatCompletionChunk,
24 ChatCompletionMessage,
25 Completion,
26 CompletionChoice,
27 CompletionChunk,
28 CompletionUsage,
29 )
30 from .. import LLM, LLMFamilyV1, LLMSpecV1
31 from ..utils import ChatModelMixin
32
33 logger = logging.getLogger(__name__)
34
35 if TYPE_CHECKING:
36 from vllm.outputs import RequestOutput
37
38
39 class VLLMModelConfig(TypedDict, total=False):
40 tokenizer_mode: Optional[str]
41 trust_remote_code: bool
42 tensor_parallel_size: int
43 block_size: int
44 swap_space: int # GiB
45 gpu_memory_utilization: float
46 max_num_batched_tokens: int
47 max_num_seqs: int
48 quantization: Optional[str]
49
50
51 class VLLMGenerateConfig(TypedDict, total=False):
52 n: int
53 best_of: Optional[int]
54 presence_penalty: float
55 frequency_penalty: float
56 temperature: float
57 top_p: float
58 max_tokens: int
59 stop_token_ids: Optional[List[int]]
60 stop: Optional[Union[str, List[str]]]
61 stream: bool # non-sampling param, should not be passed to the engine.
62
63
64 try:
65 import vllm # noqa: F401
66
67 VLLM_INSTALLED = True
68 except ImportError:
69 VLLM_INSTALLED = False
70
71 VLLM_SUPPORTED_MODELS = ["llama-2", "baichuan", "internlm-16k", "mistral-v0.1"]
72 VLLM_SUPPORTED_CHAT_MODELS = [
73 "llama-2-chat",
74 "vicuna-v1.3",
75 "vicuna-v1.5",
76 "baichuan-chat",
77 "internlm-chat-7b",
78 "internlm-chat-8k",
79 "internlm-chat-20b",
80 "qwen-chat",
81 "Yi",
82 "Yi-chat",
83 "code-llama",
84 "code-llama-python",
85 "code-llama-instruct",
86 "mistral-instruct-v0.1",
87 "chatglm3",
88 ]
89
90
91 class VLLMModel(LLM):
92 def __init__(
93 self,
94 model_uid: str,
95 model_family: "LLMFamilyV1",
96 model_spec: "LLMSpecV1",
97 quantization: str,
98 model_path: str,
99 model_config: Optional[VLLMModelConfig],
100 ):
101 super().__init__(model_uid, model_family, model_spec, quantization, model_path)
102 self._model_config = model_config
103 self._engine = None
104
105 def load(self):
106 try:
107 from vllm.engine.arg_utils import AsyncEngineArgs
108 from vllm.engine.async_llm_engine import AsyncLLMEngine
109 except ImportError:
110 error_message = "Failed to import module 'vllm'"
111 installation_guide = [
112 "Please make sure 'vllm' is installed. ",
113 "You can install it by `pip install vllm`\n",
114 ]
115
116 raise ImportError(f"{error_message}\n\n{''.join(installation_guide)}")
117
118 self._model_config = self._sanitize_model_config(self._model_config)
119 logger.info(
120 f"Loading {self.model_uid} with following model config: {self._model_config}"
121 )
122
123 engine_args = AsyncEngineArgs(model=self.model_path, **self._model_config)
124 self._engine = AsyncLLMEngine.from_engine_args(engine_args)
125
126 def _sanitize_model_config(
127 self, model_config: Optional[VLLMModelConfig]
128 ) -> VLLMModelConfig:
129 if model_config is None:
130 model_config = VLLMModelConfig()
131
132 cuda_count = self._get_cuda_count()
133
134 model_config.setdefault("tokenizer_mode", "auto")
135 model_config.setdefault("trust_remote_code", True)
136 model_config.setdefault("tensor_parallel_size", cuda_count)
137 model_config.setdefault("block_size", 16)
138 model_config.setdefault("swap_space", 4)
139 model_config.setdefault("gpu_memory_utilization", 0.90)
140 model_config.setdefault("max_num_seqs", 256)
141 model_config.setdefault("quantization", None)
142
143 return model_config
144
145 @staticmethod
146 def _sanitize_generate_config(
147 generate_config: Optional[Dict] = None,
148 ) -> VLLMGenerateConfig:
149 if not generate_config:
150 generate_config = {}
151
152 sanitized = VLLMGenerateConfig()
153 sanitized.setdefault("n", generate_config.get("n", 1))
154 sanitized.setdefault("best_of", generate_config.get("best_of", None))
155 sanitized.setdefault(
156 "presence_penalty", generate_config.get("presence_penalty", 0.0)
157 )
158 sanitized.setdefault(
159 "frequency_penalty", generate_config.get("frequency_penalty", 0.0)
160 )
161 sanitized.setdefault("temperature", generate_config.get("temperature", 1.0))
162 sanitized.setdefault("top_p", generate_config.get("top_p", 1.0))
163 sanitized.setdefault("max_tokens", generate_config.get("max_tokens", 16))
164 sanitized.setdefault("stop", generate_config.get("stop", None))
165 sanitized.setdefault(
166 "stop_token_ids", generate_config.get("stop_token_ids", None)
167 )
168 sanitized.setdefault("stream", generate_config.get("stream", None))
169
170 return sanitized
171
172 @classmethod
173 def match(
174 cls, llm_family: "LLMFamilyV1", llm_spec: "LLMSpecV1", quantization: str
175 ) -> bool:
176 if XINFERENCE_DISABLE_VLLM:
177 return False
178 if not cls._has_cuda_device():
179 return False
180 if not cls._is_linux():
181 return False
182 if quantization != "none":
183 return False
184 if llm_spec.model_format != "pytorch":
185 return False
186 if llm_family.model_name not in VLLM_SUPPORTED_MODELS:
187 return False
188 if "generate" not in llm_family.model_ability:
189 return False
190 return VLLM_INSTALLED
191
192 @staticmethod
193 def _convert_request_output_to_completion_chunk(
194 request_id: str, model: str, request_output: "RequestOutput"
195 ) -> CompletionChunk:
196 choices: List[CompletionChoice] = []
197 for output in request_output.outputs:
198 choices.append(
199 CompletionChoice(
200 text=output.text,
201 index=output.index,
202 logprobs=None, # TODO: support logprobs.
203 finish_reason=output.finish_reason,
204 )
205 )
206 return CompletionChunk(
207 id=request_id,
208 object="text_completion",
209 created=int(time.time()),
210 model=model,
211 choices=choices,
212 )
213
214 @staticmethod
215 def _convert_request_output_to_completion(
216 request_id: str, model: str, request_output: "RequestOutput"
217 ) -> Completion:
218 choices = []
219 for output in request_output.outputs:
220 choices.append(
221 CompletionChoice(
222 text=output.text,
223 index=output.index,
224 logprobs=None, # TODO: support logprobs.
225 finish_reason=output.finish_reason,
226 )
227 )
228
229 prompt_tokens = len(request_output.prompt_token_ids)
230 completion_tokens = sum(
231 len(output.token_ids) for output in request_output.outputs
232 )
233 usage = CompletionUsage(
234 prompt_tokens=prompt_tokens,
235 completion_tokens=completion_tokens,
236 total_tokens=prompt_tokens + completion_tokens,
237 )
238 return Completion(
239 id=request_id,
240 object="text_completion",
241 created=int(time.time()),
242 model=model,
243 choices=choices,
244 usage=usage,
245 )
246
247 async def async_generate(
248 self,
249 prompt: str,
250 generate_config: Optional[Dict] = None,
251 ) -> Union[Completion, AsyncGenerator[CompletionChunk, None]]:
252 try:
253 from vllm.sampling_params import SamplingParams
254 except ImportError:
255 error_message = "Failed to import module 'vllm'"
256 installation_guide = [
257 "Please make sure 'vllm' is installed. ",
258 "You can install it by `pip install vllm`\n",
259 ]
260
261 raise ImportError(f"{error_message}\n\n{''.join(installation_guide)}")
262
263 sanitized_generate_config = self._sanitize_generate_config(generate_config)
264 logger.debug(
265 "Enter generate, prompt: %s, generate config: %s", prompt, generate_config
266 )
267
268 stream = sanitized_generate_config.pop("stream")
269 sampling_params = SamplingParams(**sanitized_generate_config)
270 request_id = str(uuid.uuid1())
271
272 assert self._engine is not None
273 results_generator = self._engine.generate(prompt, sampling_params, request_id)
274
275 async def stream_results() -> AsyncGenerator[CompletionChunk, None]:
276 previous_texts = [""] * sanitized_generate_config["n"]
277 async for _request_output in results_generator:
278 chunk = self._convert_request_output_to_completion_chunk(
279 request_id=request_id,
280 model=self.model_uid,
281 request_output=_request_output,
282 )
283 for i, choice in enumerate(chunk["choices"]):
284 delta = choice["text"][len(previous_texts[i]) :]
285 previous_texts[i] = choice["text"]
286 choice["text"] = delta
287 yield chunk
288
289 if stream:
290 return stream_results()
291 else:
292 final_output = None
293 async for request_output in results_generator:
294 final_output = request_output
295
296 assert final_output is not None
297 return self._convert_request_output_to_completion(
298 request_id, model=self.model_uid, request_output=final_output
299 )
300
301
302 class VLLMChatModel(VLLMModel, ChatModelMixin):
303 @classmethod
304 def match(
305 cls, llm_family: "LLMFamilyV1", llm_spec: "LLMSpecV1", quantization: str
306 ) -> bool:
307 if XINFERENCE_DISABLE_VLLM:
308 return False
309 if quantization != "none":
310 return False
311 if llm_spec.model_format != "pytorch":
312 return False
313 if llm_family.model_name not in VLLM_SUPPORTED_CHAT_MODELS:
314 return False
315 if "chat" not in llm_family.model_ability:
316 return False
317 return VLLM_INSTALLED
318
319 def _sanitize_chat_config(
320 self,
321 generate_config: Optional[Dict] = None,
322 ) -> Dict:
323 if not generate_config:
324 generate_config = {}
325 if self.model_family.prompt_style:
326 if (
327 not generate_config.get("stop")
328 ) and self.model_family.prompt_style.stop:
329 generate_config["stop"] = self.model_family.prompt_style.stop.copy()
330 if self.model_family.prompt_style.stop_token_ids:
331 generate_config.setdefault(
332 "stop_token_ids",
333 self.model_family.prompt_style.stop_token_ids.copy(),
334 )
335 return generate_config
336
337 async def async_chat(
338 self,
339 prompt: str,
340 system_prompt: Optional[str] = None,
341 chat_history: Optional[List[ChatCompletionMessage]] = None,
342 generate_config: Optional[Dict] = None,
343 ) -> Union[ChatCompletion, AsyncGenerator[ChatCompletionChunk, None]]:
344 assert self.model_family.prompt_style is not None
345 prompt_style = self.model_family.prompt_style.copy()
346 if system_prompt:
347 prompt_style.system_prompt = system_prompt
348 chat_history = chat_history or []
349 full_prompt = self.get_prompt(prompt, chat_history, prompt_style)
350
351 sanitized = self._sanitize_chat_config(generate_config)
352 stream = sanitized["stream"]
353
354 if stream:
355 agen = await self.async_generate(full_prompt, sanitized)
356 assert isinstance(agen, AsyncGenerator)
357 return self._async_to_chat_completion_chunks(agen)
358 else:
359 c = await self.async_generate(full_prompt, sanitized)
360 assert not isinstance(c, AsyncGenerator)
361 return self._to_chat_completion(c)
362
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xinference/model/llm/vllm/core.py b/xinference/model/llm/vllm/core.py
--- a/xinference/model/llm/vllm/core.py
+++ b/xinference/model/llm/vllm/core.py
@@ -349,7 +349,7 @@
full_prompt = self.get_prompt(prompt, chat_history, prompt_style)
sanitized = self._sanitize_chat_config(generate_config)
- stream = sanitized["stream"]
+ stream = sanitized.get("stream", None)
if stream:
agen = await self.async_generate(full_prompt, sanitized)
| {"golden_diff": "diff --git a/xinference/model/llm/vllm/core.py b/xinference/model/llm/vllm/core.py\n--- a/xinference/model/llm/vllm/core.py\n+++ b/xinference/model/llm/vllm/core.py\n@@ -349,7 +349,7 @@\n full_prompt = self.get_prompt(prompt, chat_history, prompt_style)\n \n sanitized = self._sanitize_chat_config(generate_config)\n- stream = sanitized[\"stream\"]\n+ stream = sanitized.get(\"stream\", None)\n \n if stream:\n agen = await self.async_generate(full_prompt, sanitized)\n", "issue": "BUG:vllm bug\n### Describe the bug\r\nKeyError: [address=172.22.227.26:33767, pid=20969] 'stream'\r\n\r\n### To Reproduce\r\nTo help us to reproduce this bug, please provide information below:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/api/restful_api.py\", line 824, in create_chat_completion\r\n data = await model.chat(prompt, system_prompt, chat_history, kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/context.py\", line 227, in send\r\n return self._process_result_message(result)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/context.py\", line 102, in _process_result_message\r\n raise message.as_instanceof_cause()\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/pool.py\", line 657, in send\r\n result = await self._run_coro(message.message_id, coro)\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/pool.py\", line 368, in _run_coro\r\n return await coro\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/api.py\", line 306, in __on_receive__\r\n return await super().__on_receive__(message) # type: ignore\r\n ^^^^^^^^^^^^^^^^^\r\n File \"xoscar/core.pyx\", line 558, in __on_receive__\r\n raise ex\r\n File \"xoscar/core.pyx\", line 520, in xoscar.core._BaseActor.__on_receive__\r\n async with self._lock:\r\n ^^^^^^^^^^^^^^^^^\r\n File \"xoscar/core.pyx\", line 521, in xoscar.core._BaseActor.__on_receive__\r\n with debug_async_timeout('actor_lock_timeout',\r\n ^^^^^^^^^^^^^^^^^\r\n File \"xoscar/core.pyx\", line 526, in xoscar.core._BaseActor.__on_receive__\r\n result = await result\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/utils.py\", line 33, in wrapped\r\n ret = await func(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py\", line 77, in wrapped_func\r\n ret = await fn(self, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py\", line 272, in chat\r\n return await self._call_async_wrapper(_async_wrapper)\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py\", line 223, in _call_async_wrapper\r\n return await asyncio.create_task(_wrapper())\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py\", line 268, in _async_wrapper\r\n await getattr(self._model, \"async_chat\")(prompt, *args, **kwargs)\r\n 
^^^^^^^^^^^^^^^^^\r\n File \"/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/model/llm/vllm/core.py\", line 348, in async_chat\r\n stream = sanitized[\"stream\"]\r\n ^^^^^^^^^^^^^^^^^\r\nKeyError: [address=172.22.227.26:33767, pid=20969] 'stream'\r\n\r\n``` \r\n\r\n1. Your Python version.\r\n2. The version of xinference you use.\r\n3. Versions of crucial packages.\r\n4. Full stack of the error.\r\n5. Minimized code to reproduce the error.\r\n\r\n### Expected behavior\r\nA clear and concise description of what you expected to happen.\r\n\r\n### Additional context\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport time\nimport uuid\nfrom typing import TYPE_CHECKING, AsyncGenerator, Dict, List, Optional, TypedDict, Union\n\nfrom ....constants import XINFERENCE_DISABLE_VLLM\nfrom ....types import (\n ChatCompletion,\n ChatCompletionChunk,\n ChatCompletionMessage,\n Completion,\n CompletionChoice,\n CompletionChunk,\n CompletionUsage,\n)\nfrom .. import LLM, LLMFamilyV1, LLMSpecV1\nfrom ..utils import ChatModelMixin\n\nlogger = logging.getLogger(__name__)\n\nif TYPE_CHECKING:\n from vllm.outputs import RequestOutput\n\n\nclass VLLMModelConfig(TypedDict, total=False):\n tokenizer_mode: Optional[str]\n trust_remote_code: bool\n tensor_parallel_size: int\n block_size: int\n swap_space: int # GiB\n gpu_memory_utilization: float\n max_num_batched_tokens: int\n max_num_seqs: int\n quantization: Optional[str]\n\n\nclass VLLMGenerateConfig(TypedDict, total=False):\n n: int\n best_of: Optional[int]\n presence_penalty: float\n frequency_penalty: float\n temperature: float\n top_p: float\n max_tokens: int\n stop_token_ids: Optional[List[int]]\n stop: Optional[Union[str, List[str]]]\n stream: bool # non-sampling param, should not be passed to the engine.\n\n\ntry:\n import vllm # noqa: F401\n\n VLLM_INSTALLED = True\nexcept ImportError:\n VLLM_INSTALLED = False\n\nVLLM_SUPPORTED_MODELS = [\"llama-2\", \"baichuan\", \"internlm-16k\", \"mistral-v0.1\"]\nVLLM_SUPPORTED_CHAT_MODELS = [\n \"llama-2-chat\",\n \"vicuna-v1.3\",\n \"vicuna-v1.5\",\n \"baichuan-chat\",\n \"internlm-chat-7b\",\n \"internlm-chat-8k\",\n \"internlm-chat-20b\",\n \"qwen-chat\",\n \"Yi\",\n \"Yi-chat\",\n \"code-llama\",\n \"code-llama-python\",\n \"code-llama-instruct\",\n \"mistral-instruct-v0.1\",\n \"chatglm3\",\n]\n\n\nclass VLLMModel(LLM):\n def __init__(\n self,\n model_uid: str,\n model_family: \"LLMFamilyV1\",\n model_spec: \"LLMSpecV1\",\n quantization: str,\n model_path: str,\n model_config: Optional[VLLMModelConfig],\n ):\n super().__init__(model_uid, model_family, model_spec, quantization, model_path)\n self._model_config = model_config\n self._engine = None\n\n def load(self):\n try:\n from vllm.engine.arg_utils import AsyncEngineArgs\n from vllm.engine.async_llm_engine import AsyncLLMEngine\n except ImportError:\n error_message = \"Failed to 
import module 'vllm'\"\n installation_guide = [\n \"Please make sure 'vllm' is installed. \",\n \"You can install it by `pip install vllm`\\n\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n self._model_config = self._sanitize_model_config(self._model_config)\n logger.info(\n f\"Loading {self.model_uid} with following model config: {self._model_config}\"\n )\n\n engine_args = AsyncEngineArgs(model=self.model_path, **self._model_config)\n self._engine = AsyncLLMEngine.from_engine_args(engine_args)\n\n def _sanitize_model_config(\n self, model_config: Optional[VLLMModelConfig]\n ) -> VLLMModelConfig:\n if model_config is None:\n model_config = VLLMModelConfig()\n\n cuda_count = self._get_cuda_count()\n\n model_config.setdefault(\"tokenizer_mode\", \"auto\")\n model_config.setdefault(\"trust_remote_code\", True)\n model_config.setdefault(\"tensor_parallel_size\", cuda_count)\n model_config.setdefault(\"block_size\", 16)\n model_config.setdefault(\"swap_space\", 4)\n model_config.setdefault(\"gpu_memory_utilization\", 0.90)\n model_config.setdefault(\"max_num_seqs\", 256)\n model_config.setdefault(\"quantization\", None)\n\n return model_config\n\n @staticmethod\n def _sanitize_generate_config(\n generate_config: Optional[Dict] = None,\n ) -> VLLMGenerateConfig:\n if not generate_config:\n generate_config = {}\n\n sanitized = VLLMGenerateConfig()\n sanitized.setdefault(\"n\", generate_config.get(\"n\", 1))\n sanitized.setdefault(\"best_of\", generate_config.get(\"best_of\", None))\n sanitized.setdefault(\n \"presence_penalty\", generate_config.get(\"presence_penalty\", 0.0)\n )\n sanitized.setdefault(\n \"frequency_penalty\", generate_config.get(\"frequency_penalty\", 0.0)\n )\n sanitized.setdefault(\"temperature\", generate_config.get(\"temperature\", 1.0))\n sanitized.setdefault(\"top_p\", generate_config.get(\"top_p\", 1.0))\n sanitized.setdefault(\"max_tokens\", generate_config.get(\"max_tokens\", 16))\n sanitized.setdefault(\"stop\", generate_config.get(\"stop\", None))\n sanitized.setdefault(\n \"stop_token_ids\", generate_config.get(\"stop_token_ids\", None)\n )\n sanitized.setdefault(\"stream\", generate_config.get(\"stream\", None))\n\n return sanitized\n\n @classmethod\n def match(\n cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\", quantization: str\n ) -> bool:\n if XINFERENCE_DISABLE_VLLM:\n return False\n if not cls._has_cuda_device():\n return False\n if not cls._is_linux():\n return False\n if quantization != \"none\":\n return False\n if llm_spec.model_format != \"pytorch\":\n return False\n if llm_family.model_name not in VLLM_SUPPORTED_MODELS:\n return False\n if \"generate\" not in llm_family.model_ability:\n return False\n return VLLM_INSTALLED\n\n @staticmethod\n def _convert_request_output_to_completion_chunk(\n request_id: str, model: str, request_output: \"RequestOutput\"\n ) -> CompletionChunk:\n choices: List[CompletionChoice] = []\n for output in request_output.outputs:\n choices.append(\n CompletionChoice(\n text=output.text,\n index=output.index,\n logprobs=None, # TODO: support logprobs.\n finish_reason=output.finish_reason,\n )\n )\n return CompletionChunk(\n id=request_id,\n object=\"text_completion\",\n created=int(time.time()),\n model=model,\n choices=choices,\n )\n\n @staticmethod\n def _convert_request_output_to_completion(\n request_id: str, model: str, request_output: \"RequestOutput\"\n ) -> Completion:\n choices = []\n for output in request_output.outputs:\n choices.append(\n CompletionChoice(\n 
text=output.text,\n index=output.index,\n logprobs=None, # TODO: support logprobs.\n finish_reason=output.finish_reason,\n )\n )\n\n prompt_tokens = len(request_output.prompt_token_ids)\n completion_tokens = sum(\n len(output.token_ids) for output in request_output.outputs\n )\n usage = CompletionUsage(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n total_tokens=prompt_tokens + completion_tokens,\n )\n return Completion(\n id=request_id,\n object=\"text_completion\",\n created=int(time.time()),\n model=model,\n choices=choices,\n usage=usage,\n )\n\n async def async_generate(\n self,\n prompt: str,\n generate_config: Optional[Dict] = None,\n ) -> Union[Completion, AsyncGenerator[CompletionChunk, None]]:\n try:\n from vllm.sampling_params import SamplingParams\n except ImportError:\n error_message = \"Failed to import module 'vllm'\"\n installation_guide = [\n \"Please make sure 'vllm' is installed. \",\n \"You can install it by `pip install vllm`\\n\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n sanitized_generate_config = self._sanitize_generate_config(generate_config)\n logger.debug(\n \"Enter generate, prompt: %s, generate config: %s\", prompt, generate_config\n )\n\n stream = sanitized_generate_config.pop(\"stream\")\n sampling_params = SamplingParams(**sanitized_generate_config)\n request_id = str(uuid.uuid1())\n\n assert self._engine is not None\n results_generator = self._engine.generate(prompt, sampling_params, request_id)\n\n async def stream_results() -> AsyncGenerator[CompletionChunk, None]:\n previous_texts = [\"\"] * sanitized_generate_config[\"n\"]\n async for _request_output in results_generator:\n chunk = self._convert_request_output_to_completion_chunk(\n request_id=request_id,\n model=self.model_uid,\n request_output=_request_output,\n )\n for i, choice in enumerate(chunk[\"choices\"]):\n delta = choice[\"text\"][len(previous_texts[i]) :]\n previous_texts[i] = choice[\"text\"]\n choice[\"text\"] = delta\n yield chunk\n\n if stream:\n return stream_results()\n else:\n final_output = None\n async for request_output in results_generator:\n final_output = request_output\n\n assert final_output is not None\n return self._convert_request_output_to_completion(\n request_id, model=self.model_uid, request_output=final_output\n )\n\n\nclass VLLMChatModel(VLLMModel, ChatModelMixin):\n @classmethod\n def match(\n cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\", quantization: str\n ) -> bool:\n if XINFERENCE_DISABLE_VLLM:\n return False\n if quantization != \"none\":\n return False\n if llm_spec.model_format != \"pytorch\":\n return False\n if llm_family.model_name not in VLLM_SUPPORTED_CHAT_MODELS:\n return False\n if \"chat\" not in llm_family.model_ability:\n return False\n return VLLM_INSTALLED\n\n def _sanitize_chat_config(\n self,\n generate_config: Optional[Dict] = None,\n ) -> Dict:\n if not generate_config:\n generate_config = {}\n if self.model_family.prompt_style:\n if (\n not generate_config.get(\"stop\")\n ) and self.model_family.prompt_style.stop:\n generate_config[\"stop\"] = self.model_family.prompt_style.stop.copy()\n if self.model_family.prompt_style.stop_token_ids:\n generate_config.setdefault(\n \"stop_token_ids\",\n self.model_family.prompt_style.stop_token_ids.copy(),\n )\n return generate_config\n\n async def async_chat(\n self,\n prompt: str,\n system_prompt: Optional[str] = None,\n chat_history: Optional[List[ChatCompletionMessage]] = None,\n generate_config: Optional[Dict] = None,\n 
) -> Union[ChatCompletion, AsyncGenerator[ChatCompletionChunk, None]]:\n assert self.model_family.prompt_style is not None\n prompt_style = self.model_family.prompt_style.copy()\n if system_prompt:\n prompt_style.system_prompt = system_prompt\n chat_history = chat_history or []\n full_prompt = self.get_prompt(prompt, chat_history, prompt_style)\n\n sanitized = self._sanitize_chat_config(generate_config)\n stream = sanitized[\"stream\"]\n\n if stream:\n agen = await self.async_generate(full_prompt, sanitized)\n assert isinstance(agen, AsyncGenerator)\n return self._async_to_chat_completion_chunks(agen)\n else:\n c = await self.async_generate(full_prompt, sanitized)\n assert not isinstance(c, AsyncGenerator)\n return self._to_chat_completion(c)\n", "path": "xinference/model/llm/vllm/core.py"}], "after_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport time\nimport uuid\nfrom typing import TYPE_CHECKING, AsyncGenerator, Dict, List, Optional, TypedDict, Union\n\nfrom ....constants import XINFERENCE_DISABLE_VLLM\nfrom ....types import (\n ChatCompletion,\n ChatCompletionChunk,\n ChatCompletionMessage,\n Completion,\n CompletionChoice,\n CompletionChunk,\n CompletionUsage,\n)\nfrom .. 
import LLM, LLMFamilyV1, LLMSpecV1\nfrom ..utils import ChatModelMixin\n\nlogger = logging.getLogger(__name__)\n\nif TYPE_CHECKING:\n from vllm.outputs import RequestOutput\n\n\nclass VLLMModelConfig(TypedDict, total=False):\n tokenizer_mode: Optional[str]\n trust_remote_code: bool\n tensor_parallel_size: int\n block_size: int\n swap_space: int # GiB\n gpu_memory_utilization: float\n max_num_batched_tokens: int\n max_num_seqs: int\n quantization: Optional[str]\n\n\nclass VLLMGenerateConfig(TypedDict, total=False):\n n: int\n best_of: Optional[int]\n presence_penalty: float\n frequency_penalty: float\n temperature: float\n top_p: float\n max_tokens: int\n stop_token_ids: Optional[List[int]]\n stop: Optional[Union[str, List[str]]]\n stream: bool # non-sampling param, should not be passed to the engine.\n\n\ntry:\n import vllm # noqa: F401\n\n VLLM_INSTALLED = True\nexcept ImportError:\n VLLM_INSTALLED = False\n\nVLLM_SUPPORTED_MODELS = [\"llama-2\", \"baichuan\", \"internlm-16k\", \"mistral-v0.1\"]\nVLLM_SUPPORTED_CHAT_MODELS = [\n \"llama-2-chat\",\n \"vicuna-v1.3\",\n \"vicuna-v1.5\",\n \"baichuan-chat\",\n \"internlm-chat-7b\",\n \"internlm-chat-8k\",\n \"internlm-chat-20b\",\n \"qwen-chat\",\n \"Yi\",\n \"Yi-chat\",\n \"code-llama\",\n \"code-llama-python\",\n \"code-llama-instruct\",\n \"mistral-instruct-v0.1\",\n \"chatglm3\",\n]\n\n\nclass VLLMModel(LLM):\n def __init__(\n self,\n model_uid: str,\n model_family: \"LLMFamilyV1\",\n model_spec: \"LLMSpecV1\",\n quantization: str,\n model_path: str,\n model_config: Optional[VLLMModelConfig],\n ):\n super().__init__(model_uid, model_family, model_spec, quantization, model_path)\n self._model_config = model_config\n self._engine = None\n\n def load(self):\n try:\n from vllm.engine.arg_utils import AsyncEngineArgs\n from vllm.engine.async_llm_engine import AsyncLLMEngine\n except ImportError:\n error_message = \"Failed to import module 'vllm'\"\n installation_guide = [\n \"Please make sure 'vllm' is installed. 
\",\n \"You can install it by `pip install vllm`\\n\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n self._model_config = self._sanitize_model_config(self._model_config)\n logger.info(\n f\"Loading {self.model_uid} with following model config: {self._model_config}\"\n )\n\n engine_args = AsyncEngineArgs(model=self.model_path, **self._model_config)\n self._engine = AsyncLLMEngine.from_engine_args(engine_args)\n\n def _sanitize_model_config(\n self, model_config: Optional[VLLMModelConfig]\n ) -> VLLMModelConfig:\n if model_config is None:\n model_config = VLLMModelConfig()\n\n cuda_count = self._get_cuda_count()\n\n model_config.setdefault(\"tokenizer_mode\", \"auto\")\n model_config.setdefault(\"trust_remote_code\", True)\n model_config.setdefault(\"tensor_parallel_size\", cuda_count)\n model_config.setdefault(\"block_size\", 16)\n model_config.setdefault(\"swap_space\", 4)\n model_config.setdefault(\"gpu_memory_utilization\", 0.90)\n model_config.setdefault(\"max_num_seqs\", 256)\n model_config.setdefault(\"quantization\", None)\n\n return model_config\n\n @staticmethod\n def _sanitize_generate_config(\n generate_config: Optional[Dict] = None,\n ) -> VLLMGenerateConfig:\n if not generate_config:\n generate_config = {}\n\n sanitized = VLLMGenerateConfig()\n sanitized.setdefault(\"n\", generate_config.get(\"n\", 1))\n sanitized.setdefault(\"best_of\", generate_config.get(\"best_of\", None))\n sanitized.setdefault(\n \"presence_penalty\", generate_config.get(\"presence_penalty\", 0.0)\n )\n sanitized.setdefault(\n \"frequency_penalty\", generate_config.get(\"frequency_penalty\", 0.0)\n )\n sanitized.setdefault(\"temperature\", generate_config.get(\"temperature\", 1.0))\n sanitized.setdefault(\"top_p\", generate_config.get(\"top_p\", 1.0))\n sanitized.setdefault(\"max_tokens\", generate_config.get(\"max_tokens\", 16))\n sanitized.setdefault(\"stop\", generate_config.get(\"stop\", None))\n sanitized.setdefault(\n \"stop_token_ids\", generate_config.get(\"stop_token_ids\", None)\n )\n sanitized.setdefault(\"stream\", generate_config.get(\"stream\", None))\n\n return sanitized\n\n @classmethod\n def match(\n cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\", quantization: str\n ) -> bool:\n if XINFERENCE_DISABLE_VLLM:\n return False\n if not cls._has_cuda_device():\n return False\n if not cls._is_linux():\n return False\n if quantization != \"none\":\n return False\n if llm_spec.model_format != \"pytorch\":\n return False\n if llm_family.model_name not in VLLM_SUPPORTED_MODELS:\n return False\n if \"generate\" not in llm_family.model_ability:\n return False\n return VLLM_INSTALLED\n\n @staticmethod\n def _convert_request_output_to_completion_chunk(\n request_id: str, model: str, request_output: \"RequestOutput\"\n ) -> CompletionChunk:\n choices: List[CompletionChoice] = []\n for output in request_output.outputs:\n choices.append(\n CompletionChoice(\n text=output.text,\n index=output.index,\n logprobs=None, # TODO: support logprobs.\n finish_reason=output.finish_reason,\n )\n )\n return CompletionChunk(\n id=request_id,\n object=\"text_completion\",\n created=int(time.time()),\n model=model,\n choices=choices,\n )\n\n @staticmethod\n def _convert_request_output_to_completion(\n request_id: str, model: str, request_output: \"RequestOutput\"\n ) -> Completion:\n choices = []\n for output in request_output.outputs:\n choices.append(\n CompletionChoice(\n text=output.text,\n index=output.index,\n logprobs=None, # TODO: support logprobs.\n 
finish_reason=output.finish_reason,\n )\n )\n\n prompt_tokens = len(request_output.prompt_token_ids)\n completion_tokens = sum(\n len(output.token_ids) for output in request_output.outputs\n )\n usage = CompletionUsage(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n total_tokens=prompt_tokens + completion_tokens,\n )\n return Completion(\n id=request_id,\n object=\"text_completion\",\n created=int(time.time()),\n model=model,\n choices=choices,\n usage=usage,\n )\n\n async def async_generate(\n self,\n prompt: str,\n generate_config: Optional[Dict] = None,\n ) -> Union[Completion, AsyncGenerator[CompletionChunk, None]]:\n try:\n from vllm.sampling_params import SamplingParams\n except ImportError:\n error_message = \"Failed to import module 'vllm'\"\n installation_guide = [\n \"Please make sure 'vllm' is installed. \",\n \"You can install it by `pip install vllm`\\n\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n sanitized_generate_config = self._sanitize_generate_config(generate_config)\n logger.debug(\n \"Enter generate, prompt: %s, generate config: %s\", prompt, generate_config\n )\n\n stream = sanitized_generate_config.pop(\"stream\")\n sampling_params = SamplingParams(**sanitized_generate_config)\n request_id = str(uuid.uuid1())\n\n assert self._engine is not None\n results_generator = self._engine.generate(prompt, sampling_params, request_id)\n\n async def stream_results() -> AsyncGenerator[CompletionChunk, None]:\n previous_texts = [\"\"] * sanitized_generate_config[\"n\"]\n async for _request_output in results_generator:\n chunk = self._convert_request_output_to_completion_chunk(\n request_id=request_id,\n model=self.model_uid,\n request_output=_request_output,\n )\n for i, choice in enumerate(chunk[\"choices\"]):\n delta = choice[\"text\"][len(previous_texts[i]) :]\n previous_texts[i] = choice[\"text\"]\n choice[\"text\"] = delta\n yield chunk\n\n if stream:\n return stream_results()\n else:\n final_output = None\n async for request_output in results_generator:\n final_output = request_output\n\n assert final_output is not None\n return self._convert_request_output_to_completion(\n request_id, model=self.model_uid, request_output=final_output\n )\n\n\nclass VLLMChatModel(VLLMModel, ChatModelMixin):\n @classmethod\n def match(\n cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\", quantization: str\n ) -> bool:\n if XINFERENCE_DISABLE_VLLM:\n return False\n if quantization != \"none\":\n return False\n if llm_spec.model_format != \"pytorch\":\n return False\n if llm_family.model_name not in VLLM_SUPPORTED_CHAT_MODELS:\n return False\n if \"chat\" not in llm_family.model_ability:\n return False\n return VLLM_INSTALLED\n\n def _sanitize_chat_config(\n self,\n generate_config: Optional[Dict] = None,\n ) -> Dict:\n if not generate_config:\n generate_config = {}\n if self.model_family.prompt_style:\n if (\n not generate_config.get(\"stop\")\n ) and self.model_family.prompt_style.stop:\n generate_config[\"stop\"] = self.model_family.prompt_style.stop.copy()\n if self.model_family.prompt_style.stop_token_ids:\n generate_config.setdefault(\n \"stop_token_ids\",\n self.model_family.prompt_style.stop_token_ids.copy(),\n )\n return generate_config\n\n async def async_chat(\n self,\n prompt: str,\n system_prompt: Optional[str] = None,\n chat_history: Optional[List[ChatCompletionMessage]] = None,\n generate_config: Optional[Dict] = None,\n ) -> Union[ChatCompletion, AsyncGenerator[ChatCompletionChunk, None]]:\n assert 
self.model_family.prompt_style is not None\n prompt_style = self.model_family.prompt_style.copy()\n if system_prompt:\n prompt_style.system_prompt = system_prompt\n chat_history = chat_history or []\n full_prompt = self.get_prompt(prompt, chat_history, prompt_style)\n\n sanitized = self._sanitize_chat_config(generate_config)\n stream = sanitized.get(\"stream\", None)\n\n if stream:\n agen = await self.async_generate(full_prompt, sanitized)\n assert isinstance(agen, AsyncGenerator)\n return self._async_to_chat_completion_chunks(agen)\n else:\n c = await self.async_generate(full_prompt, sanitized)\n assert not isinstance(c, AsyncGenerator)\n return self._to_chat_completion(c)\n", "path": "xinference/model/llm/vllm/core.py"}]} |
gh_patches_debug_1427 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-2644 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Interventions - cost calculation
In the intervention module, there is something I do not quite understand about the cost calculation: the costs are not added together, and it looks as if some cost items take precedence over others.
For example, if I only enter a subcontracting cost, everything is fine:

If I then add a material cost at the same time, the subcontracting cost is no longer taken into account:

And if I fill in everything, only the material and manday costs are taken into account:

I can understand the initial logic of saying an intervention is either done by a subcontractor or done internally, but there can be cases where there is a material cost on top of subcontracting, or even on top of a technician's intervention. So, for the sake of clarity and to avoid tracking errors, wouldn't it be better to add all of the costs together in the total cost?
--- END ISSUE ---
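To make the behaviour the reporter is asking for concrete, here is a minimal sketch of a cumulative total cost. The function and argument names are hypothetical (they only mirror the cost fields of the `Intervention` model quoted below); this illustrates the expected arithmetic, not code from the project:
```python
# Hypothetical helper illustrating the cumulative total the reporter expects.
# Argument names mirror the Intervention cost fields; the values are made up.
def expected_total_cost(mandays_cost, material_cost, heliport_cost, subcontract_cost):
    # Every cost item contributes to the total; None (empty field) counts as 0.
    return (
        (mandays_cost or 0)
        + (material_cost or 0)
        + (heliport_cost or 0)
        + (subcontract_cost or 0)
    )

# Mandays + material + subcontract should all add up; heliport left empty.
print(expected_total_cost(150.0, 60.0, None, 200.0))  # 410.0
```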
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/maintenance/models.py`
Content:
```
1 import os
2 from datetime import datetime
3
4 from django.db.models import Q, Min, Max
5 from django.db.models.functions import ExtractYear
6 from django.conf import settings
7 from django.utils.translation import gettext_lazy as _
8 from django.contrib.contenttypes.fields import GenericForeignKey
9 from django.contrib.contenttypes.models import ContentType
10 from django.contrib.gis.db import models
11 from django.contrib.gis.geos import GeometryCollection
12
13 from mapentity.models import MapEntityMixin
14
15 from geotrek.authent.models import StructureRelated, StructureOrNoneRelated
16 from geotrek.altimetry.models import AltimetryMixin
17 from geotrek.core.models import Topology, Path, Trail
18 from geotrek.common.models import Organism
19 from geotrek.common.mixins import TimeStampedModelMixin, NoDeleteMixin, AddPropertyMixin, NoDeleteManager
20 from geotrek.common.utils import classproperty
21 from geotrek.infrastructure.models import Infrastructure
22 from geotrek.signage.models import Signage
23 from geotrek.zoning.mixins import ZoningPropertiesMixin
24
25 if 'geotrek.signage' in settings.INSTALLED_APPS:
26 from geotrek.signage.models import Blade
27
28
29 class InterventionManager(NoDeleteManager):
30 def year_choices(self):
31 return self.existing().filter(date__isnull=False).annotate(year=ExtractYear('date')) \
32 .order_by('-year').distinct().values_list('year', 'year')
33
34
35 class Intervention(ZoningPropertiesMixin, AddPropertyMixin, MapEntityMixin, AltimetryMixin,
36 TimeStampedModelMixin, StructureRelated, NoDeleteMixin):
37
38 target_type = models.ForeignKey(ContentType, null=True, on_delete=models.CASCADE)
39 target_id = models.PositiveIntegerField(blank=True, null=True)
40 target = GenericForeignKey('target_type', 'target_id')
41
42 name = models.CharField(verbose_name=_("Name"), max_length=128, help_text=_("Brief summary"))
43 date = models.DateField(default=datetime.now, verbose_name=_("Date"), help_text=_("When ?"))
44 subcontracting = models.BooleanField(verbose_name=_("Subcontracting"), default=False)
45
46 # Technical information
47 width = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_("Width"))
48 height = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_("Height"))
49 area = models.FloatField(editable=False, default=0, blank=True, null=True, verbose_name=_("Area"))
50
51 # Costs
52 material_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_("Material cost"))
53 heliport_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_("Heliport cost"))
54 subcontract_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_("Subcontract cost"))
55
56 # AltimetyMixin for denormalized fields from related topology, updated via trigger.
57 length = models.FloatField(editable=True, default=0.0, null=True, blank=True, verbose_name=_("3D Length"))
58
59 stake = models.ForeignKey('core.Stake', null=True, blank=True, on_delete=models.CASCADE,
60 related_name='interventions', verbose_name=_("Stake"))
61
62 status = models.ForeignKey('InterventionStatus', verbose_name=_("Status"), on_delete=models.CASCADE)
63
64 type = models.ForeignKey('InterventionType', null=True, blank=True, on_delete=models.CASCADE,
65 verbose_name=_("Type"))
66
67 disorders = models.ManyToManyField('InterventionDisorder', related_name="interventions",
68 verbose_name=_("Disorders"), blank=True)
69
70 jobs = models.ManyToManyField('InterventionJob', through='ManDay', verbose_name=_("Jobs"))
71
72 project = models.ForeignKey('Project', null=True, blank=True, related_name="interventions",
73 on_delete=models.CASCADE, verbose_name=_("Project"))
74 description = models.TextField(blank=True, verbose_name=_("Description"), help_text=_("Remarks and notes"))
75
76 eid = models.CharField(verbose_name=_("External id"), max_length=1024, blank=True, null=True)
77
78 objects = InterventionManager()
79
80 class Meta:
81 verbose_name = _("Intervention")
82 verbose_name_plural = _("Interventions")
83
84 def __init__(self, *args, **kwargs):
85 super().__init__(*args, **kwargs)
86 self._geom = None
87
88 def default_stake(self):
89 stake = None
90 if self.target and isinstance(self.target, Topology):
91 for path in self.target.paths.exclude(stake=None):
92 if path.stake > stake:
93 stake = path.stake
94 return stake
95
96 def reload(self):
97 if self.pk:
98 fromdb = self.__class__.objects.get(pk=self.pk)
99 self.area = fromdb.area
100 AltimetryMixin.reload(self, fromdb)
101 TimeStampedModelMixin.reload(self, fromdb)
102 NoDeleteMixin.reload(self, fromdb)
103 if isinstance(self.target, Topology):
104 self.target.reload()
105 return self
106
107 def save(self, *args, **kwargs):
108 if self.stake is None:
109 self.stake = self.default_stake()
110
111 super().save(*args, **kwargs)
112
113 # Invalidate project map
114 if self.project:
115 try:
116 os.remove(self.project.get_map_image_path())
117 except OSError:
118 pass
119
120 self.reload()
121
122 @classproperty
123 def target_verbose_name(cls):
124 return _("On")
125
126 @property
127 def target_display(self):
128 icon = 'path'
129 title = _('Paths')
130 if not self.target._meta.model_name == "topology":
131 icon = self.target._meta.model_name
132
133 title = self.target.name_display
134 return '<img src="%simages/%s-16.png"> %s' % (settings.STATIC_URL,
135 icon,
136 title)
137
138 @property
139 def target_csv_display(self):
140 return "%s: %s (%s)" % (
141 _(self.target._meta.verbose_name),
142 self.target,
143 self.target.pk)
144
145 @property
146 def in_project(self):
147 return self.project is not None
148
149 @property
150 def paths(self):
151 if self.target._meta.model_name == 'blade':
152 return self.target.signage.paths.all()
153 if self.target:
154 return self.target.paths.all()
155 return Path.objects.none()
156
157 @property
158 def total_manday(self):
159 total = 0.0
160 for md in self.manday_set.all():
161 total += float(md.nb_days)
162 return total
163
164 @classproperty
165 def total_manday_verbose_name(cls):
166 return _("Mandays")
167
168 @property
169 def total_cost_mandays(self):
170 total = 0.0
171 for md in self.manday_set.all():
172 total += md.cost
173 return total
174
175 @classproperty
176 def total_cost_mandays_verbose_name(cls):
177 return _("Mandays cost")
178
179 @property
180 def total_cost(self):
181 return self.total_cost_mandays + \
182 self.material_cost or 0 + \
183 self.heliport_cost or 0 + \
184 self.subcontract_cost or 0
185
186 @classproperty
187 def total_cost_verbose_name(cls):
188 return _("Total cost")
189
190 @classproperty
191 def geomfield(cls):
192 return Topology._meta.get_field('geom')
193
194 @property
195 def geom(self):
196 if self._geom is None:
197 if self.target:
198 self._geom = self.target.geom
199 return self._geom
200
201 @geom.setter
202 def geom(self, value):
203 self._geom = value
204
205 @property
206 def api_geom(self):
207 if not self.geom:
208 return None
209 return self.geom.transform(settings.API_SRID, clone=True)
210
211 @property
212 def name_display(self):
213 return '<a data-pk="%s" href="%s" title="%s" >%s</a>' % (self.pk,
214 self.get_detail_url(),
215 self.name,
216 self.name)
217
218 @property
219 def name_csv_display(self):
220 return self.name
221
222 def __str__(self):
223 return "%s (%s)" % (self.name, self.date)
224
225 @classmethod
226 def get_interventions(cls, obj):
227 blade_content_type = ContentType.objects.get_for_model(Blade)
228 non_topology_content_types = [blade_content_type]
229 if 'geotrek.outdoor' in settings.INSTALLED_APPS:
230 non_topology_content_types += [
231 ContentType.objects.get_by_natural_key('outdoor', 'site'),
232 ContentType.objects.get_by_natural_key('outdoor', 'course'),
233 ]
234 if settings.TREKKING_TOPOLOGY_ENABLED:
235 topologies = list(Topology.overlapping(obj).values_list('pk', flat=True))
236 else:
237 area = obj.geom.buffer(settings.INTERVENTION_INTERSECTION_MARGIN)
238 topologies = list(Topology.objects.existing().filter(geom__intersects=area).values_list('pk', flat=True))
239 qs = Q(target_id__in=topologies) & ~Q(target_type__in=non_topology_content_types)
240 if 'geotrek.signage' in settings.INSTALLED_APPS:
241 blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))
242 qs |= Q(target_id__in=blades, target_type=blade_content_type)
243 return Intervention.objects.existing().filter(qs).distinct('pk')
244
245 @classmethod
246 def path_interventions(cls, path):
247 blade_content_type = ContentType.objects.get_for_model(Blade)
248 non_topology_content_types = [blade_content_type]
249 if 'geotrek.outdoor' in settings.INSTALLED_APPS:
250 non_topology_content_types += [
251 ContentType.objects.get_by_natural_key('outdoor', 'site'),
252 ContentType.objects.get_by_natural_key('outdoor', 'course'),
253 ]
254 topologies = list(Topology.objects.filter(aggregations__path=path).values_list('pk', flat=True))
255 qs = Q(target_id__in=topologies) & ~Q(target_type__in=non_topology_content_types)
256 if 'geotrek.signage' in settings.INSTALLED_APPS:
257 blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))
258 qs |= Q(target_id__in=blades, target_type=blade_content_type)
259 return Intervention.objects.existing().filter(qs).distinct('pk')
260
261 @classmethod
262 def topology_interventions(cls, topology):
263 return cls.get_interventions(topology)
264
265 @classmethod
266 def blade_interventions(cls, blade):
267 return cls.get_interventions(blade.signage)
268
269 @property
270 def signages(self):
271 if self.target_type == ContentType.objects.get_for_model(Signage):
272 return [self.target]
273 return []
274
275 @property
276 def infrastructures(self):
277 if self.target_type == ContentType.objects.get_for_model(Infrastructure):
278 return [self.target]
279 return []
280
281 def distance(self, to_cls):
282 """Distance to associate this intervention to another class"""
283 return settings.MAINTENANCE_INTERSECTION_MARGIN
284
285
286 Path.add_property('interventions', lambda self: Intervention.path_interventions(self), _("Interventions"))
287 Topology.add_property('interventions', lambda self: Intervention.topology_interventions(self), _("Interventions"))
288 if 'geotrek.signage' in settings.INSTALLED_APPS:
289 Blade.add_property('interventions', lambda self: Intervention.blade_interventions(self), _("Interventions"))
290
291
292 class InterventionStatus(StructureOrNoneRelated):
293
294 status = models.CharField(verbose_name=_("Status"), max_length=128)
295 order = models.PositiveSmallIntegerField(default=None, null=True, blank=True, verbose_name=_("Display order"))
296
297 class Meta:
298 verbose_name = _("Intervention's status")
299 verbose_name_plural = _("Intervention's statuses")
300 ordering = ['order', 'status']
301
302 def __str__(self):
303 if self.structure:
304 return "{} ({})".format(self.status, self.structure.name)
305 return self.status
306
307
308 class InterventionType(StructureOrNoneRelated):
309
310 type = models.CharField(max_length=128, verbose_name=_("Type"))
311
312 class Meta:
313 verbose_name = _("Intervention's type")
314 verbose_name_plural = _("Intervention's types")
315 ordering = ['type']
316
317 def __str__(self):
318 if self.structure:
319 return "{} ({})".format(self.type, self.structure.name)
320 return self.type
321
322
323 class InterventionDisorder(StructureOrNoneRelated):
324
325 disorder = models.CharField(max_length=128, verbose_name=_("Disorder"))
326
327 class Meta:
328 verbose_name = _("Intervention's disorder")
329 verbose_name_plural = _("Intervention's disorders")
330 ordering = ['disorder']
331
332 def __str__(self):
333 if self.structure:
334 return "{} ({})".format(self.disorder, self.structure.name)
335 return self.disorder
336
337
338 class InterventionJob(StructureOrNoneRelated):
339
340 job = models.CharField(max_length=128, verbose_name=_("Job"))
341 cost = models.DecimalField(verbose_name=_("Cost"), default=1.0, decimal_places=2, max_digits=8)
342
343 class Meta:
344 verbose_name = _("Intervention's job")
345 verbose_name_plural = _("Intervention's jobs")
346 ordering = ['job']
347
348 def __str__(self):
349 if self.structure:
350 return "{} ({})".format(self.job, self.structure.name)
351 return self.job
352
353
354 class ManDay(models.Model):
355
356 nb_days = models.DecimalField(verbose_name=_("Mandays"), decimal_places=2, max_digits=6)
357 intervention = models.ForeignKey(Intervention, on_delete=models.CASCADE)
358 job = models.ForeignKey(InterventionJob, verbose_name=_("Job"), on_delete=models.CASCADE)
359
360 class Meta:
361 verbose_name = _("Manday")
362 verbose_name_plural = _("Mandays")
363
364 @property
365 def cost(self):
366 return float(self.nb_days * self.job.cost)
367
368 def __str__(self):
369 return str(self.nb_days)
370
371
372 class ProjectManager(NoDeleteManager):
373 def year_choices(self):
374 bounds = self.existing().aggregate(min=Min('begin_year'), max=Max('end_year'))
375 if not bounds['min'] or not bounds['max']:
376 return []
377 return [(year, year) for year in range(bounds['min'], bounds['max'] + 1)]
378
379
380 class Project(ZoningPropertiesMixin, AddPropertyMixin, MapEntityMixin, TimeStampedModelMixin,
381 StructureRelated, NoDeleteMixin):
382
383 name = models.CharField(verbose_name=_("Name"), max_length=128)
384 begin_year = models.IntegerField(verbose_name=_("Begin year"))
385 end_year = models.IntegerField(verbose_name=_("End year"), blank=True, null=True)
386 constraint = models.TextField(verbose_name=_("Constraint"), blank=True,
387 help_text=_("Specific conditions, ..."))
388 global_cost = models.FloatField(verbose_name=_("Global cost"), default=0,
389 blank=True, null=True, help_text=_("€"))
390 comments = models.TextField(verbose_name=_("Comments"), blank=True,
391 help_text=_("Remarks and notes"))
392 type = models.ForeignKey('ProjectType', null=True, blank=True, on_delete=models.CASCADE,
393 verbose_name=_("Type"))
394 domain = models.ForeignKey('ProjectDomain', null=True, blank=True, on_delete=models.CASCADE,
395 verbose_name=_("Domain"))
396 contractors = models.ManyToManyField('Contractor', related_name="projects", blank=True,
397 verbose_name=_("Contractors"))
398 project_owner = models.ForeignKey(Organism, related_name='own', blank=True, null=True, on_delete=models.CASCADE,
399 verbose_name=_("Project owner"))
400 project_manager = models.ForeignKey(Organism, related_name='manage', blank=True, null=True, on_delete=models.CASCADE,
401 verbose_name=_("Project manager"))
402 founders = models.ManyToManyField(Organism, through='Funding', verbose_name=_("Founders"))
403 eid = models.CharField(verbose_name=_("External id"), max_length=1024, blank=True, null=True)
404
405 objects = ProjectManager()
406
407 class Meta:
408 verbose_name = _("Project")
409 verbose_name_plural = _("Projects")
410 ordering = ['-begin_year', 'name']
411
412 def __init__(self, *args, **kwargs):
413 super().__init__(*args, **kwargs)
414 self._geom = None
415
416 @property
417 def paths(self):
418 s = []
419 for i in self.interventions.existing():
420 s += i.paths
421 return Path.objects.filter(pk__in=[p.pk for p in set(s)])
422
423 @property
424 def trails(self):
425 s = []
426 for i in self.interventions.existing():
427 for p in i.target.paths.all():
428 for t in p.trails.all():
429 s.append(t.pk)
430
431 return Trail.objects.filter(pk__in=s)
432
433 @property
434 def signages(self):
435 from geotrek.signage.models import Signage
436 target_ids = self.interventions.existing().filter(target_type=ContentType.objects.get_for_model(Signage)).values_list('target_id', flat=True)
437 return list(Signage.objects.filter(topo_object__in=target_ids))
438
439 @property
440 def infrastructures(self):
441 from geotrek.infrastructure.models import Infrastructure
442 target_ids = list(self.interventions.existing().filter(target_type=ContentType.objects.get_for_model(Infrastructure)).values_list('target_id', flat=True))
443 return list(Infrastructure.objects.filter(topo_object__in=target_ids))
444
445 @classproperty
446 def geomfield(cls):
447 from django.contrib.gis.geos import LineString
448 # Fake field, TODO: still better than overkill code in views, but can do neater.
449 c = GeometryCollection([LineString((0, 0), (1, 1))], srid=settings.SRID)
450 c.name = 'geom'
451 return c
452
453 @property
454 def geom(self):
455 """ Merge all interventions geometry into a collection
456 """
457 if self._geom is None:
458 interventions = Intervention.objects.existing().filter(project=self)
459 geoms = [i.geom for i in interventions if i.geom is not None]
460 if geoms:
461 self._geom = GeometryCollection(*geoms, srid=settings.SRID)
462 return self._geom
463
464 @property
465 def api_geom(self):
466 if not self.geom:
467 return None
468 return self.geom.transform(settings.API_SRID, clone=True)
469
470 @geom.setter
471 def geom(self, value):
472 self._geom = value
473
474 @property
475 def name_display(self):
476 return '<a data-pk="%s" href="%s" title="%s">%s</a>' % (self.pk,
477 self.get_detail_url(),
478 self.name,
479 self.name)
480
481 @property
482 def name_csv_display(self):
483 return self.name
484
485 @property
486 def interventions_csv_display(self):
487 return [str(i) for i in self.interventions.existing()]
488
489 @property
490 def contractors_display(self):
491 return [str(c) for c in self.contractors.all()]
492
493 @property
494 def founders_display(self):
495 return [str(f) for f in self.founders.all()]
496
497 @property
498 def period(self):
499 return "%s - %s" % (self.begin_year, self.end_year or "")
500
501 @property
502 def period_display(self):
503 return self.period
504
505 @classproperty
506 def period_verbose_name(cls):
507 return _("Period")
508
509 @property
510 def interventions_total_cost(self):
511 total = 0
512 qs = self.interventions.existing()
513 for i in qs.prefetch_related('manday_set', 'manday_set__job'):
514 total += i.total_cost
515 return total
516
517 @classproperty
518 def interventions_total_cost_verbose_name(cls):
519 return _("Interventions total cost")
520
521 def __str__(self):
522 return "%s - %s" % (self.begin_year, self.name)
523
524 @classmethod
525 def path_projects(cls, path):
526 return cls.objects.existing().filter(interventions__in=path.interventions.all()).distinct()
527
528 @classmethod
529 def topology_projects(cls, topology):
530 return cls.objects.existing().filter(interventions__in=topology.interventions.all()).distinct()
531
532 def edges_by_attr(self, interventionattr):
533 """ Return related topology objects of project, by aggregating the same attribute
534 on its interventions.
535 (See geotrek.land.models)
536 """
537 pks = []
538 modelclass = Topology
539 for i in self.interventions.all():
540 attr_value = getattr(i, interventionattr)
541 if isinstance(attr_value, list):
542 pks += [o.pk for o in attr_value]
543 else:
544 modelclass = attr_value.model
545 topologies = attr_value.values('id')
546 for topology in topologies:
547 pks.append(topology['id'])
548 return modelclass.objects.filter(pk__in=pks)
549
550 @classmethod
551 def get_create_label(cls):
552 return _("Add a new project")
553
554
555 Path.add_property('projects', lambda self: Project.path_projects(self), _("Projects"))
556 Topology.add_property('projects', lambda self: Project.topology_projects(self), _("Projects"))
557
558
559 class ProjectType(StructureOrNoneRelated):
560
561 type = models.CharField(max_length=128, verbose_name=_("Type"))
562
563 class Meta:
564 verbose_name = _("Project type")
565 verbose_name_plural = _("Project types")
566 ordering = ['type']
567
568 def __str__(self):
569 if self.structure:
570 return "{} ({})".format(self.type, self.structure.name)
571 return self.type
572
573
574 class ProjectDomain(StructureOrNoneRelated):
575
576 domain = models.CharField(max_length=128, verbose_name=_("Domain"))
577
578 class Meta:
579 verbose_name = _("Project domain")
580 verbose_name_plural = _("Project domains")
581 ordering = ['domain']
582
583 def __str__(self):
584 if self.structure:
585 return "{} ({})".format(self.domain, self.structure.name)
586 return self.domain
587
588
589 class Contractor(StructureOrNoneRelated):
590
591 contractor = models.CharField(max_length=128, verbose_name=_("Contractor"))
592
593 class Meta:
594 verbose_name = _("Contractor")
595 verbose_name_plural = _("Contractors")
596 ordering = ['contractor']
597
598 def __str__(self):
599 if self.structure:
600 return "{} ({})".format(self.contractor, self.structure.name)
601 return self.contractor
602
603
604 class Funding(models.Model):
605
606 amount = models.FloatField(verbose_name=_("Amount"))
607 project = models.ForeignKey(Project, verbose_name=_("Project"), on_delete=models.CASCADE)
608 organism = models.ForeignKey(Organism, verbose_name=_("Organism"), on_delete=models.CASCADE)
609
610 class Meta:
611 verbose_name = _("Funding")
612 verbose_name_plural = _("Fundings")
613
614 def __str__(self):
615 return "%s : %s" % (self.project, self.amount)
616
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geotrek/maintenance/models.py b/geotrek/maintenance/models.py
--- a/geotrek/maintenance/models.py
+++ b/geotrek/maintenance/models.py
@@ -179,9 +179,9 @@
@property
def total_cost(self):
return self.total_cost_mandays + \
- self.material_cost or 0 + \
- self.heliport_cost or 0 + \
- self.subcontract_cost or 0
+ (self.material_cost or 0) + \
+ (self.heliport_cost or 0) + \
+ (self.subcontract_cost or 0)
@classproperty
def total_cost_verbose_name(cls):
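The parentheses are the whole fix: in Python, `+` binds more tightly than `or`, so the original expression groups as `(mandays + material) or (0 + heliport) or (0 + subcontract) or 0` and short-circuits on the first non-zero group instead of summing, which is exactly the behaviour described in the issue. A small standalone sketch of the difference (illustrative values; plain floats stand in for the model's cost fields):
```python
# Illustrative values only; plain floats stand in for the model's cost fields.
mandays, material, heliport, subcontract = 150.0, 60.0, 0.0, 200.0

# Original expression: parses as
#   (mandays + material) or (0 + heliport) or (0 + subcontract) or 0
# so it returns the first truthy group and drops the remaining costs.
buggy = mandays + material or 0 + heliport or 0 + subcontract or 0
print(buggy)  # 210.0 -- the subcontract cost is silently ignored

# Patched expression: each 'or 0' now only guards a single nullable field,
# and the four terms are actually summed.
fixed = mandays + (material or 0) + (heliport or 0) + (subcontract or 0)
print(fixed)  # 410.0
```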
| {"golden_diff": "diff --git a/geotrek/maintenance/models.py b/geotrek/maintenance/models.py\n--- a/geotrek/maintenance/models.py\n+++ b/geotrek/maintenance/models.py\n@@ -179,9 +179,9 @@\n @property\n def total_cost(self):\n return self.total_cost_mandays + \\\n- self.material_cost or 0 + \\\n- self.heliport_cost or 0 + \\\n- self.subcontract_cost or 0\n+ (self.material_cost or 0) + \\\n+ (self.heliport_cost or 0) + \\\n+ (self.subcontract_cost or 0)\n \n @classproperty\n def total_cost_verbose_name(cls):\n", "issue": "Interventions - calcul des co\u00fbts\nDans le module intervention, il y a un truc que je comprends pas trop sur le calcul des co\u00fbts, en effet les co\u00fbts ne se cumulent pas, on dirait qu'il privil\u00e9gie certains postes.\r\nPar exemple si j'ajoute que la sous-traitance c'est bon : \r\n\r\n \r\nSi je viens \u00e0 ajouter du mat\u00e9riel en m\u00eame temps, \u00e7a ne prend plus en compte la sous-traitance \r\n\r\n\r\nEt si j\u2019ajoute tout, \u00e7a prend en compte que le co\u00fbt mat\u00e9riel et homme \r\n\r\n\r\nJe peux comprendre la logique de d\u00e9part en se disant c'est soit une intervention par un sous traitant ou soit une intervention interne, mais il peut y avoir des cas o\u00f9 il y a un co\u00fbt mat\u00e9riel en plus d'une sous-traitance ou m\u00eame une intervention d'un technicien. Du coup dans un soucis de compr\u00e9hension et pour \u00e9viter des erreurs de suivi, est-ce que ce serait pas mieux de cumuler l'ensemble des co\u00fbts dans le co\u00fbt total ?\n", "before_files": [{"content": "import os\nfrom datetime import datetime\n\nfrom django.db.models import Q, Min, Max\nfrom django.db.models.functions import ExtractYear\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.gis.db import models\nfrom django.contrib.gis.geos import GeometryCollection\n\nfrom mapentity.models import MapEntityMixin\n\nfrom geotrek.authent.models import StructureRelated, StructureOrNoneRelated\nfrom geotrek.altimetry.models import AltimetryMixin\nfrom geotrek.core.models import Topology, Path, Trail\nfrom geotrek.common.models import Organism\nfrom geotrek.common.mixins import TimeStampedModelMixin, NoDeleteMixin, AddPropertyMixin, NoDeleteManager\nfrom geotrek.common.utils import classproperty\nfrom geotrek.infrastructure.models import Infrastructure\nfrom geotrek.signage.models import Signage\nfrom geotrek.zoning.mixins import ZoningPropertiesMixin\n\nif 'geotrek.signage' in settings.INSTALLED_APPS:\n from geotrek.signage.models import Blade\n\n\nclass InterventionManager(NoDeleteManager):\n def year_choices(self):\n return self.existing().filter(date__isnull=False).annotate(year=ExtractYear('date')) \\\n .order_by('-year').distinct().values_list('year', 'year')\n\n\nclass Intervention(ZoningPropertiesMixin, AddPropertyMixin, MapEntityMixin, AltimetryMixin,\n TimeStampedModelMixin, StructureRelated, NoDeleteMixin):\n\n target_type = models.ForeignKey(ContentType, null=True, on_delete=models.CASCADE)\n target_id = models.PositiveIntegerField(blank=True, null=True)\n target = GenericForeignKey('target_type', 'target_id')\n\n name = models.CharField(verbose_name=_(\"Name\"), max_length=128, help_text=_(\"Brief summary\"))\n date = models.DateField(default=datetime.now, verbose_name=_(\"Date\"), help_text=_(\"When ?\"))\n subcontracting = 
models.BooleanField(verbose_name=_(\"Subcontracting\"), default=False)\n\n # Technical information\n width = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Width\"))\n height = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Height\"))\n area = models.FloatField(editable=False, default=0, blank=True, null=True, verbose_name=_(\"Area\"))\n\n # Costs\n material_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Material cost\"))\n heliport_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Heliport cost\"))\n subcontract_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Subcontract cost\"))\n\n # AltimetyMixin for denormalized fields from related topology, updated via trigger.\n length = models.FloatField(editable=True, default=0.0, null=True, blank=True, verbose_name=_(\"3D Length\"))\n\n stake = models.ForeignKey('core.Stake', null=True, blank=True, on_delete=models.CASCADE,\n related_name='interventions', verbose_name=_(\"Stake\"))\n\n status = models.ForeignKey('InterventionStatus', verbose_name=_(\"Status\"), on_delete=models.CASCADE)\n\n type = models.ForeignKey('InterventionType', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Type\"))\n\n disorders = models.ManyToManyField('InterventionDisorder', related_name=\"interventions\",\n verbose_name=_(\"Disorders\"), blank=True)\n\n jobs = models.ManyToManyField('InterventionJob', through='ManDay', verbose_name=_(\"Jobs\"))\n\n project = models.ForeignKey('Project', null=True, blank=True, related_name=\"interventions\",\n on_delete=models.CASCADE, verbose_name=_(\"Project\"))\n description = models.TextField(blank=True, verbose_name=_(\"Description\"), help_text=_(\"Remarks and notes\"))\n\n eid = models.CharField(verbose_name=_(\"External id\"), max_length=1024, blank=True, null=True)\n\n objects = InterventionManager()\n\n class Meta:\n verbose_name = _(\"Intervention\")\n verbose_name_plural = _(\"Interventions\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._geom = None\n\n def default_stake(self):\n stake = None\n if self.target and isinstance(self.target, Topology):\n for path in self.target.paths.exclude(stake=None):\n if path.stake > stake:\n stake = path.stake\n return stake\n\n def reload(self):\n if self.pk:\n fromdb = self.__class__.objects.get(pk=self.pk)\n self.area = fromdb.area\n AltimetryMixin.reload(self, fromdb)\n TimeStampedModelMixin.reload(self, fromdb)\n NoDeleteMixin.reload(self, fromdb)\n if isinstance(self.target, Topology):\n self.target.reload()\n return self\n\n def save(self, *args, **kwargs):\n if self.stake is None:\n self.stake = self.default_stake()\n\n super().save(*args, **kwargs)\n\n # Invalidate project map\n if self.project:\n try:\n os.remove(self.project.get_map_image_path())\n except OSError:\n pass\n\n self.reload()\n\n @classproperty\n def target_verbose_name(cls):\n return _(\"On\")\n\n @property\n def target_display(self):\n icon = 'path'\n title = _('Paths')\n if not self.target._meta.model_name == \"topology\":\n icon = self.target._meta.model_name\n\n title = self.target.name_display\n return '<img src=\"%simages/%s-16.png\"> %s' % (settings.STATIC_URL,\n icon,\n title)\n\n @property\n def target_csv_display(self):\n return \"%s: %s (%s)\" % (\n _(self.target._meta.verbose_name),\n self.target,\n self.target.pk)\n\n @property\n def in_project(self):\n return self.project is not None\n\n @property\n def 
paths(self):\n if self.target._meta.model_name == 'blade':\n return self.target.signage.paths.all()\n if self.target:\n return self.target.paths.all()\n return Path.objects.none()\n\n @property\n def total_manday(self):\n total = 0.0\n for md in self.manday_set.all():\n total += float(md.nb_days)\n return total\n\n @classproperty\n def total_manday_verbose_name(cls):\n return _(\"Mandays\")\n\n @property\n def total_cost_mandays(self):\n total = 0.0\n for md in self.manday_set.all():\n total += md.cost\n return total\n\n @classproperty\n def total_cost_mandays_verbose_name(cls):\n return _(\"Mandays cost\")\n\n @property\n def total_cost(self):\n return self.total_cost_mandays + \\\n self.material_cost or 0 + \\\n self.heliport_cost or 0 + \\\n self.subcontract_cost or 0\n\n @classproperty\n def total_cost_verbose_name(cls):\n return _(\"Total cost\")\n\n @classproperty\n def geomfield(cls):\n return Topology._meta.get_field('geom')\n\n @property\n def geom(self):\n if self._geom is None:\n if self.target:\n self._geom = self.target.geom\n return self._geom\n\n @geom.setter\n def geom(self, value):\n self._geom = value\n\n @property\n def api_geom(self):\n if not self.geom:\n return None\n return self.geom.transform(settings.API_SRID, clone=True)\n\n @property\n def name_display(self):\n return '<a data-pk=\"%s\" href=\"%s\" title=\"%s\" >%s</a>' % (self.pk,\n self.get_detail_url(),\n self.name,\n self.name)\n\n @property\n def name_csv_display(self):\n return self.name\n\n def __str__(self):\n return \"%s (%s)\" % (self.name, self.date)\n\n @classmethod\n def get_interventions(cls, obj):\n blade_content_type = ContentType.objects.get_for_model(Blade)\n non_topology_content_types = [blade_content_type]\n if 'geotrek.outdoor' in settings.INSTALLED_APPS:\n non_topology_content_types += [\n ContentType.objects.get_by_natural_key('outdoor', 'site'),\n ContentType.objects.get_by_natural_key('outdoor', 'course'),\n ]\n if settings.TREKKING_TOPOLOGY_ENABLED:\n topologies = list(Topology.overlapping(obj).values_list('pk', flat=True))\n else:\n area = obj.geom.buffer(settings.INTERVENTION_INTERSECTION_MARGIN)\n topologies = list(Topology.objects.existing().filter(geom__intersects=area).values_list('pk', flat=True))\n qs = Q(target_id__in=topologies) & ~Q(target_type__in=non_topology_content_types)\n if 'geotrek.signage' in settings.INSTALLED_APPS:\n blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))\n qs |= Q(target_id__in=blades, target_type=blade_content_type)\n return Intervention.objects.existing().filter(qs).distinct('pk')\n\n @classmethod\n def path_interventions(cls, path):\n blade_content_type = ContentType.objects.get_for_model(Blade)\n non_topology_content_types = [blade_content_type]\n if 'geotrek.outdoor' in settings.INSTALLED_APPS:\n non_topology_content_types += [\n ContentType.objects.get_by_natural_key('outdoor', 'site'),\n ContentType.objects.get_by_natural_key('outdoor', 'course'),\n ]\n topologies = list(Topology.objects.filter(aggregations__path=path).values_list('pk', flat=True))\n qs = Q(target_id__in=topologies) & ~Q(target_type__in=non_topology_content_types)\n if 'geotrek.signage' in settings.INSTALLED_APPS:\n blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))\n qs |= Q(target_id__in=blades, target_type=blade_content_type)\n return Intervention.objects.existing().filter(qs).distinct('pk')\n\n @classmethod\n def topology_interventions(cls, topology):\n return cls.get_interventions(topology)\n\n 
@classmethod\n def blade_interventions(cls, blade):\n return cls.get_interventions(blade.signage)\n\n @property\n def signages(self):\n if self.target_type == ContentType.objects.get_for_model(Signage):\n return [self.target]\n return []\n\n @property\n def infrastructures(self):\n if self.target_type == ContentType.objects.get_for_model(Infrastructure):\n return [self.target]\n return []\n\n def distance(self, to_cls):\n \"\"\"Distance to associate this intervention to another class\"\"\"\n return settings.MAINTENANCE_INTERSECTION_MARGIN\n\n\nPath.add_property('interventions', lambda self: Intervention.path_interventions(self), _(\"Interventions\"))\nTopology.add_property('interventions', lambda self: Intervention.topology_interventions(self), _(\"Interventions\"))\nif 'geotrek.signage' in settings.INSTALLED_APPS:\n Blade.add_property('interventions', lambda self: Intervention.blade_interventions(self), _(\"Interventions\"))\n\n\nclass InterventionStatus(StructureOrNoneRelated):\n\n status = models.CharField(verbose_name=_(\"Status\"), max_length=128)\n order = models.PositiveSmallIntegerField(default=None, null=True, blank=True, verbose_name=_(\"Display order\"))\n\n class Meta:\n verbose_name = _(\"Intervention's status\")\n verbose_name_plural = _(\"Intervention's statuses\")\n ordering = ['order', 'status']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.status, self.structure.name)\n return self.status\n\n\nclass InterventionType(StructureOrNoneRelated):\n\n type = models.CharField(max_length=128, verbose_name=_(\"Type\"))\n\n class Meta:\n verbose_name = _(\"Intervention's type\")\n verbose_name_plural = _(\"Intervention's types\")\n ordering = ['type']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.type, self.structure.name)\n return self.type\n\n\nclass InterventionDisorder(StructureOrNoneRelated):\n\n disorder = models.CharField(max_length=128, verbose_name=_(\"Disorder\"))\n\n class Meta:\n verbose_name = _(\"Intervention's disorder\")\n verbose_name_plural = _(\"Intervention's disorders\")\n ordering = ['disorder']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.disorder, self.structure.name)\n return self.disorder\n\n\nclass InterventionJob(StructureOrNoneRelated):\n\n job = models.CharField(max_length=128, verbose_name=_(\"Job\"))\n cost = models.DecimalField(verbose_name=_(\"Cost\"), default=1.0, decimal_places=2, max_digits=8)\n\n class Meta:\n verbose_name = _(\"Intervention's job\")\n verbose_name_plural = _(\"Intervention's jobs\")\n ordering = ['job']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.job, self.structure.name)\n return self.job\n\n\nclass ManDay(models.Model):\n\n nb_days = models.DecimalField(verbose_name=_(\"Mandays\"), decimal_places=2, max_digits=6)\n intervention = models.ForeignKey(Intervention, on_delete=models.CASCADE)\n job = models.ForeignKey(InterventionJob, verbose_name=_(\"Job\"), on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = _(\"Manday\")\n verbose_name_plural = _(\"Mandays\")\n\n @property\n def cost(self):\n return float(self.nb_days * self.job.cost)\n\n def __str__(self):\n return str(self.nb_days)\n\n\nclass ProjectManager(NoDeleteManager):\n def year_choices(self):\n bounds = self.existing().aggregate(min=Min('begin_year'), max=Max('end_year'))\n if not bounds['min'] or not bounds['max']:\n return []\n return [(year, year) for year in range(bounds['min'], bounds['max'] + 1)]\n\n\nclass 
Project(ZoningPropertiesMixin, AddPropertyMixin, MapEntityMixin, TimeStampedModelMixin,\n StructureRelated, NoDeleteMixin):\n\n name = models.CharField(verbose_name=_(\"Name\"), max_length=128)\n begin_year = models.IntegerField(verbose_name=_(\"Begin year\"))\n end_year = models.IntegerField(verbose_name=_(\"End year\"), blank=True, null=True)\n constraint = models.TextField(verbose_name=_(\"Constraint\"), blank=True,\n help_text=_(\"Specific conditions, ...\"))\n global_cost = models.FloatField(verbose_name=_(\"Global cost\"), default=0,\n blank=True, null=True, help_text=_(\"\u20ac\"))\n comments = models.TextField(verbose_name=_(\"Comments\"), blank=True,\n help_text=_(\"Remarks and notes\"))\n type = models.ForeignKey('ProjectType', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Type\"))\n domain = models.ForeignKey('ProjectDomain', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Domain\"))\n contractors = models.ManyToManyField('Contractor', related_name=\"projects\", blank=True,\n verbose_name=_(\"Contractors\"))\n project_owner = models.ForeignKey(Organism, related_name='own', blank=True, null=True, on_delete=models.CASCADE,\n verbose_name=_(\"Project owner\"))\n project_manager = models.ForeignKey(Organism, related_name='manage', blank=True, null=True, on_delete=models.CASCADE,\n verbose_name=_(\"Project manager\"))\n founders = models.ManyToManyField(Organism, through='Funding', verbose_name=_(\"Founders\"))\n eid = models.CharField(verbose_name=_(\"External id\"), max_length=1024, blank=True, null=True)\n\n objects = ProjectManager()\n\n class Meta:\n verbose_name = _(\"Project\")\n verbose_name_plural = _(\"Projects\")\n ordering = ['-begin_year', 'name']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._geom = None\n\n @property\n def paths(self):\n s = []\n for i in self.interventions.existing():\n s += i.paths\n return Path.objects.filter(pk__in=[p.pk for p in set(s)])\n\n @property\n def trails(self):\n s = []\n for i in self.interventions.existing():\n for p in i.target.paths.all():\n for t in p.trails.all():\n s.append(t.pk)\n\n return Trail.objects.filter(pk__in=s)\n\n @property\n def signages(self):\n from geotrek.signage.models import Signage\n target_ids = self.interventions.existing().filter(target_type=ContentType.objects.get_for_model(Signage)).values_list('target_id', flat=True)\n return list(Signage.objects.filter(topo_object__in=target_ids))\n\n @property\n def infrastructures(self):\n from geotrek.infrastructure.models import Infrastructure\n target_ids = list(self.interventions.existing().filter(target_type=ContentType.objects.get_for_model(Infrastructure)).values_list('target_id', flat=True))\n return list(Infrastructure.objects.filter(topo_object__in=target_ids))\n\n @classproperty\n def geomfield(cls):\n from django.contrib.gis.geos import LineString\n # Fake field, TODO: still better than overkill code in views, but can do neater.\n c = GeometryCollection([LineString((0, 0), (1, 1))], srid=settings.SRID)\n c.name = 'geom'\n return c\n\n @property\n def geom(self):\n \"\"\" Merge all interventions geometry into a collection\n \"\"\"\n if self._geom is None:\n interventions = Intervention.objects.existing().filter(project=self)\n geoms = [i.geom for i in interventions if i.geom is not None]\n if geoms:\n self._geom = GeometryCollection(*geoms, srid=settings.SRID)\n return self._geom\n\n @property\n def api_geom(self):\n if not self.geom:\n return None\n return 
self.geom.transform(settings.API_SRID, clone=True)\n\n @geom.setter\n def geom(self, value):\n self._geom = value\n\n @property\n def name_display(self):\n return '<a data-pk=\"%s\" href=\"%s\" title=\"%s\">%s</a>' % (self.pk,\n self.get_detail_url(),\n self.name,\n self.name)\n\n @property\n def name_csv_display(self):\n return self.name\n\n @property\n def interventions_csv_display(self):\n return [str(i) for i in self.interventions.existing()]\n\n @property\n def contractors_display(self):\n return [str(c) for c in self.contractors.all()]\n\n @property\n def founders_display(self):\n return [str(f) for f in self.founders.all()]\n\n @property\n def period(self):\n return \"%s - %s\" % (self.begin_year, self.end_year or \"\")\n\n @property\n def period_display(self):\n return self.period\n\n @classproperty\n def period_verbose_name(cls):\n return _(\"Period\")\n\n @property\n def interventions_total_cost(self):\n total = 0\n qs = self.interventions.existing()\n for i in qs.prefetch_related('manday_set', 'manday_set__job'):\n total += i.total_cost\n return total\n\n @classproperty\n def interventions_total_cost_verbose_name(cls):\n return _(\"Interventions total cost\")\n\n def __str__(self):\n return \"%s - %s\" % (self.begin_year, self.name)\n\n @classmethod\n def path_projects(cls, path):\n return cls.objects.existing().filter(interventions__in=path.interventions.all()).distinct()\n\n @classmethod\n def topology_projects(cls, topology):\n return cls.objects.existing().filter(interventions__in=topology.interventions.all()).distinct()\n\n def edges_by_attr(self, interventionattr):\n \"\"\" Return related topology objects of project, by aggregating the same attribute\n on its interventions.\n (See geotrek.land.models)\n \"\"\"\n pks = []\n modelclass = Topology\n for i in self.interventions.all():\n attr_value = getattr(i, interventionattr)\n if isinstance(attr_value, list):\n pks += [o.pk for o in attr_value]\n else:\n modelclass = attr_value.model\n topologies = attr_value.values('id')\n for topology in topologies:\n pks.append(topology['id'])\n return modelclass.objects.filter(pk__in=pks)\n\n @classmethod\n def get_create_label(cls):\n return _(\"Add a new project\")\n\n\nPath.add_property('projects', lambda self: Project.path_projects(self), _(\"Projects\"))\nTopology.add_property('projects', lambda self: Project.topology_projects(self), _(\"Projects\"))\n\n\nclass ProjectType(StructureOrNoneRelated):\n\n type = models.CharField(max_length=128, verbose_name=_(\"Type\"))\n\n class Meta:\n verbose_name = _(\"Project type\")\n verbose_name_plural = _(\"Project types\")\n ordering = ['type']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.type, self.structure.name)\n return self.type\n\n\nclass ProjectDomain(StructureOrNoneRelated):\n\n domain = models.CharField(max_length=128, verbose_name=_(\"Domain\"))\n\n class Meta:\n verbose_name = _(\"Project domain\")\n verbose_name_plural = _(\"Project domains\")\n ordering = ['domain']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.domain, self.structure.name)\n return self.domain\n\n\nclass Contractor(StructureOrNoneRelated):\n\n contractor = models.CharField(max_length=128, verbose_name=_(\"Contractor\"))\n\n class Meta:\n verbose_name = _(\"Contractor\")\n verbose_name_plural = _(\"Contractors\")\n ordering = ['contractor']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.contractor, self.structure.name)\n return self.contractor\n\n\nclass 
Funding(models.Model):\n\n amount = models.FloatField(verbose_name=_(\"Amount\"))\n project = models.ForeignKey(Project, verbose_name=_(\"Project\"), on_delete=models.CASCADE)\n organism = models.ForeignKey(Organism, verbose_name=_(\"Organism\"), on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = _(\"Funding\")\n verbose_name_plural = _(\"Fundings\")\n\n def __str__(self):\n return \"%s : %s\" % (self.project, self.amount)\n", "path": "geotrek/maintenance/models.py"}], "after_files": [{"content": "import os\nfrom datetime import datetime\n\nfrom django.db.models import Q, Min, Max\nfrom django.db.models.functions import ExtractYear\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.gis.db import models\nfrom django.contrib.gis.geos import GeometryCollection\n\nfrom mapentity.models import MapEntityMixin\n\nfrom geotrek.authent.models import StructureRelated, StructureOrNoneRelated\nfrom geotrek.altimetry.models import AltimetryMixin\nfrom geotrek.core.models import Topology, Path, Trail\nfrom geotrek.common.models import Organism\nfrom geotrek.common.mixins import TimeStampedModelMixin, NoDeleteMixin, AddPropertyMixin, NoDeleteManager\nfrom geotrek.common.utils import classproperty\nfrom geotrek.infrastructure.models import Infrastructure\nfrom geotrek.signage.models import Signage\nfrom geotrek.zoning.mixins import ZoningPropertiesMixin\n\nif 'geotrek.signage' in settings.INSTALLED_APPS:\n from geotrek.signage.models import Blade\n\n\nclass InterventionManager(NoDeleteManager):\n def year_choices(self):\n return self.existing().filter(date__isnull=False).annotate(year=ExtractYear('date')) \\\n .order_by('-year').distinct().values_list('year', 'year')\n\n\nclass Intervention(ZoningPropertiesMixin, AddPropertyMixin, MapEntityMixin, AltimetryMixin,\n TimeStampedModelMixin, StructureRelated, NoDeleteMixin):\n\n target_type = models.ForeignKey(ContentType, null=True, on_delete=models.CASCADE)\n target_id = models.PositiveIntegerField(blank=True, null=True)\n target = GenericForeignKey('target_type', 'target_id')\n\n name = models.CharField(verbose_name=_(\"Name\"), max_length=128, help_text=_(\"Brief summary\"))\n date = models.DateField(default=datetime.now, verbose_name=_(\"Date\"), help_text=_(\"When ?\"))\n subcontracting = models.BooleanField(verbose_name=_(\"Subcontracting\"), default=False)\n\n # Technical information\n width = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Width\"))\n height = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Height\"))\n area = models.FloatField(editable=False, default=0, blank=True, null=True, verbose_name=_(\"Area\"))\n\n # Costs\n material_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Material cost\"))\n heliport_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Heliport cost\"))\n subcontract_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Subcontract cost\"))\n\n # AltimetyMixin for denormalized fields from related topology, updated via trigger.\n length = models.FloatField(editable=True, default=0.0, null=True, blank=True, verbose_name=_(\"3D Length\"))\n\n stake = models.ForeignKey('core.Stake', null=True, blank=True, on_delete=models.CASCADE,\n related_name='interventions', verbose_name=_(\"Stake\"))\n\n status = 
models.ForeignKey('InterventionStatus', verbose_name=_(\"Status\"), on_delete=models.CASCADE)\n\n type = models.ForeignKey('InterventionType', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Type\"))\n\n disorders = models.ManyToManyField('InterventionDisorder', related_name=\"interventions\",\n verbose_name=_(\"Disorders\"), blank=True)\n\n jobs = models.ManyToManyField('InterventionJob', through='ManDay', verbose_name=_(\"Jobs\"))\n\n project = models.ForeignKey('Project', null=True, blank=True, related_name=\"interventions\",\n on_delete=models.CASCADE, verbose_name=_(\"Project\"))\n description = models.TextField(blank=True, verbose_name=_(\"Description\"), help_text=_(\"Remarks and notes\"))\n\n eid = models.CharField(verbose_name=_(\"External id\"), max_length=1024, blank=True, null=True)\n\n objects = InterventionManager()\n\n class Meta:\n verbose_name = _(\"Intervention\")\n verbose_name_plural = _(\"Interventions\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._geom = None\n\n def default_stake(self):\n stake = None\n if self.target and isinstance(self.target, Topology):\n for path in self.target.paths.exclude(stake=None):\n if path.stake > stake:\n stake = path.stake\n return stake\n\n def reload(self):\n if self.pk:\n fromdb = self.__class__.objects.get(pk=self.pk)\n self.area = fromdb.area\n AltimetryMixin.reload(self, fromdb)\n TimeStampedModelMixin.reload(self, fromdb)\n NoDeleteMixin.reload(self, fromdb)\n if isinstance(self.target, Topology):\n self.target.reload()\n return self\n\n def save(self, *args, **kwargs):\n if self.stake is None:\n self.stake = self.default_stake()\n\n super().save(*args, **kwargs)\n\n # Invalidate project map\n if self.project:\n try:\n os.remove(self.project.get_map_image_path())\n except OSError:\n pass\n\n self.reload()\n\n @classproperty\n def target_verbose_name(cls):\n return _(\"On\")\n\n @property\n def target_display(self):\n icon = 'path'\n title = _('Paths')\n if not self.target._meta.model_name == \"topology\":\n icon = self.target._meta.model_name\n\n title = self.target.name_display\n return '<img src=\"%simages/%s-16.png\"> %s' % (settings.STATIC_URL,\n icon,\n title)\n\n @property\n def target_csv_display(self):\n return \"%s: %s (%s)\" % (\n _(self.target._meta.verbose_name),\n self.target,\n self.target.pk)\n\n @property\n def in_project(self):\n return self.project is not None\n\n @property\n def paths(self):\n if self.target._meta.model_name == 'blade':\n return self.target.signage.paths.all()\n if self.target:\n return self.target.paths.all()\n return Path.objects.none()\n\n @property\n def total_manday(self):\n total = 0.0\n for md in self.manday_set.all():\n total += float(md.nb_days)\n return total\n\n @classproperty\n def total_manday_verbose_name(cls):\n return _(\"Mandays\")\n\n @property\n def total_cost_mandays(self):\n total = 0.0\n for md in self.manday_set.all():\n total += md.cost\n return total\n\n @classproperty\n def total_cost_mandays_verbose_name(cls):\n return _(\"Mandays cost\")\n\n @property\n def total_cost(self):\n return self.total_cost_mandays + \\\n (self.material_cost or 0) + \\\n (self.heliport_cost or 0) + \\\n (self.subcontract_cost or 0)\n\n @classproperty\n def total_cost_verbose_name(cls):\n return _(\"Total cost\")\n\n @classproperty\n def geomfield(cls):\n return Topology._meta.get_field('geom')\n\n @property\n def geom(self):\n if self._geom is None:\n if self.target:\n self._geom = self.target.geom\n return self._geom\n\n 
@geom.setter\n def geom(self, value):\n self._geom = value\n\n @property\n def api_geom(self):\n if not self.geom:\n return None\n return self.geom.transform(settings.API_SRID, clone=True)\n\n @property\n def name_display(self):\n return '<a data-pk=\"%s\" href=\"%s\" title=\"%s\" >%s</a>' % (self.pk,\n self.get_detail_url(),\n self.name,\n self.name)\n\n @property\n def name_csv_display(self):\n return self.name\n\n def __str__(self):\n return \"%s (%s)\" % (self.name, self.date)\n\n @classmethod\n def get_interventions(cls, obj):\n blade_content_type = ContentType.objects.get_for_model(Blade)\n non_topology_content_types = [blade_content_type]\n if 'geotrek.outdoor' in settings.INSTALLED_APPS:\n non_topology_content_types += [\n ContentType.objects.get_by_natural_key('outdoor', 'site'),\n ContentType.objects.get_by_natural_key('outdoor', 'course'),\n ]\n if settings.TREKKING_TOPOLOGY_ENABLED:\n topologies = list(Topology.overlapping(obj).values_list('pk', flat=True))\n else:\n area = obj.geom.buffer(settings.INTERVENTION_INTERSECTION_MARGIN)\n topologies = list(Topology.objects.existing().filter(geom__intersects=area).values_list('pk', flat=True))\n qs = Q(target_id__in=topologies) & ~Q(target_type__in=non_topology_content_types)\n if 'geotrek.signage' in settings.INSTALLED_APPS:\n blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))\n qs |= Q(target_id__in=blades, target_type=blade_content_type)\n return Intervention.objects.existing().filter(qs).distinct('pk')\n\n @classmethod\n def path_interventions(cls, path):\n blade_content_type = ContentType.objects.get_for_model(Blade)\n non_topology_content_types = [blade_content_type]\n if 'geotrek.outdoor' in settings.INSTALLED_APPS:\n non_topology_content_types += [\n ContentType.objects.get_by_natural_key('outdoor', 'site'),\n ContentType.objects.get_by_natural_key('outdoor', 'course'),\n ]\n topologies = list(Topology.objects.filter(aggregations__path=path).values_list('pk', flat=True))\n qs = Q(target_id__in=topologies) & ~Q(target_type__in=non_topology_content_types)\n if 'geotrek.signage' in settings.INSTALLED_APPS:\n blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))\n qs |= Q(target_id__in=blades, target_type=blade_content_type)\n return Intervention.objects.existing().filter(qs).distinct('pk')\n\n @classmethod\n def topology_interventions(cls, topology):\n return cls.get_interventions(topology)\n\n @classmethod\n def blade_interventions(cls, blade):\n return cls.get_interventions(blade.signage)\n\n @property\n def signages(self):\n if self.target_type == ContentType.objects.get_for_model(Signage):\n return [self.target]\n return []\n\n @property\n def infrastructures(self):\n if self.target_type == ContentType.objects.get_for_model(Infrastructure):\n return [self.target]\n return []\n\n def distance(self, to_cls):\n \"\"\"Distance to associate this intervention to another class\"\"\"\n return settings.MAINTENANCE_INTERSECTION_MARGIN\n\n\nPath.add_property('interventions', lambda self: Intervention.path_interventions(self), _(\"Interventions\"))\nTopology.add_property('interventions', lambda self: Intervention.topology_interventions(self), _(\"Interventions\"))\nif 'geotrek.signage' in settings.INSTALLED_APPS:\n Blade.add_property('interventions', lambda self: Intervention.blade_interventions(self), _(\"Interventions\"))\n\n\nclass InterventionStatus(StructureOrNoneRelated):\n\n status = models.CharField(verbose_name=_(\"Status\"), max_length=128)\n order = 
models.PositiveSmallIntegerField(default=None, null=True, blank=True, verbose_name=_(\"Display order\"))\n\n class Meta:\n verbose_name = _(\"Intervention's status\")\n verbose_name_plural = _(\"Intervention's statuses\")\n ordering = ['order', 'status']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.status, self.structure.name)\n return self.status\n\n\nclass InterventionType(StructureOrNoneRelated):\n\n type = models.CharField(max_length=128, verbose_name=_(\"Type\"))\n\n class Meta:\n verbose_name = _(\"Intervention's type\")\n verbose_name_plural = _(\"Intervention's types\")\n ordering = ['type']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.type, self.structure.name)\n return self.type\n\n\nclass InterventionDisorder(StructureOrNoneRelated):\n\n disorder = models.CharField(max_length=128, verbose_name=_(\"Disorder\"))\n\n class Meta:\n verbose_name = _(\"Intervention's disorder\")\n verbose_name_plural = _(\"Intervention's disorders\")\n ordering = ['disorder']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.disorder, self.structure.name)\n return self.disorder\n\n\nclass InterventionJob(StructureOrNoneRelated):\n\n job = models.CharField(max_length=128, verbose_name=_(\"Job\"))\n cost = models.DecimalField(verbose_name=_(\"Cost\"), default=1.0, decimal_places=2, max_digits=8)\n\n class Meta:\n verbose_name = _(\"Intervention's job\")\n verbose_name_plural = _(\"Intervention's jobs\")\n ordering = ['job']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.job, self.structure.name)\n return self.job\n\n\nclass ManDay(models.Model):\n\n nb_days = models.DecimalField(verbose_name=_(\"Mandays\"), decimal_places=2, max_digits=6)\n intervention = models.ForeignKey(Intervention, on_delete=models.CASCADE)\n job = models.ForeignKey(InterventionJob, verbose_name=_(\"Job\"), on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = _(\"Manday\")\n verbose_name_plural = _(\"Mandays\")\n\n @property\n def cost(self):\n return float(self.nb_days * self.job.cost)\n\n def __str__(self):\n return str(self.nb_days)\n\n\nclass ProjectManager(NoDeleteManager):\n def year_choices(self):\n bounds = self.existing().aggregate(min=Min('begin_year'), max=Max('end_year'))\n if not bounds['min'] or not bounds['max']:\n return []\n return [(year, year) for year in range(bounds['min'], bounds['max'] + 1)]\n\n\nclass Project(ZoningPropertiesMixin, AddPropertyMixin, MapEntityMixin, TimeStampedModelMixin,\n StructureRelated, NoDeleteMixin):\n\n name = models.CharField(verbose_name=_(\"Name\"), max_length=128)\n begin_year = models.IntegerField(verbose_name=_(\"Begin year\"))\n end_year = models.IntegerField(verbose_name=_(\"End year\"), blank=True, null=True)\n constraint = models.TextField(verbose_name=_(\"Constraint\"), blank=True,\n help_text=_(\"Specific conditions, ...\"))\n global_cost = models.FloatField(verbose_name=_(\"Global cost\"), default=0,\n blank=True, null=True, help_text=_(\"\u20ac\"))\n comments = models.TextField(verbose_name=_(\"Comments\"), blank=True,\n help_text=_(\"Remarks and notes\"))\n type = models.ForeignKey('ProjectType', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Type\"))\n domain = models.ForeignKey('ProjectDomain', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Domain\"))\n contractors = models.ManyToManyField('Contractor', related_name=\"projects\", blank=True,\n verbose_name=_(\"Contractors\"))\n project_owner = 
models.ForeignKey(Organism, related_name='own', blank=True, null=True, on_delete=models.CASCADE,\n verbose_name=_(\"Project owner\"))\n project_manager = models.ForeignKey(Organism, related_name='manage', blank=True, null=True, on_delete=models.CASCADE,\n verbose_name=_(\"Project manager\"))\n founders = models.ManyToManyField(Organism, through='Funding', verbose_name=_(\"Founders\"))\n eid = models.CharField(verbose_name=_(\"External id\"), max_length=1024, blank=True, null=True)\n\n objects = ProjectManager()\n\n class Meta:\n verbose_name = _(\"Project\")\n verbose_name_plural = _(\"Projects\")\n ordering = ['-begin_year', 'name']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._geom = None\n\n @property\n def paths(self):\n s = []\n for i in self.interventions.existing():\n s += i.paths\n return Path.objects.filter(pk__in=[p.pk for p in set(s)])\n\n @property\n def trails(self):\n s = []\n for i in self.interventions.existing():\n for p in i.target.paths.all():\n for t in p.trails.all():\n s.append(t.pk)\n\n return Trail.objects.filter(pk__in=s)\n\n @property\n def signages(self):\n from geotrek.signage.models import Signage\n target_ids = self.interventions.existing().filter(target_type=ContentType.objects.get_for_model(Signage)).values_list('target_id', flat=True)\n return list(Signage.objects.filter(topo_object__in=target_ids))\n\n @property\n def infrastructures(self):\n from geotrek.infrastructure.models import Infrastructure\n target_ids = list(self.interventions.existing().filter(target_type=ContentType.objects.get_for_model(Infrastructure)).values_list('target_id', flat=True))\n return list(Infrastructure.objects.filter(topo_object__in=target_ids))\n\n @classproperty\n def geomfield(cls):\n from django.contrib.gis.geos import LineString\n # Fake field, TODO: still better than overkill code in views, but can do neater.\n c = GeometryCollection([LineString((0, 0), (1, 1))], srid=settings.SRID)\n c.name = 'geom'\n return c\n\n @property\n def geom(self):\n \"\"\" Merge all interventions geometry into a collection\n \"\"\"\n if self._geom is None:\n interventions = Intervention.objects.existing().filter(project=self)\n geoms = [i.geom for i in interventions if i.geom is not None]\n if geoms:\n self._geom = GeometryCollection(*geoms, srid=settings.SRID)\n return self._geom\n\n @property\n def api_geom(self):\n if not self.geom:\n return None\n return self.geom.transform(settings.API_SRID, clone=True)\n\n @geom.setter\n def geom(self, value):\n self._geom = value\n\n @property\n def name_display(self):\n return '<a data-pk=\"%s\" href=\"%s\" title=\"%s\">%s</a>' % (self.pk,\n self.get_detail_url(),\n self.name,\n self.name)\n\n @property\n def name_csv_display(self):\n return self.name\n\n @property\n def interventions_csv_display(self):\n return [str(i) for i in self.interventions.existing()]\n\n @property\n def contractors_display(self):\n return [str(c) for c in self.contractors.all()]\n\n @property\n def founders_display(self):\n return [str(f) for f in self.founders.all()]\n\n @property\n def period(self):\n return \"%s - %s\" % (self.begin_year, self.end_year or \"\")\n\n @property\n def period_display(self):\n return self.period\n\n @classproperty\n def period_verbose_name(cls):\n return _(\"Period\")\n\n @property\n def interventions_total_cost(self):\n total = 0\n qs = self.interventions.existing()\n for i in qs.prefetch_related('manday_set', 'manday_set__job'):\n total += i.total_cost\n return total\n\n @classproperty\n def 
interventions_total_cost_verbose_name(cls):\n return _(\"Interventions total cost\")\n\n def __str__(self):\n return \"%s - %s\" % (self.begin_year, self.name)\n\n @classmethod\n def path_projects(cls, path):\n return cls.objects.existing().filter(interventions__in=path.interventions.all()).distinct()\n\n @classmethod\n def topology_projects(cls, topology):\n return cls.objects.existing().filter(interventions__in=topology.interventions.all()).distinct()\n\n def edges_by_attr(self, interventionattr):\n \"\"\" Return related topology objects of project, by aggregating the same attribute\n on its interventions.\n (See geotrek.land.models)\n \"\"\"\n pks = []\n modelclass = Topology\n for i in self.interventions.all():\n attr_value = getattr(i, interventionattr)\n if isinstance(attr_value, list):\n pks += [o.pk for o in attr_value]\n else:\n modelclass = attr_value.model\n topologies = attr_value.values('id')\n for topology in topologies:\n pks.append(topology['id'])\n return modelclass.objects.filter(pk__in=pks)\n\n @classmethod\n def get_create_label(cls):\n return _(\"Add a new project\")\n\n\nPath.add_property('projects', lambda self: Project.path_projects(self), _(\"Projects\"))\nTopology.add_property('projects', lambda self: Project.topology_projects(self), _(\"Projects\"))\n\n\nclass ProjectType(StructureOrNoneRelated):\n\n type = models.CharField(max_length=128, verbose_name=_(\"Type\"))\n\n class Meta:\n verbose_name = _(\"Project type\")\n verbose_name_plural = _(\"Project types\")\n ordering = ['type']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.type, self.structure.name)\n return self.type\n\n\nclass ProjectDomain(StructureOrNoneRelated):\n\n domain = models.CharField(max_length=128, verbose_name=_(\"Domain\"))\n\n class Meta:\n verbose_name = _(\"Project domain\")\n verbose_name_plural = _(\"Project domains\")\n ordering = ['domain']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.domain, self.structure.name)\n return self.domain\n\n\nclass Contractor(StructureOrNoneRelated):\n\n contractor = models.CharField(max_length=128, verbose_name=_(\"Contractor\"))\n\n class Meta:\n verbose_name = _(\"Contractor\")\n verbose_name_plural = _(\"Contractors\")\n ordering = ['contractor']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.contractor, self.structure.name)\n return self.contractor\n\n\nclass Funding(models.Model):\n\n amount = models.FloatField(verbose_name=_(\"Amount\"))\n project = models.ForeignKey(Project, verbose_name=_(\"Project\"), on_delete=models.CASCADE)\n organism = models.ForeignKey(Organism, verbose_name=_(\"Organism\"), on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = _(\"Funding\")\n verbose_name_plural = _(\"Fundings\")\n\n def __str__(self):\n return \"%s : %s\" % (self.project, self.amount)\n", "path": "geotrek/maintenance/models.py"}]} |
gh_patches_debug_1428 | rasdani/github-patches | git_diff | pallets__werkzeug-1798 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New Microsoft Edge User Agent
## Background
Microsoft Edge is now based on Chromium and its user agent string has been updated accordingly:
`Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68`
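For context, the pre-Chromium (EdgeHTML) releases advertised an `Edge/<version>` token, whereas the Chromium-based build uses the shorter `Edg/<version>` token, so any matcher keyed on the literal substring `edge` no longer fires. A standalone sketch of that difference (plain `re`, with simplified patterns for illustration only, not Werkzeug's internal tables):

```python
import re

chromium_edge_ua = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68"
)

# A pattern built around the old "edge" token finds nothing in the new UA:
print(re.search(r"edge[/\s]*([\d.]+)", chromium_edge_ua, re.I))  # None

# Accepting the shortened "edg" token as well recovers the Edge version:
match = re.search(r"edge?[/\s]*([\d.]+)", chromium_edge_ua, re.I)
print(match.group(1))  # 81.0.416.68
```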
## Simple Code
```python
from flask import Flask, jsonify, request

app = Flask(__name__)


@app.route('/browser')
def browser():
    ua = request.user_agent
return jsonify({
'browser': ua.browser,
'platform': ua.platform,
'user_agent': ua.string,
'version': ua.version,
})
```
## Expected Result
```json
{
"browser": "edge",
"platform": "windows",
"user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68",
"version": "81.0.416.68"
}
```
| Key | Value |
| --- | --- |
| browser | **edge** |
| platform | windows |
| user_agent | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68 |
| version | **81.0.416.68** |
## Actual Result
```json
{
"browser": "chrome",
"platform": "windows",
"user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68",
"version": "81.0.4044.129"
}
```
| Key | Value |
| --- | --- |
| browser | **chrome** |
| platform | windows |
| user_agent | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68 |
| version | **81.0.4044.129** |
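
This outcome follows from how the parser in `useragents.py` (reproduced in the files below) works: it walks its `browsers` table in declaration order and stops at the first regex that matches. Since the `edge` entry does not match the new `Edg/` token, the scan falls through to `chrome|crios`, which does match. A rough first-match sketch of that behaviour, using simplified stand-in patterns rather than the module's real ones:

```python
import re

ua = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68"
)

# Ordered (name, pattern) rules, first match wins; patterns are illustrative.
rules = [
    ("edge", r"edge[/ ]?([\d.]+)"),               # only the old EdgeHTML token
    ("chrome", r"(?:chrome|crios)[/ ]?([\d.]+)"),
]

for name, pattern in rules:
    m = re.search(pattern, ua, re.I)
    if m:
        print(name, m.group(1))  # chrome 81.0.4044.129
        break
```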
## Environment
- Windows 10 Pro 1909
- Python 3.6.6
- Werkzeug 0.16.1
- Flask 1.1.1
### Related Issues
#818, #1556
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/werkzeug/useragents.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 werkzeug.useragents
4 ~~~~~~~~~~~~~~~~~~~
5
6 This module provides a helper to inspect user agent strings. This module
7 is far from complete but should work for most of the currently available
8 browsers.
9
10
11 :copyright: 2007 Pallets
12 :license: BSD-3-Clause
13 """
14 import re
15
16
17 class UserAgentParser(object):
18 """A simple user agent parser. Used by the `UserAgent`."""
19
20 platforms = (
21 (" cros ", "chromeos"),
22 ("iphone|ios", "iphone"),
23 ("ipad", "ipad"),
24 (r"darwin|mac|os\s*x", "macos"),
25 ("win", "windows"),
26 (r"android", "android"),
27 ("netbsd", "netbsd"),
28 ("openbsd", "openbsd"),
29 ("freebsd", "freebsd"),
30 ("dragonfly", "dragonflybsd"),
31 ("(sun|i86)os", "solaris"),
32 (r"x11|lin(\b|ux)?", "linux"),
33 (r"nintendo\s+wii", "wii"),
34 ("irix", "irix"),
35 ("hp-?ux", "hpux"),
36 ("aix", "aix"),
37 ("sco|unix_sv", "sco"),
38 ("bsd", "bsd"),
39 ("amiga", "amiga"),
40 ("blackberry|playbook", "blackberry"),
41 ("symbian", "symbian"),
42 )
43 browsers = (
44 ("googlebot", "google"),
45 ("msnbot", "msn"),
46 ("yahoo", "yahoo"),
47 ("ask jeeves", "ask"),
48 (r"aol|america\s+online\s+browser", "aol"),
49 (r"opera|opr", "opera"),
50 ("edge", "edge"),
51 ("chrome|crios", "chrome"),
52 ("seamonkey", "seamonkey"),
53 ("firefox|firebird|phoenix|iceweasel", "firefox"),
54 ("galeon", "galeon"),
55 ("safari|version", "safari"),
56 ("webkit", "webkit"),
57 ("camino", "camino"),
58 ("konqueror", "konqueror"),
59 ("k-meleon", "kmeleon"),
60 ("netscape", "netscape"),
61 (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
62 ("lynx", "lynx"),
63 ("links", "links"),
64 ("Baiduspider", "baidu"),
65 ("bingbot", "bing"),
66 ("mozilla", "mozilla"),
67 )
68
69 _browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
70 _language_re = re.compile(
71 r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
72 r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
73 )
74
75 def __init__(self):
76 self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
77 self.browsers = [
78 (b, re.compile(self._browser_version_re % a, re.I))
79 for a, b in self.browsers
80 ]
81
82 def __call__(self, user_agent):
83 for platform, regex in self.platforms: # noqa: B007
84 match = regex.search(user_agent)
85 if match is not None:
86 break
87 else:
88 platform = None
89 for browser, regex in self.browsers: # noqa: B007
90 match = regex.search(user_agent)
91 if match is not None:
92 version = match.group(1)
93 break
94 else:
95 browser = version = None
96 match = self._language_re.search(user_agent)
97 if match is not None:
98 language = match.group(1) or match.group(2)
99 else:
100 language = None
101 return platform, browser, version, language
102
103
104 class UserAgent(object):
105 """Represents a user agent. Pass it a WSGI environment or a user agent
106 string and you can inspect some of the details from the user agent
107 string via the attributes. The following attributes exist:
108
109 .. attribute:: string
110
111 the raw user agent string
112
113 .. attribute:: platform
114
115 the browser platform. ``None`` if not recognized.
116 The following platforms are currently recognized:
117
118 - `aix`
119 - `amiga`
120 - `android`
121 - `blackberry`
122 - `bsd`
123 - `chromeos`
124 - `dragonflybsd`
125 - `freebsd`
126 - `hpux`
127 - `ipad`
128 - `iphone`
129 - `irix`
130 - `linux`
131 - `macos`
132 - `netbsd`
133 - `openbsd`
134 - `sco`
135 - `solaris`
136 - `symbian`
137 - `wii`
138 - `windows`
139
140 .. attribute:: browser
141
142 the name of the browser. ``None`` if not recognized.
143 The following browsers are currently recognized:
144
145 - `aol` *
146 - `ask` *
147 - `baidu` *
148 - `bing` *
149 - `camino`
150 - `chrome`
151 - `edge`
152 - `firefox`
153 - `galeon`
154 - `google` *
155 - `kmeleon`
156 - `konqueror`
157 - `links`
158 - `lynx`
159 - `mozilla`
160 - `msie`
161 - `msn`
162 - `netscape`
163 - `opera`
164 - `safari`
165 - `seamonkey`
166 - `webkit`
167 - `yahoo` *
168
169 (Browsers marked with a star (``*``) are crawlers.)
170
171 .. attribute:: version
172
173 the version of the browser. ``None`` if not recognized.
174
175 .. attribute:: language
176
177 the language of the browser. ``None`` if not recognized.
178 """
179
180 _parser = UserAgentParser()
181
182 def __init__(self, environ_or_string):
183 if isinstance(environ_or_string, dict):
184 environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "")
185 self.string = environ_or_string
186 self.platform, self.browser, self.version, self.language = self._parser(
187 environ_or_string
188 )
189
190 def to_header(self):
191 return self.string
192
193 def __str__(self):
194 return self.string
195
196 def __nonzero__(self):
197 return bool(self.browser)
198
199 __bool__ = __nonzero__
200
201 def __repr__(self):
202 return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py
--- a/src/werkzeug/useragents.py
+++ b/src/werkzeug/useragents.py
@@ -47,7 +47,7 @@
("ask jeeves", "ask"),
(r"aol|america\s+online\s+browser", "aol"),
(r"opera|opr", "opera"),
- ("edge", "edge"),
+ ("edge|edg", "edge"),
("chrome|crios", "chrome"),
("seamonkey", "seamonkey"),
("firefox|firebird|phoenix|iceweasel", "firefox"),
| {"golden_diff": "diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py\n--- a/src/werkzeug/useragents.py\n+++ b/src/werkzeug/useragents.py\n@@ -47,7 +47,7 @@\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n- (\"edge\", \"edge\"),\n+ (\"edge|edg\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n", "issue": "New Microsoft Edge User Agent\n## Background\r\nMicrosoft Edge now based on Chromium and the user agent string is updated.\r\n`Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68`\r\n\r\n## Simple Code\r\n```python\r\[email protected]('/browser')\r\ndef browser():\r\n from flask import request\r\n ua = request.user_agent\r\n return jsonify({\r\n 'browser': ua.browser,\r\n 'platform': ua.platform,\r\n 'user_agent': ua.string,\r\n 'version': ua.version,\r\n })\r\n```\r\n\r\n## Expected Result\r\n```json\r\n{\r\n \"browser\": \"edge\", \r\n \"platform\": \"windows\", \r\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68\", \r\n \"version\": \"81.0.416.68\"\r\n}\r\n```\r\n\r\n| Key | Value |\r\n| --- | --- |\r\n| browser | **edge** |\r\n| platform | windows |\r\n| user_agent | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68 |\r\n| version | **81.0.416.68** |\r\n\r\n\r\n## Actual Result\r\n```json\r\n{\r\n \"browser\": \"chrome\", \r\n \"platform\": \"windows\", \r\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68\", \r\n \"version\": \"81.0.4044.129\"\r\n}\r\n```\r\n\r\n| Key | Value |\r\n| --- | --- |\r\n| browser | **chrome** |\r\n| platform | windows |\r\n| user_agent | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68 |\r\n| version | **81.0.4044.129** |\r\n\r\n## Environment\r\n- Windows 10 Pro 1909\r\n- Python 3.6.6\r\n- Werkzeug 0.16.1\r\n- Flask 1.1.1\r\n\r\n### Related Issues\r\n#818, #1556\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.useragents\n ~~~~~~~~~~~~~~~~~~~\n\n This module provides a helper to inspect user agent strings. This module\n is far from complete but should work for most of the currently available\n browsers.\n\n\n :copyright: 2007 Pallets\n :license: BSD-3-Clause\n\"\"\"\nimport re\n\n\nclass UserAgentParser(object):\n \"\"\"A simple user agent parser. 
Used by the `UserAgent`.\"\"\"\n\n platforms = (\n (\" cros \", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin|mac|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browsers = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n (\"edge\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:%s)[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self):\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]\n self.browsers = [\n (b, re.compile(self._browser_version_re % a, re.I))\n for a, b in self.browsers\n ]\n\n def __call__(self, user_agent):\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent(object):\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. ``None`` if not recognized.\n The following platforms are currently recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. 
``None`` if not recognized.\n The following browsers are currently recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser. ``None`` if not recognized.\n\n .. attribute:: language\n\n the language of the browser. ``None`` if not recognized.\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string):\n if isinstance(environ_or_string, dict):\n environ_or_string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n self.string = environ_or_string\n self.platform, self.browser, self.version, self.language = self._parser(\n environ_or_string\n )\n\n def to_header(self):\n return self.string\n\n def __str__(self):\n return self.string\n\n def __nonzero__(self):\n return bool(self.browser)\n\n __bool__ = __nonzero__\n\n def __repr__(self):\n return \"<%s %r/%s>\" % (self.__class__.__name__, self.browser, self.version)\n", "path": "src/werkzeug/useragents.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.useragents\n ~~~~~~~~~~~~~~~~~~~\n\n This module provides a helper to inspect user agent strings. This module\n is far from complete but should work for most of the currently available\n browsers.\n\n\n :copyright: 2007 Pallets\n :license: BSD-3-Clause\n\"\"\"\nimport re\n\n\nclass UserAgentParser(object):\n \"\"\"A simple user agent parser. Used by the `UserAgent`.\"\"\"\n\n platforms = (\n (\" cros \", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin|mac|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browsers = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n (\"edge|edg\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? 
rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:%s)[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self):\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]\n self.browsers = [\n (b, re.compile(self._browser_version_re % a, re.I))\n for a, b in self.browsers\n ]\n\n def __call__(self, user_agent):\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent(object):\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. ``None`` if not recognized.\n The following platforms are currently recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. ``None`` if not recognized.\n The following browsers are currently recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser. ``None`` if not recognized.\n\n .. attribute:: language\n\n the language of the browser. ``None`` if not recognized.\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string):\n if isinstance(environ_or_string, dict):\n environ_or_string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n self.string = environ_or_string\n self.platform, self.browser, self.version, self.language = self._parser(\n environ_or_string\n )\n\n def to_header(self):\n return self.string\n\n def __str__(self):\n return self.string\n\n def __nonzero__(self):\n return bool(self.browser)\n\n __bool__ = __nonzero__\n\n def __repr__(self):\n return \"<%s %r/%s>\" % (self.__class__.__name__, self.browser, self.version)\n", "path": "src/werkzeug/useragents.py"}]} |
gh_patches_debug_1429 | rasdani/github-patches | git_diff | ansible-collections__community.general-1082 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
solaris_zone: zone configuration fails with python3
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
<!--- Explain the problem briefly below -->
A `TypeError` is raised when trying to create a Solaris zone with the `solaris_zone` module; the full traceback is shown under actual results below.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
`solaris_zone`
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```paste below
ansible 2.10.1
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/jbronn/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/jbronn/.local/lib/python3.8/site-packages/ansible
executable location = /home/jbronn/.local/bin/ansible-playbook
python version = 3.8.5 (default, Jul 28 2020, 12:59:40) [GCC 9.3.0]
```
##### CONFIGURATION
N/A
##### OS / ENVIRONMENT
OmniOS CE r151034t (Illumos); target host Python is 3.7.5.
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: 'solaris zones'
hosts: all
become: true
tasks:
- solaris_zone:
name: z1
state: installed
path: /zones/z1
vars:
ansible_python_interpreter: '/usr/bin/python3'
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
The zone, `z1`, should be configured and installed.
##### ACTUAL RESULTS
Running the playbook produces this traceback:
```
Traceback (most recent call last):
File "/export/home/jbronn/.ansible/tmp/ansible-tmp-1602198687.9610054-1444903-128778670541170/AnsiballZ_solaris_zone.py", line 102, in <module>
_ansiballz_main()
File "/export/home/jbronn/.ansible/tmp/ansible-tmp-1602198687.9610054-1444903-128778670541170/AnsiballZ_solaris_zone.py", line 94, in _ansiballz_main
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
File "/export/home/jbronn/.ansible/tmp/ansible-tmp-1602198687.9610054-1444903-128778670541170/AnsiballZ_solaris_zone.py", line 40, in invoke_module
runpy.run_module(mod_name='ansible_collections.community.general.plugins.modules.solaris_zone', init_globals=None, run_name='__main__', alter_sys=True)
File "/usr/lib/python3.7/runpy.py", line 205, in run_module
return _run_module_code(code, init_globals, run_name, mod_spec)
File "/usr/lib/python3.7/runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py", line 486, in <module>
File "/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py", line 468, in main
File "/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py", line 395, in state_present
File "/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py", line 202, in configure
File "/usr/lib/python3.7/tempfile.py", line 481, in func_wrapper
return func(*args, **kwargs)
TypeError: a bytes-like object is required, not 'str'
```
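
The frame just above the `TypeError` points at the `t.write(...)` call inside the module's `configure()` method (solaris_zone.py line 202). On Python 3, `tempfile.NamedTemporaryFile()` opens the file in binary mode (`'w+b'`) by default, so writing a plain `str` raises exactly this error; under Python 2 the same call happened to work because `str` was a byte string. A minimal reproduction outside Ansible, plus the two generic ways such code is usually adjusted (shown only as illustration, not necessarily the fix the module will adopt):

```python
import os
import tempfile

t = tempfile.NamedTemporaryFile(delete=False)  # default mode is 'w+b' (binary)
try:
    t.write('create -b \n')                    # str into a binary file object
except TypeError as exc:
    print(exc)                                 # a bytes-like object is required, not 'str'

t.write('create -b \n'.encode())               # option 1: encode to bytes
t.close()

t2 = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
t2.write('create -b \n')                       # option 2: open in text mode
t2.close()

os.unlink(t.name)
os.unlink(t2.name)
```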
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/system/solaris_zone.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2015, Paul Markham <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10 DOCUMENTATION = r'''
11 ---
12 module: solaris_zone
13 short_description: Manage Solaris zones
14 description:
15 - Create, start, stop and delete Solaris zones.
16 - This module does not currently allow changing of options for a zone that is already been created.
17 author:
18 - Paul Markham (@pmarkham)
19 requirements:
20 - Solaris 10 or 11
21 options:
22 state:
23 description:
24 - C(present), configure and install the zone.
25 - C(installed), synonym for C(present).
26 - C(running), if the zone already exists, boot it, otherwise, configure and install
27 the zone first, then boot it.
28 - C(started), synonym for C(running).
29 - C(stopped), shutdown a zone.
30 - C(absent), destroy the zone.
31 - C(configured), configure the ready so that it's to be attached.
32 - C(attached), attach a zone, but do not boot it.
33 - C(detached), shutdown and detach a zone
34 type: str
35 choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]
36 default: present
37 required: true
38 name:
39 description:
40 - Zone name.
41 - A zone name must be unique name.
42 - A zone name must begin with an alpha-numeric character.
43 - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).
44 - The name cannot be longer than 64 characters.
45 type: str
46 required: true
47 path:
48 description:
49 - The path where the zone will be created. This is required when the zone is created, but not
50 used otherwise.
51 type: str
52 sparse:
53 description:
54 - Whether to create a sparse (C(true)) or whole root (C(false)) zone.
55 type: bool
56 default: no
57 root_password:
58 description:
59 - The password hash for the root account. If not specified, the zone's root account
60 will not have a password.
61 type: str
62 config:
63 description:
64 - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options
65 and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.
66 "set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"'
67 type: str
68 default: ''
69 create_options:
70 description:
71 - 'Extra options to the zonecfg(1M) create command.'
72 type: str
73 default: ''
74 install_options:
75 description:
76 - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,
77 use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"'
78 type: str
79 default: ''
80 attach_options:
81 description:
82 - 'Extra options to the zoneadm attach command. For example, this can be used to specify
83 whether a minimum or full update of packages is required and if any packages need to
84 be deleted. For valid values, see zoneadm(1M)'
85 type: str
86 default: ''
87 timeout:
88 description:
89 - Timeout, in seconds, for zone to boot.
90 type: int
91 default: 600
92 '''
93
94 EXAMPLES = '''
95 - name: Create and install a zone, but don't boot it
96 community.general.solaris_zone:
97 name: zone1
98 state: present
99 path: /zones/zone1
100 sparse: True
101 root_password: Be9oX7OSwWoU.
102 config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
103
104 - name: Create and install a zone and boot it
105 community.general.solaris_zone:
106 name: zone1
107 state: running
108 path: /zones/zone1
109 root_password: Be9oX7OSwWoU.
110 config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
111
112 - name: Boot an already installed zone
113 community.general.solaris_zone:
114 name: zone1
115 state: running
116
117 - name: Stop a zone
118 community.general.solaris_zone:
119 name: zone1
120 state: stopped
121
122 - name: Destroy a zone
123 community.general.solaris_zone:
124 name: zone1
125 state: absent
126
127 - name: Detach a zone
128 community.general.solaris_zone:
129 name: zone1
130 state: detached
131
132 - name: Configure a zone, ready to be attached
133 community.general.solaris_zone:
134 name: zone1
135 state: configured
136 path: /zones/zone1
137 root_password: Be9oX7OSwWoU.
138 config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
139
140 - name: Attach zone1
141 community.general.solaris_zone:
142 name: zone1
143 state: attached
144 attach_options: -u
145 '''
146
147 import os
148 import platform
149 import re
150 import tempfile
151 import time
152
153 from ansible.module_utils.basic import AnsibleModule
154
155
156 class Zone(object):
157 def __init__(self, module):
158 self.changed = False
159 self.msg = []
160
161 self.module = module
162 self.path = self.module.params['path']
163 self.name = self.module.params['name']
164 self.sparse = self.module.params['sparse']
165 self.root_password = self.module.params['root_password']
166 self.timeout = self.module.params['timeout']
167 self.config = self.module.params['config']
168 self.create_options = self.module.params['create_options']
169 self.install_options = self.module.params['install_options']
170 self.attach_options = self.module.params['attach_options']
171
172 self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)
173 self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)
174 self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)
175
176 if self.module.check_mode:
177 self.msg.append('Running in check mode')
178
179 if platform.system() != 'SunOS':
180 self.module.fail_json(msg='This module requires Solaris')
181
182 (self.os_major, self.os_minor) = platform.release().split('.')
183 if int(self.os_minor) < 10:
184 self.module.fail_json(msg='This module requires Solaris 10 or later')
185
186 match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)
187 if not match:
188 self.module.fail_json(msg="Provided zone name is not a valid zone name. "
189 "Please refer documentation for correct zone name specifications.")
190
191 def configure(self):
192 if not self.path:
193 self.module.fail_json(msg='Missing required argument: path')
194
195 if not self.module.check_mode:
196 t = tempfile.NamedTemporaryFile(delete=False)
197
198 if self.sparse:
199 t.write('create %s\n' % self.create_options)
200 self.msg.append('creating sparse-root zone')
201 else:
202 t.write('create -b %s\n' % self.create_options)
203 self.msg.append('creating whole-root zone')
204
205 t.write('set zonepath=%s\n' % self.path)
206 t.write('%s\n' % self.config)
207 t.close()
208
209 cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)
210 (rc, out, err) = self.module.run_command(cmd)
211 if rc != 0:
212 self.module.fail_json(msg='Failed to create zone. %s' % (out + err))
213 os.unlink(t.name)
214
215 self.changed = True
216 self.msg.append('zone configured')
217
218 def install(self):
219 if not self.module.check_mode:
220 cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)
221 (rc, out, err) = self.module.run_command(cmd)
222 if rc != 0:
223 self.module.fail_json(msg='Failed to install zone. %s' % (out + err))
224 if int(self.os_minor) == 10:
225 self.configure_sysid()
226 self.configure_password()
227 self.configure_ssh_keys()
228 self.changed = True
229 self.msg.append('zone installed')
230
231 def uninstall(self):
232 if self.is_installed():
233 if not self.module.check_mode:
234 cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)
235 (rc, out, err) = self.module.run_command(cmd)
236 if rc != 0:
237 self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))
238 self.changed = True
239 self.msg.append('zone uninstalled')
240
241 def configure_sysid(self):
242 if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):
243 os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)
244
245 open('%s/root/noautoshutdown' % self.path, 'w').close()
246
247 node = open('%s/root/etc/nodename' % self.path, 'w')
248 node.write(self.name)
249 node.close()
250
251 id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')
252 id.write('1 # System previously configured?\n')
253 id.write('1 # Bootparams succeeded?\n')
254 id.write('1 # System is on a network?\n')
255 id.write('1 # Extended network information gathered?\n')
256 id.write('0 # Autobinder succeeded?\n')
257 id.write('1 # Network has subnets?\n')
258 id.write('1 # root password prompted for?\n')
259 id.write('1 # locale and term prompted for?\n')
260 id.write('1 # security policy in place\n')
261 id.write('1 # NFSv4 domain configured\n')
262 id.write('0 # Auto Registration Configured\n')
263 id.write('vt100')
264 id.close()
265
266 def configure_ssh_keys(self):
267 rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path
268 dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path
269
270 if not os.path.isfile(rsa_key_file):
271 cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file)
272 (rc, out, err) = self.module.run_command(cmd)
273 if rc != 0:
274 self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))
275
276 if not os.path.isfile(dsa_key_file):
277 cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file)
278 (rc, out, err) = self.module.run_command(cmd)
279 if rc != 0:
280 self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err))
281
282 def configure_password(self):
283 shadow = '%s/root/etc/shadow' % self.path
284 if self.root_password:
285 f = open(shadow, 'r')
286 lines = f.readlines()
287 f.close()
288
289 for i in range(0, len(lines)):
290 fields = lines[i].split(':')
291 if fields[0] == 'root':
292 fields[1] = self.root_password
293 lines[i] = ':'.join(fields)
294
295 f = open(shadow, 'w')
296 for line in lines:
297 f.write(line)
298 f.close()
299
300 def boot(self):
301 if not self.module.check_mode:
302 cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)
303 (rc, out, err) = self.module.run_command(cmd)
304 if rc != 0:
305 self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))
306
307 """
308 The boot command can return before the zone has fully booted. This is especially
309 true on the first boot when the zone initializes the SMF services. Unless the zone
310 has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.
311 Wait until the zone's console login is running; once that's running, consider the zone booted.
312 """
313
314 elapsed = 0
315 while True:
316 if elapsed > self.timeout:
317 self.module.fail_json(msg='timed out waiting for zone to boot')
318 rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name)
319 if rc == 0:
320 break
321 time.sleep(10)
322 elapsed += 10
323 self.changed = True
324 self.msg.append('zone booted')
325
326 def destroy(self):
327 if self.is_running():
328 self.stop()
329 if self.is_installed():
330 self.uninstall()
331 if not self.module.check_mode:
332 cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)
333 (rc, out, err) = self.module.run_command(cmd)
334 if rc != 0:
335 self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))
336 self.changed = True
337 self.msg.append('zone deleted')
338
339 def stop(self):
340 if not self.module.check_mode:
341 cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)
342 (rc, out, err) = self.module.run_command(cmd)
343 if rc != 0:
344 self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))
345 self.changed = True
346 self.msg.append('zone stopped')
347
348 def detach(self):
349 if not self.module.check_mode:
350 cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)
351 (rc, out, err) = self.module.run_command(cmd)
352 if rc != 0:
353 self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))
354 self.changed = True
355 self.msg.append('zone detached')
356
357 def attach(self):
358 if not self.module.check_mode:
359 cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)
360 (rc, out, err) = self.module.run_command(cmd)
361 if rc != 0:
362 self.module.fail_json(msg='Failed to attach zone. %s' % (out + err))
363 self.changed = True
364 self.msg.append('zone attached')
365
366 def exists(self):
367 cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)
368 (rc, out, err) = self.module.run_command(cmd)
369 if rc == 0:
370 return True
371 else:
372 return False
373
374 def is_running(self):
375 return self.status() == 'running'
376
377 def is_installed(self):
378 return self.status() == 'installed'
379
380 def is_configured(self):
381 return self.status() == 'configured'
382
383 def status(self):
384 cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)
385 (rc, out, err) = self.module.run_command(cmd)
386 if rc == 0:
387 return out.split(':')[2]
388 else:
389 return 'undefined'
390
391 def state_present(self):
392 if self.exists():
393 self.msg.append('zone already exists')
394 else:
395 self.configure()
396 self.install()
397
398 def state_running(self):
399 self.state_present()
400 if self.is_running():
401 self.msg.append('zone already running')
402 else:
403 self.boot()
404
405 def state_stopped(self):
406 if self.exists():
407 self.stop()
408 else:
409 self.module.fail_json(msg='zone does not exist')
410
411 def state_absent(self):
412 if self.exists():
413 if self.is_running():
414 self.stop()
415 self.destroy()
416 else:
417 self.msg.append('zone does not exist')
418
419 def state_configured(self):
420 if self.exists():
421 self.msg.append('zone already exists')
422 else:
423 self.configure()
424
425 def state_detached(self):
426 if not self.exists():
427 self.module.fail_json(msg='zone does not exist')
428 if self.is_configured():
429 self.msg.append('zone already detached')
430 else:
431 self.stop()
432 self.detach()
433
434 def state_attached(self):
435 if not self.exists():
436 self.msg.append('zone does not exist')
437 if self.is_configured():
438 self.attach()
439 else:
440 self.msg.append('zone already attached')
441
442
443 def main():
444 module = AnsibleModule(
445 argument_spec=dict(
446 name=dict(type='str', required=True),
447 state=dict(type='str', default='present',
448 choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),
449 path=dict(type='str'),
450 sparse=dict(type='bool', default=False),
451 root_password=dict(type='str', no_log=True),
452 timeout=dict(type='int', default=600),
453 config=dict(type='str', default=''),
454 create_options=dict(type='str', default=''),
455 install_options=dict(type='str', default=''),
456 attach_options=dict(type='str', default=''),
457 ),
458 supports_check_mode=True,
459 )
460
461 zone = Zone(module)
462
463 state = module.params['state']
464
465 if state == 'running' or state == 'started':
466 zone.state_running()
467 elif state == 'present' or state == 'installed':
468 zone.state_present()
469 elif state == 'stopped':
470 zone.state_stopped()
471 elif state == 'absent':
472 zone.state_absent()
473 elif state == 'configured':
474 zone.state_configured()
475 elif state == 'detached':
476 zone.state_detached()
477 elif state == 'attached':
478 zone.state_attached()
479 else:
480 module.fail_json(msg='Invalid state: %s' % state)
481
482 module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
483
484
485 if __name__ == '__main__':
486 main()
487
```
--- END FILES ---
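A note on where the failure sits before the patch instructions: `Zone.configure()` writes its zonecfg commands as `str` to `tempfile.NamedTemporaryFile(delete=False)`, and on Python 3 that file is opened in the default binary mode (`w+b`), so the first `t.write(...)` raises the reported `TypeError`. The sketch below is a standalone illustration only; the zonecfg strings and the UTF-8 choice are assumptions for the demo, not taken from the module.

```python
# Standalone sketch of the Python 3 failure mode seen in Zone.configure() (illustrative only).
import tempfile

with tempfile.NamedTemporaryFile() as t:                  # default mode is 'w+b' (binary)
    try:
        t.write('create -b \nset zonepath=/zones/z1\n')   # str payload, similar to what the module writes
    except TypeError as exc:
        print(exc)                                        # a bytes-like object is required, not 'str'

# Either change avoids the error (assuming UTF-8 is acceptable for zonecfg input):
with tempfile.NamedTemporaryFile(mode='wt') as t:
    t.write('create -b \n')                               # text mode accepts str (the route the fix takes)
with tempfile.NamedTemporaryFile() as t:
    t.write('create -b \n'.encode('utf-8'))               # or keep binary mode and encode explicitly
```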
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/system/solaris_zone.py b/plugins/modules/system/solaris_zone.py
--- a/plugins/modules/system/solaris_zone.py
+++ b/plugins/modules/system/solaris_zone.py
@@ -193,7 +193,7 @@
self.module.fail_json(msg='Missing required argument: path')
if not self.module.check_mode:
- t = tempfile.NamedTemporaryFile(delete=False)
+ t = tempfile.NamedTemporaryFile(delete=False, mode='wt')
if self.sparse:
t.write('create %s\n' % self.create_options)
| {"golden_diff": "diff --git a/plugins/modules/system/solaris_zone.py b/plugins/modules/system/solaris_zone.py\n--- a/plugins/modules/system/solaris_zone.py\n+++ b/plugins/modules/system/solaris_zone.py\n@@ -193,7 +193,7 @@\n self.module.fail_json(msg='Missing required argument: path')\n \n if not self.module.check_mode:\n- t = tempfile.NamedTemporaryFile(delete=False)\n+ t = tempfile.NamedTemporaryFile(delete=False, mode='wt')\n \n if self.sparse:\n t.write('create %s\\n' % self.create_options)\n", "issue": "solaris_zone: zone configuration fails with python3\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and devel branch are affected too -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\n<!--- Explain the problem briefly below -->\r\n\r\nType error when trying to create a Solaris Zone with the `solaris_zone` module:\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\n<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->\r\n\r\n`solaris_zone`\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes -->\r\n```paste below\r\nansible 2.10.1\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = ['/home/jbronn/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/jbronn/.local/lib/python3.8/site-packages/ansible\r\n executable location = /home/jbronn/.local/bin/ansible-playbook\r\n python version = 3.8.5 (default, Jul 28 2020, 12:59:40) [GCC 9.3.0]\r\n```\r\n\r\n##### CONFIGURATION\r\nN/A\r\n\r\n##### OS / ENVIRONMENT\r\nOmniOS CE r151034t (Illumos); target host Python is 3.7.5.\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: 'solaris zones'\r\n hosts: all\r\n become: true\r\n tasks:\r\n - solaris_zone:\r\n name: z1\r\n state: installed\r\n path: /zones/z1\r\n vars:\r\n ansible_python_interpreter: '/usr/bin/python3'\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- Describe what you expected to happen when running the steps above -->\r\nThe zone, `z1`, should be configured and installed.\r\n\r\n##### ACTUAL RESULTS\r\nRunning the playbook produces this traceback:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/export/home/jbronn/.ansible/tmp/ansible-tmp-1602198687.9610054-1444903-128778670541170/AnsiballZ_solaris_zone.py\", line 102, in <module>\r\n _ansiballz_main()\r\n File \"/export/home/jbronn/.ansible/tmp/ansible-tmp-1602198687.9610054-1444903-128778670541170/AnsiballZ_solaris_zone.py\", line 94, in _ansiballz_main\r\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\r\n File \"/export/home/jbronn/.ansible/tmp/ansible-tmp-1602198687.9610054-1444903-128778670541170/AnsiballZ_solaris_zone.py\", line 40, in invoke_module\r\n runpy.run_module(mod_name='ansible_collections.community.general.plugins.modules.solaris_zone', init_globals=None, run_name='__main__', alter_sys=True)\r\n File \"/usr/lib/python3.7/runpy.py\", line 205, in run_module\r\n return _run_module_code(code, init_globals, run_name, mod_spec)\r\n File \"/usr/lib/python3.7/runpy.py\", line 96, in _run_module_code\r\n mod_name, mod_spec, pkg_name, 
script_name)\r\n File \"/usr/lib/python3.7/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py\", line 486, in <module>\r\n File \"/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py\", line 468, in main\r\n File \"/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py\", line 395, in state_present\r\n File \"/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py\", line 202, in configure\r\n File \"/usr/lib/python3.7/tempfile.py\", line 481, in func_wrapper\r\n return func(*args, **kwargs)\r\nTypeError: a bytes-like object is required, not 'str'\r\n```\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2015, Paul Markham <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n---\nmodule: solaris_zone\nshort_description: Manage Solaris zones\ndescription:\n - Create, start, stop and delete Solaris zones.\n - This module does not currently allow changing of options for a zone that is already been created.\nauthor:\n- Paul Markham (@pmarkham)\nrequirements:\n - Solaris 10 or 11\noptions:\n state:\n description:\n - C(present), configure and install the zone.\n - C(installed), synonym for C(present).\n - C(running), if the zone already exists, boot it, otherwise, configure and install\n the zone first, then boot it.\n - C(started), synonym for C(running).\n - C(stopped), shutdown a zone.\n - C(absent), destroy the zone.\n - C(configured), configure the ready so that it's to be attached.\n - C(attached), attach a zone, but do not boot it.\n - C(detached), shutdown and detach a zone\n type: str\n choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]\n default: present\n required: true\n name:\n description:\n - Zone name.\n - A zone name must be unique name.\n - A zone name must begin with an alpha-numeric character.\n - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).\n - The name cannot be longer than 64 characters.\n type: str\n required: true\n path:\n description:\n - The path where the zone will be created. This is required when the zone is created, but not\n used otherwise.\n type: str\n sparse:\n description:\n - Whether to create a sparse (C(true)) or whole root (C(false)) zone.\n type: bool\n default: no\n root_password:\n description:\n - The password hash for the root account. If not specified, the zone's root account\n will not have a password.\n type: str\n config:\n description:\n - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options\n and syntax. 
Typically this is a list of options separated by semi-colons or new lines, e.g.\n \"set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end\"'\n type: str\n default: ''\n create_options:\n description:\n - 'Extra options to the zonecfg(1M) create command.'\n type: str\n default: ''\n install_options:\n description:\n - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,\n use this to specify the profile XML file, e.g. install_options=\"-c sc_profile.xml\"'\n type: str\n default: ''\n attach_options:\n description:\n - 'Extra options to the zoneadm attach command. For example, this can be used to specify\n whether a minimum or full update of packages is required and if any packages need to\n be deleted. For valid values, see zoneadm(1M)'\n type: str\n default: ''\n timeout:\n description:\n - Timeout, in seconds, for zone to boot.\n type: int\n default: 600\n'''\n\nEXAMPLES = '''\n- name: Create and install a zone, but don't boot it\n community.general.solaris_zone:\n name: zone1\n state: present\n path: /zones/zone1\n sparse: True\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Create and install a zone and boot it\n community.general.solaris_zone:\n name: zone1\n state: running\n path: /zones/zone1\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Boot an already installed zone\n community.general.solaris_zone:\n name: zone1\n state: running\n\n- name: Stop a zone\n community.general.solaris_zone:\n name: zone1\n state: stopped\n\n- name: Destroy a zone\n community.general.solaris_zone:\n name: zone1\n state: absent\n\n- name: Detach a zone\n community.general.solaris_zone:\n name: zone1\n state: detached\n\n- name: Configure a zone, ready to be attached\n community.general.solaris_zone:\n name: zone1\n state: configured\n path: /zones/zone1\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Attach zone1\n community.general.solaris_zone:\n name: zone1\n state: attached\n attach_options: -u\n'''\n\nimport os\nimport platform\nimport re\nimport tempfile\nimport time\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass Zone(object):\n def __init__(self, module):\n self.changed = False\n self.msg = []\n\n self.module = module\n self.path = self.module.params['path']\n self.name = self.module.params['name']\n self.sparse = self.module.params['sparse']\n self.root_password = self.module.params['root_password']\n self.timeout = self.module.params['timeout']\n self.config = self.module.params['config']\n self.create_options = self.module.params['create_options']\n self.install_options = self.module.params['install_options']\n self.attach_options = self.module.params['attach_options']\n\n self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)\n self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)\n self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)\n\n if self.module.check_mode:\n self.msg.append('Running in check mode')\n\n if platform.system() != 'SunOS':\n self.module.fail_json(msg='This module requires Solaris')\n\n (self.os_major, self.os_minor) = platform.release().split('.')\n if int(self.os_minor) < 10:\n self.module.fail_json(msg='This module requires Solaris 10 or later')\n\n match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)\n if not match:\n 
self.module.fail_json(msg=\"Provided zone name is not a valid zone name. \"\n \"Please refer documentation for correct zone name specifications.\")\n\n def configure(self):\n if not self.path:\n self.module.fail_json(msg='Missing required argument: path')\n\n if not self.module.check_mode:\n t = tempfile.NamedTemporaryFile(delete=False)\n\n if self.sparse:\n t.write('create %s\\n' % self.create_options)\n self.msg.append('creating sparse-root zone')\n else:\n t.write('create -b %s\\n' % self.create_options)\n self.msg.append('creating whole-root zone')\n\n t.write('set zonepath=%s\\n' % self.path)\n t.write('%s\\n' % self.config)\n t.close()\n\n cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create zone. %s' % (out + err))\n os.unlink(t.name)\n\n self.changed = True\n self.msg.append('zone configured')\n\n def install(self):\n if not self.module.check_mode:\n cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to install zone. %s' % (out + err))\n if int(self.os_minor) == 10:\n self.configure_sysid()\n self.configure_password()\n self.configure_ssh_keys()\n self.changed = True\n self.msg.append('zone installed')\n\n def uninstall(self):\n if self.is_installed():\n if not self.module.check_mode:\n cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone uninstalled')\n\n def configure_sysid(self):\n if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):\n os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)\n\n open('%s/root/noautoshutdown' % self.path, 'w').close()\n\n node = open('%s/root/etc/nodename' % self.path, 'w')\n node.write(self.name)\n node.close()\n\n id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')\n id.write('1 # System previously configured?\\n')\n id.write('1 # Bootparams succeeded?\\n')\n id.write('1 # System is on a network?\\n')\n id.write('1 # Extended network information gathered?\\n')\n id.write('0 # Autobinder succeeded?\\n')\n id.write('1 # Network has subnets?\\n')\n id.write('1 # root password prompted for?\\n')\n id.write('1 # locale and term prompted for?\\n')\n id.write('1 # security policy in place\\n')\n id.write('1 # NFSv4 domain configured\\n')\n id.write('0 # Auto Registration Configured\\n')\n id.write('vt100')\n id.close()\n\n def configure_ssh_keys(self):\n rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path\n dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path\n\n if not os.path.isfile(rsa_key_file):\n cmd = '%s -f %s -t rsa -N \"\"' % (self.ssh_keygen_cmd, rsa_key_file)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))\n\n if not os.path.isfile(dsa_key_file):\n cmd = '%s -f %s -t dsa -N \"\"' % (self.ssh_keygen_cmd, dsa_key_file)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create dsa key. 
%s' % (out + err))\n\n def configure_password(self):\n shadow = '%s/root/etc/shadow' % self.path\n if self.root_password:\n f = open(shadow, 'r')\n lines = f.readlines()\n f.close()\n\n for i in range(0, len(lines)):\n fields = lines[i].split(':')\n if fields[0] == 'root':\n fields[1] = self.root_password\n lines[i] = ':'.join(fields)\n\n f = open(shadow, 'w')\n for line in lines:\n f.write(line)\n f.close()\n\n def boot(self):\n if not self.module.check_mode:\n cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))\n\n \"\"\"\n The boot command can return before the zone has fully booted. This is especially\n true on the first boot when the zone initializes the SMF services. Unless the zone\n has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.\n Wait until the zone's console login is running; once that's running, consider the zone booted.\n \"\"\"\n\n elapsed = 0\n while True:\n if elapsed > self.timeout:\n self.module.fail_json(msg='timed out waiting for zone to boot')\n rc = os.system('ps -z %s -o args|grep \"ttymon.*-d /dev/console\" > /dev/null 2>/dev/null' % self.name)\n if rc == 0:\n break\n time.sleep(10)\n elapsed += 10\n self.changed = True\n self.msg.append('zone booted')\n\n def destroy(self):\n if self.is_running():\n self.stop()\n if self.is_installed():\n self.uninstall()\n if not self.module.check_mode:\n cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone deleted')\n\n def stop(self):\n if not self.module.check_mode:\n cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone stopped')\n\n def detach(self):\n if not self.module.check_mode:\n cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone detached')\n\n def attach(self):\n if not self.module.check_mode:\n cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to attach zone. 
%s' % (out + err))\n self.changed = True\n self.msg.append('zone attached')\n\n def exists(self):\n cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc == 0:\n return True\n else:\n return False\n\n def is_running(self):\n return self.status() == 'running'\n\n def is_installed(self):\n return self.status() == 'installed'\n\n def is_configured(self):\n return self.status() == 'configured'\n\n def status(self):\n cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc == 0:\n return out.split(':')[2]\n else:\n return 'undefined'\n\n def state_present(self):\n if self.exists():\n self.msg.append('zone already exists')\n else:\n self.configure()\n self.install()\n\n def state_running(self):\n self.state_present()\n if self.is_running():\n self.msg.append('zone already running')\n else:\n self.boot()\n\n def state_stopped(self):\n if self.exists():\n self.stop()\n else:\n self.module.fail_json(msg='zone does not exist')\n\n def state_absent(self):\n if self.exists():\n if self.is_running():\n self.stop()\n self.destroy()\n else:\n self.msg.append('zone does not exist')\n\n def state_configured(self):\n if self.exists():\n self.msg.append('zone already exists')\n else:\n self.configure()\n\n def state_detached(self):\n if not self.exists():\n self.module.fail_json(msg='zone does not exist')\n if self.is_configured():\n self.msg.append('zone already detached')\n else:\n self.stop()\n self.detach()\n\n def state_attached(self):\n if not self.exists():\n self.msg.append('zone does not exist')\n if self.is_configured():\n self.attach()\n else:\n self.msg.append('zone already attached')\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(type='str', required=True),\n state=dict(type='str', default='present',\n choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),\n path=dict(type='str'),\n sparse=dict(type='bool', default=False),\n root_password=dict(type='str', no_log=True),\n timeout=dict(type='int', default=600),\n config=dict(type='str', default=''),\n create_options=dict(type='str', default=''),\n install_options=dict(type='str', default=''),\n attach_options=dict(type='str', default=''),\n ),\n supports_check_mode=True,\n )\n\n zone = Zone(module)\n\n state = module.params['state']\n\n if state == 'running' or state == 'started':\n zone.state_running()\n elif state == 'present' or state == 'installed':\n zone.state_present()\n elif state == 'stopped':\n zone.state_stopped()\n elif state == 'absent':\n zone.state_absent()\n elif state == 'configured':\n zone.state_configured()\n elif state == 'detached':\n zone.state_detached()\n elif state == 'attached':\n zone.state_attached()\n else:\n module.fail_json(msg='Invalid state: %s' % state)\n\n module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/solaris_zone.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2015, Paul Markham <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n---\nmodule: solaris_zone\nshort_description: Manage Solaris zones\ndescription:\n - Create, start, stop and delete Solaris zones.\n - This module does not currently 
allow changing of options for a zone that is already been created.\nauthor:\n- Paul Markham (@pmarkham)\nrequirements:\n - Solaris 10 or 11\noptions:\n state:\n description:\n - C(present), configure and install the zone.\n - C(installed), synonym for C(present).\n - C(running), if the zone already exists, boot it, otherwise, configure and install\n the zone first, then boot it.\n - C(started), synonym for C(running).\n - C(stopped), shutdown a zone.\n - C(absent), destroy the zone.\n - C(configured), configure the ready so that it's to be attached.\n - C(attached), attach a zone, but do not boot it.\n - C(detached), shutdown and detach a zone\n type: str\n choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]\n default: present\n required: true\n name:\n description:\n - Zone name.\n - A zone name must be unique name.\n - A zone name must begin with an alpha-numeric character.\n - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).\n - The name cannot be longer than 64 characters.\n type: str\n required: true\n path:\n description:\n - The path where the zone will be created. This is required when the zone is created, but not\n used otherwise.\n type: str\n sparse:\n description:\n - Whether to create a sparse (C(true)) or whole root (C(false)) zone.\n type: bool\n default: no\n root_password:\n description:\n - The password hash for the root account. If not specified, the zone's root account\n will not have a password.\n type: str\n config:\n description:\n - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options\n and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.\n \"set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end\"'\n type: str\n default: ''\n create_options:\n description:\n - 'Extra options to the zonecfg(1M) create command.'\n type: str\n default: ''\n install_options:\n description:\n - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,\n use this to specify the profile XML file, e.g. install_options=\"-c sc_profile.xml\"'\n type: str\n default: ''\n attach_options:\n description:\n - 'Extra options to the zoneadm attach command. For example, this can be used to specify\n whether a minimum or full update of packages is required and if any packages need to\n be deleted. 
For valid values, see zoneadm(1M)'\n type: str\n default: ''\n timeout:\n description:\n - Timeout, in seconds, for zone to boot.\n type: int\n default: 600\n'''\n\nEXAMPLES = '''\n- name: Create and install a zone, but don't boot it\n community.general.solaris_zone:\n name: zone1\n state: present\n path: /zones/zone1\n sparse: True\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Create and install a zone and boot it\n community.general.solaris_zone:\n name: zone1\n state: running\n path: /zones/zone1\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Boot an already installed zone\n community.general.solaris_zone:\n name: zone1\n state: running\n\n- name: Stop a zone\n community.general.solaris_zone:\n name: zone1\n state: stopped\n\n- name: Destroy a zone\n community.general.solaris_zone:\n name: zone1\n state: absent\n\n- name: Detach a zone\n community.general.solaris_zone:\n name: zone1\n state: detached\n\n- name: Configure a zone, ready to be attached\n community.general.solaris_zone:\n name: zone1\n state: configured\n path: /zones/zone1\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Attach zone1\n community.general.solaris_zone:\n name: zone1\n state: attached\n attach_options: -u\n'''\n\nimport os\nimport platform\nimport re\nimport tempfile\nimport time\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass Zone(object):\n def __init__(self, module):\n self.changed = False\n self.msg = []\n\n self.module = module\n self.path = self.module.params['path']\n self.name = self.module.params['name']\n self.sparse = self.module.params['sparse']\n self.root_password = self.module.params['root_password']\n self.timeout = self.module.params['timeout']\n self.config = self.module.params['config']\n self.create_options = self.module.params['create_options']\n self.install_options = self.module.params['install_options']\n self.attach_options = self.module.params['attach_options']\n\n self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)\n self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)\n self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)\n\n if self.module.check_mode:\n self.msg.append('Running in check mode')\n\n if platform.system() != 'SunOS':\n self.module.fail_json(msg='This module requires Solaris')\n\n (self.os_major, self.os_minor) = platform.release().split('.')\n if int(self.os_minor) < 10:\n self.module.fail_json(msg='This module requires Solaris 10 or later')\n\n match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)\n if not match:\n self.module.fail_json(msg=\"Provided zone name is not a valid zone name. 
\"\n \"Please refer documentation for correct zone name specifications.\")\n\n def configure(self):\n if not self.path:\n self.module.fail_json(msg='Missing required argument: path')\n\n if not self.module.check_mode:\n t = tempfile.NamedTemporaryFile(delete=False, mode='wt')\n\n if self.sparse:\n t.write('create %s\\n' % self.create_options)\n self.msg.append('creating sparse-root zone')\n else:\n t.write('create -b %s\\n' % self.create_options)\n self.msg.append('creating whole-root zone')\n\n t.write('set zonepath=%s\\n' % self.path)\n t.write('%s\\n' % self.config)\n t.close()\n\n cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create zone. %s' % (out + err))\n os.unlink(t.name)\n\n self.changed = True\n self.msg.append('zone configured')\n\n def install(self):\n if not self.module.check_mode:\n cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to install zone. %s' % (out + err))\n if int(self.os_minor) == 10:\n self.configure_sysid()\n self.configure_password()\n self.configure_ssh_keys()\n self.changed = True\n self.msg.append('zone installed')\n\n def uninstall(self):\n if self.is_installed():\n if not self.module.check_mode:\n cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone uninstalled')\n\n def configure_sysid(self):\n if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):\n os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)\n\n open('%s/root/noautoshutdown' % self.path, 'w').close()\n\n node = open('%s/root/etc/nodename' % self.path, 'w')\n node.write(self.name)\n node.close()\n\n id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')\n id.write('1 # System previously configured?\\n')\n id.write('1 # Bootparams succeeded?\\n')\n id.write('1 # System is on a network?\\n')\n id.write('1 # Extended network information gathered?\\n')\n id.write('0 # Autobinder succeeded?\\n')\n id.write('1 # Network has subnets?\\n')\n id.write('1 # root password prompted for?\\n')\n id.write('1 # locale and term prompted for?\\n')\n id.write('1 # security policy in place\\n')\n id.write('1 # NFSv4 domain configured\\n')\n id.write('0 # Auto Registration Configured\\n')\n id.write('vt100')\n id.close()\n\n def configure_ssh_keys(self):\n rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path\n dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path\n\n if not os.path.isfile(rsa_key_file):\n cmd = '%s -f %s -t rsa -N \"\"' % (self.ssh_keygen_cmd, rsa_key_file)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))\n\n if not os.path.isfile(dsa_key_file):\n cmd = '%s -f %s -t dsa -N \"\"' % (self.ssh_keygen_cmd, dsa_key_file)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create dsa key. 
%s' % (out + err))\n\n def configure_password(self):\n shadow = '%s/root/etc/shadow' % self.path\n if self.root_password:\n f = open(shadow, 'r')\n lines = f.readlines()\n f.close()\n\n for i in range(0, len(lines)):\n fields = lines[i].split(':')\n if fields[0] == 'root':\n fields[1] = self.root_password\n lines[i] = ':'.join(fields)\n\n f = open(shadow, 'w')\n for line in lines:\n f.write(line)\n f.close()\n\n def boot(self):\n if not self.module.check_mode:\n cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))\n\n \"\"\"\n The boot command can return before the zone has fully booted. This is especially\n true on the first boot when the zone initializes the SMF services. Unless the zone\n has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.\n Wait until the zone's console login is running; once that's running, consider the zone booted.\n \"\"\"\n\n elapsed = 0\n while True:\n if elapsed > self.timeout:\n self.module.fail_json(msg='timed out waiting for zone to boot')\n rc = os.system('ps -z %s -o args|grep \"ttymon.*-d /dev/console\" > /dev/null 2>/dev/null' % self.name)\n if rc == 0:\n break\n time.sleep(10)\n elapsed += 10\n self.changed = True\n self.msg.append('zone booted')\n\n def destroy(self):\n if self.is_running():\n self.stop()\n if self.is_installed():\n self.uninstall()\n if not self.module.check_mode:\n cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone deleted')\n\n def stop(self):\n if not self.module.check_mode:\n cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone stopped')\n\n def detach(self):\n if not self.module.check_mode:\n cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone detached')\n\n def attach(self):\n if not self.module.check_mode:\n cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to attach zone. 
%s' % (out + err))\n self.changed = True\n self.msg.append('zone attached')\n\n def exists(self):\n cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc == 0:\n return True\n else:\n return False\n\n def is_running(self):\n return self.status() == 'running'\n\n def is_installed(self):\n return self.status() == 'installed'\n\n def is_configured(self):\n return self.status() == 'configured'\n\n def status(self):\n cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc == 0:\n return out.split(':')[2]\n else:\n return 'undefined'\n\n def state_present(self):\n if self.exists():\n self.msg.append('zone already exists')\n else:\n self.configure()\n self.install()\n\n def state_running(self):\n self.state_present()\n if self.is_running():\n self.msg.append('zone already running')\n else:\n self.boot()\n\n def state_stopped(self):\n if self.exists():\n self.stop()\n else:\n self.module.fail_json(msg='zone does not exist')\n\n def state_absent(self):\n if self.exists():\n if self.is_running():\n self.stop()\n self.destroy()\n else:\n self.msg.append('zone does not exist')\n\n def state_configured(self):\n if self.exists():\n self.msg.append('zone already exists')\n else:\n self.configure()\n\n def state_detached(self):\n if not self.exists():\n self.module.fail_json(msg='zone does not exist')\n if self.is_configured():\n self.msg.append('zone already detached')\n else:\n self.stop()\n self.detach()\n\n def state_attached(self):\n if not self.exists():\n self.msg.append('zone does not exist')\n if self.is_configured():\n self.attach()\n else:\n self.msg.append('zone already attached')\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(type='str', required=True),\n state=dict(type='str', default='present',\n choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),\n path=dict(type='str'),\n sparse=dict(type='bool', default=False),\n root_password=dict(type='str', no_log=True),\n timeout=dict(type='int', default=600),\n config=dict(type='str', default=''),\n create_options=dict(type='str', default=''),\n install_options=dict(type='str', default=''),\n attach_options=dict(type='str', default=''),\n ),\n supports_check_mode=True,\n )\n\n zone = Zone(module)\n\n state = module.params['state']\n\n if state == 'running' or state == 'started':\n zone.state_running()\n elif state == 'present' or state == 'installed':\n zone.state_present()\n elif state == 'stopped':\n zone.state_stopped()\n elif state == 'absent':\n zone.state_absent()\n elif state == 'configured':\n zone.state_configured()\n elif state == 'detached':\n zone.state_detached()\n elif state == 'attached':\n zone.state_attached()\n else:\n module.fail_json(msg='Invalid state: %s' % state)\n\n module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/solaris_zone.py"}]} |
gh_patches_debug_1430 | rasdani/github-patches | git_diff | pymedusa__Medusa-6208 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[APP SUBMITTED]: TypeError: cannot use a string pattern on a bytes-like object
### INFO
**Python Version**: `3.7.2 (default, Jan 3 2019, 02:55:40) [GCC 8.2.0]`
**Operating System**: `Linux-4.9.35-v7+-armv7l-with-debian-buster-sid`
**Locale**: `UTF-8`
**Branch**: [develop](../tree/develop)
**Database**: `44.14`
**Commit**: pymedusa/Medusa@18bd87dded99e1ecfbeae7757e226ea5510e0f96
**Link to Log**: https://gist.github.com/4421b6f5dd716b24746e97ed3008b0c4
### ERROR
<pre>
2019-02-10 19:30:40 ERROR SNATCHQUEUE-SNATCH-526 :: [18bd87d] Snatch failed! For result: The.Office.(US).S03.1080p.WEB-DL.AAC2.0.AVC-TrollHD
Traceback (most recent call last):
  File "/home/pi/Medusa/medusa/search/queue.py", line 503, in run
    self.success = snatch_episode(result)
  File "/home/pi/Medusa/medusa/search/core.py", line 132, in snatch_episode
    nzb_data = result.provider.download_nzb_for_post(result)
  File "/home/pi/Medusa/medusa/providers/nzb/binsearch.py", line 275, in download_nzb_for_post
    if not BinSearchProvider.nzb_check_segment.search(response.content):
TypeError: cannot use a string pattern on a bytes-like object
</pre>
---
_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators
--- END ISSUE ---
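The traceback reduces to a `str`/`bytes` mismatch: `BinSearchProvider.nzb_check_segment` is compiled from a text pattern, while `response.content` from `requests` is raw `bytes`. A minimal sketch of that mismatch, independent of Medusa, is below; the payload is an invented stand-in, not real BinSearch output.

```python
# Minimal sketch of the reported TypeError (the payload below is made up).
import re

nzb_check_segment = re.compile(r'<segment bytes="[\d]+"')   # text (str) pattern, as in the provider
payload = b'<segment bytes="739811" number="1">'            # requests' response.content is bytes

try:
    nzb_check_segment.search(payload)
except TypeError as exc:
    print(exc)                        # cannot use a string pattern on a bytes-like object

# Searching a decoded text value (what response.text would provide) works:
print(bool(nzb_check_segment.search(payload.decode('utf-8'))))   # True
```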
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/providers/nzb/binsearch.py`
Content:
```
1 # coding=utf-8
2
3 """Provider code for Binsearch provider."""
4
5 from __future__ import unicode_literals
6
7 import logging
8 import re
9 from builtins import zip
10 from os.path import join
11
12 from medusa import tv
13 from medusa.bs4_parser import BS4Parser
14 from medusa.helper.common import convert_size, sanitize_filename
15 from medusa.helpers import download_file
16 from medusa.logger.adapters.style import BraceAdapter
17 from medusa.providers.nzb.nzb_provider import NZBProvider
18
19 from requests.compat import urljoin
20
21 log = BraceAdapter(logging.getLogger(__name__))
22 log.logger.addHandler(logging.NullHandler())
23
24
25 class BinSearchProvider(NZBProvider):
26 """BinSearch Newznab provider."""
27
28 size_regex = re.compile(r'size: (\d+\.\d+\xa0\w{2}), parts', re.I)
29 title_regex = re.compile(r'\"([^\"]+)"', re.I)
30 title_reqex_clean = re.compile(r'^[ \d_]+ (.+)')
31 title_regex_rss = re.compile(r'- \"([^\"]+)"', re.I)
32 nzb_check_segment = re.compile(r'<segment bytes="[\d]+"')
33
34 def __init__(self):
35 """Initialize the class."""
36 super(BinSearchProvider, self).__init__('BinSearch')
37
38 # Credentials
39 self.public = True
40
41 # URLs
42 self.url = 'https://www.binsearch.info'
43 self.urls = {
44 'search': urljoin(self.url, 'index.php'),
45 'rss': urljoin(self.url, 'browse.php'),
46 }
47
48 # Proper Strings
49 self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']
50
51 # Miscellaneous Options
52
53 # Cache
54 self.cache = tv.Cache(self, min_time=10)
55
56 def search(self, search_strings, **kwargs):
57 """
58 Search a provider and parse the results.
59
60 :param search_strings: A dict with mode (key) and the search value (value)
61 :returns: A list of search results (structure)
62 """
63 results = []
64 search_params = {
65 'adv_age': '',
66 'xminsize': 20,
67 'max': 250,
68 }
69 groups = [1, 2]
70
71 for mode in search_strings:
72 log.debug('Search mode: {0}', mode)
73 # https://www.binsearch.info/browse.php?bg=alt.binaries.teevee&server=2
74 for search_string in search_strings[mode]:
75 search_params['q'] = search_string
76 for group in groups:
77 # Try both 'search in the most popular groups' & 'search in the other groups' modes
78 search_params['server'] = group
79 if mode != 'RSS':
80 log.debug('Search string: {search}', {'search': search_string})
81 search_url = self.urls['search']
82 else:
83 search_params = {
84 'bg': 'alt.binaries.teevee',
85 'server': 2,
86 'max': 50,
87 }
88 search_url = self.urls['rss']
89
90 response = self.session.get(search_url, params=search_params)
91 if not response or not response.text:
92 log.debug('No data returned from provider')
93 continue
94
95 results += self.parse(response.text, mode)
96
97 return results
98
99 def parse(self, data, mode):
100 """
101 Parse search results for items.
102
103 :param data: The raw response from a search
104 :param mode: The current mode used to search, e.g. RSS
105
106 :return: A list of items found
107 """
108 def process_column_header(td):
109 return td.get_text(strip=True).lower()
110
111 items = []
112
113 with BS4Parser(data, 'html5lib') as html:
114
115 # We need to store the post url, to be used with every result later on.
116 post_url = html.find('form', {'method': 'post'})['action']
117
118 table = html.find('table', class_='xMenuT')
119 rows = table('tr') if table else []
120 row_offset = 1
121 if not rows or not len(rows) - row_offset:
122 log.debug('Data returned from provider does not contain any torrents')
123 return items
124
125 headers = rows[0]('th')
126 # 0, 1, subject, poster, group, age
127 labels = [process_column_header(header) or idx
128 for idx, header in enumerate(headers)]
129
130 # Skip column headers
131 rows = rows[row_offset:]
132 for row in rows:
133 try:
134 col = dict(list(zip(labels, row('td'))))
135 nzb_id_input = col[0 if mode == 'RSS' else 1].find('input')
136 if not nzb_id_input:
137 continue
138 nzb_id = nzb_id_input['name']
139 # Try and get the the article subject from the weird binsearch format
140 title = self.clean_title(col['subject'].text, mode)
141
142 except AttributeError:
143 log.debug('Parsing rows, that may not always have useful info. Skipping to next.')
144 continue
145 if not all([title, nzb_id]):
146 continue
147
148 # Obtain the size from the 'description'
149 size_field = BinSearchProvider.size_regex.search(col['subject'].text)
150 if size_field:
151 size_field = size_field.group(1)
152 size = convert_size(size_field, sep='\xa0') or -1
153 size = int(size)
154
155 download_url = urljoin(self.url, '{post_url}|nzb_id={nzb_id}'.format(post_url=post_url, nzb_id=nzb_id))
156
157 # For future use
158 # detail_url = 'https://www.binsearch.info/?q={0}'.format(title)
159 human_time = True
160 date = col['age' if mode != 'RSS' else 'date'].get_text(strip=True).replace('-', ' ')
161 if mode == 'RSS':
162 human_time = False
163 pubdate_raw = date
164 pubdate = self.parse_pubdate(pubdate_raw, human_time=human_time)
165
166 item = {
167 'title': title,
168 'link': download_url,
169 'size': size,
170 'pubdate': pubdate,
171 }
172 if mode != 'RSS':
173 log.debug('Found result: {0}', title)
174
175 items.append(item)
176
177 return items
178
179 @staticmethod
180 def clean_title(title, mode):
181 """
182 Clean title field, using a series of regex.
183
184 RSS search requires different cleaning then the other searches.
185 When adding to this function, make sure you update the tests.
186 """
187 try:
188 if mode == 'RSS':
189 title = BinSearchProvider.title_regex_rss.search(title).group(1)
190 else:
191 title = BinSearchProvider.title_regex.search(title).group(1)
192 if BinSearchProvider.title_reqex_clean.search(title):
193 title = BinSearchProvider.title_reqex_clean.search(title).group(1)
194 for extension in ('.nfo', '.par2', '.rar', '.zip', '.nzb', '.part'):
195 # Strip extensions that aren't part of the file name
196 if title.endswith(extension):
197 title = title[:len(title) - len(extension)]
198 return title
199 except AttributeError:
200 return None
201
202 def download_result(self, result):
203 """
204 Download result from provider.
205
206 This is used when a blackhole is used for sending the nzb file to the nzb client.
207 For now the url and the post data is stored as one string in the db, using a pipe (|) to separate them.
208
209 :param result: A SearchResult object.
210 :return: The result of the nzb download (True/False).
211 """
212 if not self.login():
213 return False
214
215 result_name = sanitize_filename(result.name)
216 filename = join(self._get_storage_dir(), result_name + '.' + self.provider_type)
217
218 if result.url.startswith('http'):
219 self.session.headers.update({
220 'Referer': '/'.join(result.url.split('/')[:3]) + '/'
221 })
222
223 log.info('Downloading {result} from {provider} at {url}',
224 {'result': result.name, 'provider': self.name, 'url': result.url})
225
226 verify = False if self.public else None
227
228 url, data = result.url.split('|')
229
230 data = {
231 data.split('=')[1]: 'on',
232 'action': 'nzb',
233 }
234
235 if download_file(url, filename, method='POST', data=data, session=self.session,
236 headers=self.headers, verify=verify):
237
238 if self._verify_download(filename):
239 log.info('Saved {result} to {location}',
240 {'result': result.name, 'location': filename})
241 return True
242
243 return False
244
245 def download_nzb_for_post(self, result):
246 """
247 Download the nzb content, prior to sending it to the nzb download client.
248
249 :param result: Nzb SearchResult object.
250 :return: The content of the nzb file if successful else None.
251 """
252 if not self.login():
253 return False
254
255 # For now to separate the url and the post data, where splitting it with a pipe.
256 url, data = result.url.split('|')
257
258 data = {
259 data.split('=')[1]: 'on',
260 'action': 'nzb',
261 }
262
263 log.info('Downloading {result} from {provider} at {url} and data {data}',
264 {'result': result.name, 'provider': self.name, 'url': result.url, 'data': data})
265
266 verify = False if self.public else None
267
268 response = self.session.post(url, data=data, headers=self.session.headers,
269 verify=verify, hooks={}, allow_redirects=True)
270 if not response or not response.content:
271 log.warning('Failed to download the NZB from BinSearch')
272 return None
273
274 # Validate that the result has the content of a valid nzb.
275 if not BinSearchProvider.nzb_check_segment.search(response.content):
276 log.warning('Result returned from BinSearch was not a valid NZB')
277 return None
278
279 return response.content
280
281 def _get_size(self, item):
282 """
283 Get result size.
284
285 Overwrite this, as the default _get_size() from nzb_provider isn't working for us.
286 :param item:
287 :return: size in bytes or -1
288 """
289 return item.get('size', -1)
290
291
292 provider = BinSearchProvider()
293
```
--- END FILES ---
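Localization hint before the patch instructions: in the file above, line 32 compiles `nzb_check_segment` from a text pattern, while `download_nzb_for_post()` passes it `response.content` (bytes) at line 275, which is exactly the call in the traceback. Two one-line remedies are plausible: search the decoded `response.text` (the patch shown further down takes this route) or recompile the check as a bytes pattern. The snippet below demonstrates only the second, alternative form with an invented payload; it is not what upstream applied.

```python
# Alternative sketch (not the applied fix): compile the segment check as a bytes pattern so the
# raw response.content can be searched directly. The payload here is an invented stand-in.
import re

nzb_check_segment_bytes = re.compile(br'<segment bytes="[\d]+"')
raw = b'<?xml version="1.0"?><segment bytes="739811" number="1">part</segment>'

print(bool(nzb_check_segment_bytes.search(raw)))   # True: bytes pattern against bytes payload
```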
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/medusa/providers/nzb/binsearch.py b/medusa/providers/nzb/binsearch.py
--- a/medusa/providers/nzb/binsearch.py
+++ b/medusa/providers/nzb/binsearch.py
@@ -272,7 +272,7 @@
return None
# Validate that the result has the content of a valid nzb.
- if not BinSearchProvider.nzb_check_segment.search(response.content):
+ if not BinSearchProvider.nzb_check_segment.search(response.text):
log.warning('Result returned from BinSearch was not a valid NZB')
return None
| {"golden_diff": "diff --git a/medusa/providers/nzb/binsearch.py b/medusa/providers/nzb/binsearch.py\n--- a/medusa/providers/nzb/binsearch.py\n+++ b/medusa/providers/nzb/binsearch.py\n@@ -272,7 +272,7 @@\n return None\n \n # Validate that the result has the content of a valid nzb.\n- if not BinSearchProvider.nzb_check_segment.search(response.content):\n+ if not BinSearchProvider.nzb_check_segment.search(response.text):\n log.warning('Result returned from BinSearch was not a valid NZB')\n return None\n", "issue": "[APP SUBMITTED]: TypeError: cannot use a string pattern on a bytes-like object\n\n### INFO\n**Python Version**: `3.7.2 (default, Jan 3 2019, 02:55:40) [GCC 8.2.0]`\n**Operating System**: `Linux-4.9.35-v7+-armv7l-with-debian-buster-sid`\n**Locale**: `UTF-8`\n**Branch**: [develop](../tree/develop)\n**Database**: `44.14`\n**Commit**: pymedusa/Medusa@18bd87dded99e1ecfbeae7757e226ea5510e0f96\n**Link to Log**: https://gist.github.com/4421b6f5dd716b24746e97ed3008b0c4\n### ERROR\n<pre>\n2019-02-10 19:30:40 ERROR SNATCHQUEUE-SNATCH-526 :: [18bd87d] Snatch failed! For result: The.Office.(US).S03.1080p.WEB-DL.AAC2.0.AVC-TrollHD\nTraceback (most recent call last):\n File \"/home/pi/Medusa/<a href=\"../blob/18bd87dded99e1ecfbeae7757e226ea5510e0f96/medusa/search/queue.py#L503\">medusa/search/queue.py</a>\", line 503, in run\n self.success = snatch_episode(result)\n File \"/home/pi/Medusa/<a href=\"../blob/18bd87dded99e1ecfbeae7757e226ea5510e0f96/medusa/search/core.py#L132\">medusa/search/core.py</a>\", line 132, in snatch_episode\n nzb_data = result.provider.download_nzb_for_post(result)\n File \"/home/pi/Medusa/<a href=\"../blob/18bd87dded99e1ecfbeae7757e226ea5510e0f96/medusa/providers/nzb/binsearch.py#L275\">medusa/providers/nzb/binsearch.py</a>\", line 275, in download_nzb_for_post\n if not BinSearchProvider.nzb_check_segment.search(response.content):\nTypeError: cannot use a string pattern on a bytes-like object\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for Binsearch provider.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\nfrom builtins import zip\nfrom os.path import join\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size, sanitize_filename\nfrom medusa.helpers import download_file\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.nzb.nzb_provider import NZBProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BinSearchProvider(NZBProvider):\n \"\"\"BinSearch Newznab provider.\"\"\"\n\n size_regex = re.compile(r'size: (\\d+\\.\\d+\\xa0\\w{2}), parts', re.I)\n title_regex = re.compile(r'\\\"([^\\\"]+)\"', re.I)\n title_reqex_clean = re.compile(r'^[ \\d_]+ (.+)')\n title_regex_rss = re.compile(r'- \\\"([^\\\"]+)\"', re.I)\n nzb_check_segment = re.compile(r'<segment bytes=\"[\\d]+\"')\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BinSearchProvider, self).__init__('BinSearch')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://www.binsearch.info'\n self.urls = {\n 'search': urljoin(self.url, 'index.php'),\n 'rss': urljoin(self.url, 'browse.php'),\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self, min_time=10)\n\n def 
search(self, search_strings, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n search_params = {\n 'adv_age': '',\n 'xminsize': 20,\n 'max': 250,\n }\n groups = [1, 2]\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n # https://www.binsearch.info/browse.php?bg=alt.binaries.teevee&server=2\n for search_string in search_strings[mode]:\n search_params['q'] = search_string\n for group in groups:\n # Try both 'search in the most popular groups' & 'search in the other groups' modes\n search_params['server'] = group\n if mode != 'RSS':\n log.debug('Search string: {search}', {'search': search_string})\n search_url = self.urls['search']\n else:\n search_params = {\n 'bg': 'alt.binaries.teevee',\n 'server': 2,\n 'max': 50,\n }\n search_url = self.urls['rss']\n\n response = self.session.get(search_url, params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n def process_column_header(td):\n return td.get_text(strip=True).lower()\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n\n # We need to store the post url, to be used with every result later on.\n post_url = html.find('form', {'method': 'post'})['action']\n\n table = html.find('table', class_='xMenuT')\n rows = table('tr') if table else []\n row_offset = 1\n if not rows or not len(rows) - row_offset:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n headers = rows[0]('th')\n # 0, 1, subject, poster, group, age\n labels = [process_column_header(header) or idx\n for idx, header in enumerate(headers)]\n\n # Skip column headers\n rows = rows[row_offset:]\n for row in rows:\n try:\n col = dict(list(zip(labels, row('td'))))\n nzb_id_input = col[0 if mode == 'RSS' else 1].find('input')\n if not nzb_id_input:\n continue\n nzb_id = nzb_id_input['name']\n # Try and get the the article subject from the weird binsearch format\n title = self.clean_title(col['subject'].text, mode)\n\n except AttributeError:\n log.debug('Parsing rows, that may not always have useful info. 
Skipping to next.')\n continue\n if not all([title, nzb_id]):\n continue\n\n # Obtain the size from the 'description'\n size_field = BinSearchProvider.size_regex.search(col['subject'].text)\n if size_field:\n size_field = size_field.group(1)\n size = convert_size(size_field, sep='\\xa0') or -1\n size = int(size)\n\n download_url = urljoin(self.url, '{post_url}|nzb_id={nzb_id}'.format(post_url=post_url, nzb_id=nzb_id))\n\n # For future use\n # detail_url = 'https://www.binsearch.info/?q={0}'.format(title)\n human_time = True\n date = col['age' if mode != 'RSS' else 'date'].get_text(strip=True).replace('-', ' ')\n if mode == 'RSS':\n human_time = False\n pubdate_raw = date\n pubdate = self.parse_pubdate(pubdate_raw, human_time=human_time)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0}', title)\n\n items.append(item)\n\n return items\n\n @staticmethod\n def clean_title(title, mode):\n \"\"\"\n Clean title field, using a series of regex.\n\n RSS search requires different cleaning then the other searches.\n When adding to this function, make sure you update the tests.\n \"\"\"\n try:\n if mode == 'RSS':\n title = BinSearchProvider.title_regex_rss.search(title).group(1)\n else:\n title = BinSearchProvider.title_regex.search(title).group(1)\n if BinSearchProvider.title_reqex_clean.search(title):\n title = BinSearchProvider.title_reqex_clean.search(title).group(1)\n for extension in ('.nfo', '.par2', '.rar', '.zip', '.nzb', '.part'):\n # Strip extensions that aren't part of the file name\n if title.endswith(extension):\n title = title[:len(title) - len(extension)]\n return title\n except AttributeError:\n return None\n\n def download_result(self, result):\n \"\"\"\n Download result from provider.\n\n This is used when a blackhole is used for sending the nzb file to the nzb client.\n For now the url and the post data is stored as one string in the db, using a pipe (|) to separate them.\n\n :param result: A SearchResult object.\n :return: The result of the nzb download (True/False).\n \"\"\"\n if not self.login():\n return False\n\n result_name = sanitize_filename(result.name)\n filename = join(self._get_storage_dir(), result_name + '.' 
+ self.provider_type)\n\n if result.url.startswith('http'):\n self.session.headers.update({\n 'Referer': '/'.join(result.url.split('/')[:3]) + '/'\n })\n\n log.info('Downloading {result} from {provider} at {url}',\n {'result': result.name, 'provider': self.name, 'url': result.url})\n\n verify = False if self.public else None\n\n url, data = result.url.split('|')\n\n data = {\n data.split('=')[1]: 'on',\n 'action': 'nzb',\n }\n\n if download_file(url, filename, method='POST', data=data, session=self.session,\n headers=self.headers, verify=verify):\n\n if self._verify_download(filename):\n log.info('Saved {result} to {location}',\n {'result': result.name, 'location': filename})\n return True\n\n return False\n\n def download_nzb_for_post(self, result):\n \"\"\"\n Download the nzb content, prior to sending it to the nzb download client.\n\n :param result: Nzb SearchResult object.\n :return: The content of the nzb file if successful else None.\n \"\"\"\n if not self.login():\n return False\n\n # For now to separate the url and the post data, where splitting it with a pipe.\n url, data = result.url.split('|')\n\n data = {\n data.split('=')[1]: 'on',\n 'action': 'nzb',\n }\n\n log.info('Downloading {result} from {provider} at {url} and data {data}',\n {'result': result.name, 'provider': self.name, 'url': result.url, 'data': data})\n\n verify = False if self.public else None\n\n response = self.session.post(url, data=data, headers=self.session.headers,\n verify=verify, hooks={}, allow_redirects=True)\n if not response or not response.content:\n log.warning('Failed to download the NZB from BinSearch')\n return None\n\n # Validate that the result has the content of a valid nzb.\n if not BinSearchProvider.nzb_check_segment.search(response.content):\n log.warning('Result returned from BinSearch was not a valid NZB')\n return None\n\n return response.content\n\n def _get_size(self, item):\n \"\"\"\n Get result size.\n\n Overwrite this, as the default _get_size() from nzb_provider isn't working for us.\n :param item:\n :return: size in bytes or -1\n \"\"\"\n return item.get('size', -1)\n\n\nprovider = BinSearchProvider()\n", "path": "medusa/providers/nzb/binsearch.py"}], "after_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for Binsearch provider.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\nfrom builtins import zip\nfrom os.path import join\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size, sanitize_filename\nfrom medusa.helpers import download_file\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.nzb.nzb_provider import NZBProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BinSearchProvider(NZBProvider):\n \"\"\"BinSearch Newznab provider.\"\"\"\n\n size_regex = re.compile(r'size: (\\d+\\.\\d+\\xa0\\w{2}), parts', re.I)\n title_regex = re.compile(r'\\\"([^\\\"]+)\"', re.I)\n title_reqex_clean = re.compile(r'^[ \\d_]+ (.+)')\n title_regex_rss = re.compile(r'- \\\"([^\\\"]+)\"', re.I)\n nzb_check_segment = re.compile(r'<segment bytes=\"[\\d]+\"')\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BinSearchProvider, self).__init__('BinSearch')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://www.binsearch.info'\n self.urls = {\n 'search': urljoin(self.url, 'index.php'),\n 'rss': urljoin(self.url, 'browse.php'),\n }\n\n # 
Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self, min_time=10)\n\n def search(self, search_strings, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n search_params = {\n 'adv_age': '',\n 'xminsize': 20,\n 'max': 250,\n }\n groups = [1, 2]\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n # https://www.binsearch.info/browse.php?bg=alt.binaries.teevee&server=2\n for search_string in search_strings[mode]:\n search_params['q'] = search_string\n for group in groups:\n # Try both 'search in the most popular groups' & 'search in the other groups' modes\n search_params['server'] = group\n if mode != 'RSS':\n log.debug('Search string: {search}', {'search': search_string})\n search_url = self.urls['search']\n else:\n search_params = {\n 'bg': 'alt.binaries.teevee',\n 'server': 2,\n 'max': 50,\n }\n search_url = self.urls['rss']\n\n response = self.session.get(search_url, params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n def process_column_header(td):\n return td.get_text(strip=True).lower()\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n\n # We need to store the post url, to be used with every result later on.\n post_url = html.find('form', {'method': 'post'})['action']\n\n table = html.find('table', class_='xMenuT')\n rows = table('tr') if table else []\n row_offset = 1\n if not rows or not len(rows) - row_offset:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n headers = rows[0]('th')\n # 0, 1, subject, poster, group, age\n labels = [process_column_header(header) or idx\n for idx, header in enumerate(headers)]\n\n # Skip column headers\n rows = rows[row_offset:]\n for row in rows:\n try:\n col = dict(list(zip(labels, row('td'))))\n nzb_id_input = col[0 if mode == 'RSS' else 1].find('input')\n if not nzb_id_input:\n continue\n nzb_id = nzb_id_input['name']\n # Try and get the the article subject from the weird binsearch format\n title = self.clean_title(col['subject'].text, mode)\n\n except AttributeError:\n log.debug('Parsing rows, that may not always have useful info. 
Skipping to next.')\n continue\n if not all([title, nzb_id]):\n continue\n\n # Obtain the size from the 'description'\n size_field = BinSearchProvider.size_regex.search(col['subject'].text)\n if size_field:\n size_field = size_field.group(1)\n size = convert_size(size_field, sep='\\xa0') or -1\n size = int(size)\n\n download_url = urljoin(self.url, '{post_url}|nzb_id={nzb_id}'.format(post_url=post_url, nzb_id=nzb_id))\n\n # For future use\n # detail_url = 'https://www.binsearch.info/?q={0}'.format(title)\n human_time = True\n date = col['age' if mode != 'RSS' else 'date'].get_text(strip=True).replace('-', ' ')\n if mode == 'RSS':\n human_time = False\n pubdate_raw = date\n pubdate = self.parse_pubdate(pubdate_raw, human_time=human_time)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0}', title)\n\n items.append(item)\n\n return items\n\n @staticmethod\n def clean_title(title, mode):\n \"\"\"\n Clean title field, using a series of regex.\n\n RSS search requires different cleaning then the other searches.\n When adding to this function, make sure you update the tests.\n \"\"\"\n try:\n if mode == 'RSS':\n title = BinSearchProvider.title_regex_rss.search(title).group(1)\n else:\n title = BinSearchProvider.title_regex.search(title).group(1)\n if BinSearchProvider.title_reqex_clean.search(title):\n title = BinSearchProvider.title_reqex_clean.search(title).group(1)\n for extension in ('.nfo', '.par2', '.rar', '.zip', '.nzb', '.part'):\n # Strip extensions that aren't part of the file name\n if title.endswith(extension):\n title = title[:len(title) - len(extension)]\n return title\n except AttributeError:\n return None\n\n def download_result(self, result):\n \"\"\"\n Download result from provider.\n\n This is used when a blackhole is used for sending the nzb file to the nzb client.\n For now the url and the post data is stored as one string in the db, using a pipe (|) to separate them.\n\n :param result: A SearchResult object.\n :return: The result of the nzb download (True/False).\n \"\"\"\n if not self.login():\n return False\n\n result_name = sanitize_filename(result.name)\n filename = join(self._get_storage_dir(), result_name + '.' 
+ self.provider_type)\n\n if result.url.startswith('http'):\n self.session.headers.update({\n 'Referer': '/'.join(result.url.split('/')[:3]) + '/'\n })\n\n log.info('Downloading {result} from {provider} at {url}',\n {'result': result.name, 'provider': self.name, 'url': result.url})\n\n verify = False if self.public else None\n\n url, data = result.url.split('|')\n\n data = {\n data.split('=')[1]: 'on',\n 'action': 'nzb',\n }\n\n if download_file(url, filename, method='POST', data=data, session=self.session,\n headers=self.headers, verify=verify):\n\n if self._verify_download(filename):\n log.info('Saved {result} to {location}',\n {'result': result.name, 'location': filename})\n return True\n\n return False\n\n def download_nzb_for_post(self, result):\n \"\"\"\n Download the nzb content, prior to sending it to the nzb download client.\n\n :param result: Nzb SearchResult object.\n :return: The content of the nzb file if successful else None.\n \"\"\"\n if not self.login():\n return False\n\n # For now to separate the url and the post data, where splitting it with a pipe.\n url, data = result.url.split('|')\n\n data = {\n data.split('=')[1]: 'on',\n 'action': 'nzb',\n }\n\n log.info('Downloading {result} from {provider} at {url} and data {data}',\n {'result': result.name, 'provider': self.name, 'url': result.url, 'data': data})\n\n verify = False if self.public else None\n\n response = self.session.post(url, data=data, headers=self.session.headers,\n verify=verify, hooks={}, allow_redirects=True)\n if not response or not response.content:\n log.warning('Failed to download the NZB from BinSearch')\n return None\n\n # Validate that the result has the content of a valid nzb.\n if not BinSearchProvider.nzb_check_segment.search(response.text):\n log.warning('Result returned from BinSearch was not a valid NZB')\n return None\n\n return response.content\n\n def _get_size(self, item):\n \"\"\"\n Get result size.\n\n Overwrite this, as the default _get_size() from nzb_provider isn't working for us.\n :param item:\n :return: size in bytes or -1\n \"\"\"\n return item.get('size', -1)\n\n\nprovider = BinSearchProvider()\n", "path": "medusa/providers/nzb/binsearch.py"}]} |
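The BinSearch patch in the row above comes down to a Python 3 type rule: `re` refuses to apply a pattern compiled from a `str` to a `bytes` payload, and `requests` exposes the raw body as bytes via `response.content` while `response.text` is the decoded string. The sketch below reproduces the error and both ways around it; the sample payload is invented for illustration, and only the `nzb_check_segment` pattern is taken from the provider code shown above.

```python
import re

# Pattern compiled from a str literal, exactly as in the provider above.
nzb_check_segment = re.compile(r'<segment bytes="[\d]+"')

raw_body = b'<segment bytes="12345" number="1">...</segment>'  # stands in for response.content (bytes)
text_body = raw_body.decode('utf-8')                           # stands in for response.text (str)

try:
    nzb_check_segment.search(raw_body)            # str pattern applied to a bytes object
except TypeError as exc:
    print(exc)                                    # cannot use a string pattern on a bytes-like object

# Fix 1: search the decoded text, which is what the patch above does.
print(bool(nzb_check_segment.search(text_body)))       # True

# Fix 2: keep the payload as bytes and compile a bytes pattern instead.
nzb_check_segment_bytes = re.compile(rb'<segment bytes="[\d]+"')
print(bool(nzb_check_segment_bytes.search(raw_body)))  # True
```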
gh_patches_debug_1431 | rasdani/github-patches | git_diff | goauthentik__authentik-3769 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support HA postgresql
When using a HA installation of postgresql connection problems with the api server arise during initial load of the user dashboard.
Pgbouncer in transaction pooling mode requires custom settings in order to function correctly.
It would be nice if the user could specify that a HA installation is used and the settings are then adjusted automatically.
```
# https://docs.djangoproject.com/en/4.0/ref/databases/#transaction-pooling-server-side-cursors
DATABASES['default']['DISABLE_SERVER_SIDE_CURSORS'] = True
# https://docs.djangoproject.com/en/4.0/ref/databases/#persistent-connections
DATABASES['default']['CONN_MAX_AGE'] = None # persistent
```
Currently the settings do not include these options:
https://github.com/goauthentik/authentik/blob/89c84f10d0e6e70e51444a9fbf18980ba25008de/authentik/root/settings.py#L279-L288
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/root/settings.py`
Content:
```
1 """root settings for authentik"""
2
3 import importlib
4 import logging
5 import os
6 from hashlib import sha512
7 from urllib.parse import quote_plus
8
9 import structlog
10 from celery.schedules import crontab
11 from sentry_sdk import set_tag
12
13 from authentik import ENV_GIT_HASH_KEY, __version__
14 from authentik.lib.config import CONFIG
15 from authentik.lib.logging import add_process_id
16 from authentik.lib.sentry import sentry_init
17 from authentik.lib.utils.reflection import get_env
18 from authentik.stages.password import BACKEND_APP_PASSWORD, BACKEND_INBUILT, BACKEND_LDAP
19
20 LOGGER = structlog.get_logger()
21
22 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
23 BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
24 STATIC_ROOT = BASE_DIR + "/static"
25 STATICFILES_DIRS = [BASE_DIR + "/web"]
26 MEDIA_ROOT = BASE_DIR + "/media"
27
28 DEBUG = CONFIG.y_bool("debug")
29 SECRET_KEY = CONFIG.y("secret_key")
30
31 INTERNAL_IPS = ["127.0.0.1"]
32 ALLOWED_HOSTS = ["*"]
33 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
34 SECURE_CROSS_ORIGIN_OPENER_POLICY = None
35 LOGIN_URL = "authentik_flows:default-authentication"
36
37 # Custom user model
38 AUTH_USER_MODEL = "authentik_core.User"
39
40 CSRF_COOKIE_NAME = "authentik_csrf"
41 CSRF_HEADER_NAME = "HTTP_X_AUTHENTIK_CSRF"
42 LANGUAGE_COOKIE_NAME = "authentik_language"
43 SESSION_COOKIE_NAME = "authentik_session"
44 SESSION_COOKIE_DOMAIN = CONFIG.y("cookie_domain", None)
45
46 AUTHENTICATION_BACKENDS = [
47 "django.contrib.auth.backends.ModelBackend",
48 BACKEND_INBUILT,
49 BACKEND_APP_PASSWORD,
50 BACKEND_LDAP,
51 "guardian.backends.ObjectPermissionBackend",
52 ]
53
54 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
55
56 # Application definition
57 INSTALLED_APPS = [
58 "django.contrib.auth",
59 "django.contrib.contenttypes",
60 "django.contrib.sessions",
61 "django.contrib.messages",
62 "django.contrib.staticfiles",
63 "django.contrib.humanize",
64 "authentik.admin",
65 "authentik.api",
66 "authentik.crypto",
67 "authentik.events",
68 "authentik.flows",
69 "authentik.lib",
70 "authentik.outposts",
71 "authentik.policies.dummy",
72 "authentik.policies.event_matcher",
73 "authentik.policies.expiry",
74 "authentik.policies.expression",
75 "authentik.policies.hibp",
76 "authentik.policies.password",
77 "authentik.policies.reputation",
78 "authentik.policies",
79 "authentik.providers.ldap",
80 "authentik.providers.oauth2",
81 "authentik.providers.proxy",
82 "authentik.providers.saml",
83 "authentik.recovery",
84 "authentik.sources.ldap",
85 "authentik.sources.oauth",
86 "authentik.sources.plex",
87 "authentik.sources.saml",
88 "authentik.stages.authenticator_duo",
89 "authentik.stages.authenticator_sms",
90 "authentik.stages.authenticator_static",
91 "authentik.stages.authenticator_totp",
92 "authentik.stages.authenticator_validate",
93 "authentik.stages.authenticator_webauthn",
94 "authentik.stages.captcha",
95 "authentik.stages.consent",
96 "authentik.stages.deny",
97 "authentik.stages.dummy",
98 "authentik.stages.email",
99 "authentik.stages.identification",
100 "authentik.stages.invitation",
101 "authentik.stages.password",
102 "authentik.stages.prompt",
103 "authentik.stages.user_delete",
104 "authentik.stages.user_login",
105 "authentik.stages.user_logout",
106 "authentik.stages.user_write",
107 "authentik.tenants",
108 "authentik.blueprints",
109 "rest_framework",
110 "django_filters",
111 "drf_spectacular",
112 "guardian",
113 "django_prometheus",
114 "channels",
115 ]
116
117 GUARDIAN_MONKEY_PATCH = False
118
119 SPECTACULAR_SETTINGS = {
120 "TITLE": "authentik",
121 "DESCRIPTION": "Making authentication simple.",
122 "VERSION": __version__,
123 "COMPONENT_SPLIT_REQUEST": True,
124 "SCHEMA_PATH_PREFIX": "/api/v([0-9]+(beta)?)",
125 "SCHEMA_PATH_PREFIX_TRIM": True,
126 "SERVERS": [
127 {
128 "url": "/api/v3/",
129 },
130 ],
131 "CONTACT": {
132 "email": "[email protected]",
133 },
134 "AUTHENTICATION_WHITELIST": ["authentik.api.authentication.TokenAuthentication"],
135 "LICENSE": {
136 "name": "GNU GPLv3",
137 "url": "https://github.com/goauthentik/authentik/blob/main/LICENSE",
138 },
139 "ENUM_NAME_OVERRIDES": {
140 "EventActions": "authentik.events.models.EventAction",
141 "ChallengeChoices": "authentik.flows.challenge.ChallengeTypes",
142 "FlowDesignationEnum": "authentik.flows.models.FlowDesignation",
143 "PolicyEngineMode": "authentik.policies.models.PolicyEngineMode",
144 "ProxyMode": "authentik.providers.proxy.models.ProxyMode",
145 "PromptTypeEnum": "authentik.stages.prompt.models.FieldTypes",
146 "LDAPAPIAccessMode": "authentik.providers.ldap.models.APIAccessMode",
147 },
148 "ENUM_ADD_EXPLICIT_BLANK_NULL_CHOICE": False,
149 "POSTPROCESSING_HOOKS": [
150 "authentik.api.schema.postprocess_schema_responses",
151 "drf_spectacular.hooks.postprocess_schema_enums",
152 ],
153 }
154
155 REST_FRAMEWORK = {
156 "DEFAULT_PAGINATION_CLASS": "authentik.api.pagination.Pagination",
157 "PAGE_SIZE": 100,
158 "DEFAULT_FILTER_BACKENDS": [
159 "rest_framework_guardian.filters.ObjectPermissionsFilter",
160 "django_filters.rest_framework.DjangoFilterBackend",
161 "rest_framework.filters.OrderingFilter",
162 "rest_framework.filters.SearchFilter",
163 ],
164 "DEFAULT_PARSER_CLASSES": [
165 "rest_framework.parsers.JSONParser",
166 ],
167 "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.DjangoObjectPermissions",),
168 "DEFAULT_AUTHENTICATION_CLASSES": (
169 "authentik.api.authentication.TokenAuthentication",
170 "rest_framework.authentication.SessionAuthentication",
171 ),
172 "DEFAULT_RENDERER_CLASSES": [
173 "rest_framework.renderers.JSONRenderer",
174 ],
175 "DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
176 "TEST_REQUEST_DEFAULT_FORMAT": "json",
177 }
178
179 REDIS_PROTOCOL_PREFIX = "redis://"
180 REDIS_CELERY_TLS_REQUIREMENTS = ""
181 if CONFIG.y_bool("redis.tls", False):
182 REDIS_PROTOCOL_PREFIX = "rediss://"
183 REDIS_CELERY_TLS_REQUIREMENTS = f"?ssl_cert_reqs={CONFIG.y('redis.tls_reqs')}"
184 _redis_url = (
185 f"{REDIS_PROTOCOL_PREFIX}:"
186 f"{quote_plus(CONFIG.y('redis.password'))}@{quote_plus(CONFIG.y('redis.host'))}:"
187 f"{int(CONFIG.y('redis.port'))}"
188 )
189
190 CACHES = {
191 "default": {
192 "BACKEND": "django_redis.cache.RedisCache",
193 "LOCATION": f"{_redis_url}/{CONFIG.y('redis.cache_db')}",
194 "TIMEOUT": int(CONFIG.y("redis.cache_timeout", 300)),
195 "OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"},
196 }
197 }
198 DJANGO_REDIS_SCAN_ITERSIZE = 1000
199 DJANGO_REDIS_IGNORE_EXCEPTIONS = True
200 DJANGO_REDIS_LOG_IGNORED_EXCEPTIONS = True
201 SESSION_ENGINE = "django.contrib.sessions.backends.cache"
202 SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
203 SESSION_CACHE_ALIAS = "default"
204 # Configured via custom SessionMiddleware
205 # SESSION_COOKIE_SAMESITE = "None"
206 # SESSION_COOKIE_SECURE = True
207 SESSION_EXPIRE_AT_BROWSER_CLOSE = True
208
209 MESSAGE_STORAGE = "authentik.root.messages.storage.ChannelsStorage"
210
211 MIDDLEWARE = [
212 "authentik.root.middleware.LoggingMiddleware",
213 "django_prometheus.middleware.PrometheusBeforeMiddleware",
214 "authentik.root.middleware.SessionMiddleware",
215 "django.contrib.auth.middleware.AuthenticationMiddleware",
216 "authentik.core.middleware.RequestIDMiddleware",
217 "authentik.tenants.middleware.TenantMiddleware",
218 "authentik.events.middleware.AuditMiddleware",
219 "django.middleware.security.SecurityMiddleware",
220 "django.middleware.common.CommonMiddleware",
221 "django.middleware.csrf.CsrfViewMiddleware",
222 "django.contrib.messages.middleware.MessageMiddleware",
223 "django.middleware.clickjacking.XFrameOptionsMiddleware",
224 "authentik.core.middleware.ImpersonateMiddleware",
225 "django_prometheus.middleware.PrometheusAfterMiddleware",
226 ]
227
228 ROOT_URLCONF = "authentik.root.urls"
229
230 TEMPLATES = [
231 {
232 "BACKEND": "django.template.backends.django.DjangoTemplates",
233 "DIRS": [CONFIG.y("email.template_dir")],
234 "APP_DIRS": True,
235 "OPTIONS": {
236 "context_processors": [
237 "django.template.context_processors.debug",
238 "django.template.context_processors.request",
239 "django.contrib.auth.context_processors.auth",
240 "django.contrib.messages.context_processors.messages",
241 "authentik.tenants.utils.context_processor",
242 ],
243 },
244 },
245 ]
246
247 ASGI_APPLICATION = "authentik.root.asgi.application"
248
249 CHANNEL_LAYERS = {
250 "default": {
251 "BACKEND": "channels_redis.core.RedisChannelLayer",
252 "CONFIG": {
253 "hosts": [f"{_redis_url}/{CONFIG.y('redis.ws_db')}"],
254 },
255 },
256 }
257
258
259 # Database
260 # https://docs.djangoproject.com/en/2.1/ref/settings/#databases
261
262 DATABASES = {
263 "default": {
264 "ENGINE": "django_prometheus.db.backends.postgresql",
265 "HOST": CONFIG.y("postgresql.host"),
266 "NAME": CONFIG.y("postgresql.name"),
267 "USER": CONFIG.y("postgresql.user"),
268 "PASSWORD": CONFIG.y("postgresql.password"),
269 "PORT": int(CONFIG.y("postgresql.port")),
270 }
271 }
272
273 # Email
274 EMAIL_HOST = CONFIG.y("email.host")
275 EMAIL_PORT = int(CONFIG.y("email.port"))
276 EMAIL_HOST_USER = CONFIG.y("email.username")
277 EMAIL_HOST_PASSWORD = CONFIG.y("email.password")
278 EMAIL_USE_TLS = CONFIG.y_bool("email.use_tls", False)
279 EMAIL_USE_SSL = CONFIG.y_bool("email.use_ssl", False)
280 EMAIL_TIMEOUT = int(CONFIG.y("email.timeout"))
281 DEFAULT_FROM_EMAIL = CONFIG.y("email.from")
282 SERVER_EMAIL = DEFAULT_FROM_EMAIL
283 EMAIL_SUBJECT_PREFIX = "[authentik] "
284
285 # Password validation
286 # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
287
288 AUTH_PASSWORD_VALIDATORS = [
289 {
290 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
291 },
292 {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
293 {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
294 {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
295 ]
296
297
298 # Internationalization
299 # https://docs.djangoproject.com/en/2.1/topics/i18n/
300
301 LANGUAGE_CODE = "en-us"
302
303 TIME_ZONE = "UTC"
304
305 USE_I18N = True
306
307 USE_TZ = True
308
309 LOCALE_PATHS = ["./locale"]
310
311 # Celery settings
312 # Add a 10 minute timeout to all Celery tasks.
313 CELERY_TASK_SOFT_TIME_LIMIT = 600
314 CELERY_WORKER_MAX_TASKS_PER_CHILD = 50
315 CELERY_WORKER_CONCURRENCY = 2
316 CELERY_BEAT_SCHEDULE = {
317 "clean_expired_models": {
318 "task": "authentik.core.tasks.clean_expired_models",
319 "schedule": crontab(minute="2-59/5"),
320 "options": {"queue": "authentik_scheduled"},
321 },
322 "user_cleanup": {
323 "task": "authentik.core.tasks.clean_temporary_users",
324 "schedule": crontab(minute="9-59/5"),
325 "options": {"queue": "authentik_scheduled"},
326 },
327 }
328 CELERY_TASK_CREATE_MISSING_QUEUES = True
329 CELERY_TASK_DEFAULT_QUEUE = "authentik"
330 CELERY_BROKER_URL = (
331 f"{_redis_url}/{CONFIG.y('redis.message_queue_db')}{REDIS_CELERY_TLS_REQUIREMENTS}"
332 )
333 CELERY_RESULT_BACKEND = (
334 f"{_redis_url}/{CONFIG.y('redis.message_queue_db')}{REDIS_CELERY_TLS_REQUIREMENTS}"
335 )
336
337 # Sentry integration
338 env = get_env()
339 _ERROR_REPORTING = CONFIG.y_bool("error_reporting.enabled", False)
340 if _ERROR_REPORTING:
341 sentry_env = CONFIG.y("error_reporting.environment", "customer")
342 sentry_init()
343 set_tag("authentik.uuid", sha512(str(SECRET_KEY).encode("ascii")).hexdigest()[:16])
344
345
346 # Static files (CSS, JavaScript, Images)
347 # https://docs.djangoproject.com/en/2.1/howto/static-files/
348
349 STATIC_URL = "/static/"
350 MEDIA_URL = "/media/"
351
352 TEST = False
353 TEST_RUNNER = "authentik.root.test_runner.PytestTestRunner"
354 # We can't check TEST here as its set later by the test runner
355 LOG_LEVEL = CONFIG.y("log_level").upper() if "TF_BUILD" not in os.environ else "DEBUG"
356 # We could add a custom level to stdlib logging and structlog, but it's not easy or clean
357 # https://stackoverflow.com/questions/54505487/custom-log-level-not-working-with-structlog
358 # Additionally, the entire code uses debug as highest level so that would have to be re-written too
359 if LOG_LEVEL == "TRACE":
360 LOG_LEVEL = "DEBUG"
361
362 structlog.configure_once(
363 processors=[
364 structlog.stdlib.add_log_level,
365 structlog.stdlib.add_logger_name,
366 structlog.contextvars.merge_contextvars,
367 add_process_id,
368 structlog.stdlib.PositionalArgumentsFormatter(),
369 structlog.processors.TimeStamper(fmt="iso", utc=False),
370 structlog.processors.StackInfoRenderer(),
371 structlog.processors.dict_tracebacks,
372 structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
373 ],
374 logger_factory=structlog.stdlib.LoggerFactory(),
375 wrapper_class=structlog.make_filtering_bound_logger(
376 getattr(logging, LOG_LEVEL, logging.WARNING)
377 ),
378 cache_logger_on_first_use=True,
379 )
380
381 LOG_PRE_CHAIN = [
382 # Add the log level and a timestamp to the event_dict if the log entry
383 # is not from structlog.
384 structlog.stdlib.add_log_level,
385 structlog.stdlib.add_logger_name,
386 structlog.processors.TimeStamper(),
387 structlog.processors.StackInfoRenderer(),
388 ]
389
390 LOGGING = {
391 "version": 1,
392 "disable_existing_loggers": False,
393 "formatters": {
394 "json": {
395 "()": structlog.stdlib.ProcessorFormatter,
396 "processor": structlog.processors.JSONRenderer(sort_keys=True),
397 "foreign_pre_chain": LOG_PRE_CHAIN,
398 },
399 "console": {
400 "()": structlog.stdlib.ProcessorFormatter,
401 "processor": structlog.dev.ConsoleRenderer(colors=DEBUG),
402 "foreign_pre_chain": LOG_PRE_CHAIN,
403 },
404 },
405 "handlers": {
406 "console": {
407 "level": "DEBUG",
408 "class": "logging.StreamHandler",
409 "formatter": "console" if DEBUG else "json",
410 },
411 },
412 "loggers": {},
413 }
414
415 _LOGGING_HANDLER_MAP = {
416 "": LOG_LEVEL,
417 "authentik": LOG_LEVEL,
418 "django": "WARNING",
419 "celery": "WARNING",
420 "selenium": "WARNING",
421 "docker": "WARNING",
422 "urllib3": "WARNING",
423 "websockets": "WARNING",
424 "daphne": "WARNING",
425 "kubernetes": "INFO",
426 "asyncio": "WARNING",
427 "redis": "WARNING",
428 "silk": "INFO",
429 }
430 for handler_name, level in _LOGGING_HANDLER_MAP.items():
431 # pyright: reportGeneralTypeIssues=false
432 LOGGING["loggers"][handler_name] = {
433 "handlers": ["console"],
434 "level": level,
435 "propagate": False,
436 }
437
438
439 _DISALLOWED_ITEMS = [
440 "INSTALLED_APPS",
441 "MIDDLEWARE",
442 "AUTHENTICATION_BACKENDS",
443 "CELERY_BEAT_SCHEDULE",
444 ]
445 # Load subapps's INSTALLED_APPS
446 for _app in INSTALLED_APPS:
447 if _app.startswith("authentik"):
448 if "apps" in _app:
449 _app = ".".join(_app.split(".")[:-2])
450 try:
451 app_settings = importlib.import_module(f"{_app}.settings")
452 INSTALLED_APPS.extend(getattr(app_settings, "INSTALLED_APPS", []))
453 MIDDLEWARE.extend(getattr(app_settings, "MIDDLEWARE", []))
454 AUTHENTICATION_BACKENDS.extend(getattr(app_settings, "AUTHENTICATION_BACKENDS", []))
455 CELERY_BEAT_SCHEDULE.update(getattr(app_settings, "CELERY_BEAT_SCHEDULE", {}))
456 for _attr in dir(app_settings):
457 if not _attr.startswith("__") and _attr not in _DISALLOWED_ITEMS:
458 globals()[_attr] = getattr(app_settings, _attr)
459 except ImportError:
460 pass
461
462 if DEBUG:
463 CELERY_TASK_ALWAYS_EAGER = True
464 os.environ[ENV_GIT_HASH_KEY] = "dev"
465 INSTALLED_APPS.append("silk")
466 SILKY_PYTHON_PROFILER = True
467 MIDDLEWARE = ["silk.middleware.SilkyMiddleware"] + MIDDLEWARE
468
469 INSTALLED_APPS.append("authentik.core")
470
471 CONFIG.log("info", "Booting authentik", version=__version__)
472
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/root/settings.py b/authentik/root/settings.py
--- a/authentik/root/settings.py
+++ b/authentik/root/settings.py
@@ -270,6 +270,12 @@
}
}
+if CONFIG.y_bool("postgresql.use_pgbouncer", False):
+ # https://docs.djangoproject.com/en/4.0/ref/databases/#transaction-pooling-server-side-cursors
+ DATABASES["default"]["DISABLE_SERVER_SIDE_CURSORS"] = True
+ # https://docs.djangoproject.com/en/4.0/ref/databases/#persistent-connections
+ DATABASES["default"]["CONN_MAX_AGE"] = None # persistent
+
# Email
EMAIL_HOST = CONFIG.y("email.host")
EMAIL_PORT = int(CONFIG.y("email.port"))
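Stripped of authentik's CONFIG wrapper, the change above is the standard Django recipe for running behind PgBouncer in transaction pooling mode. Below is a generic sketch of the same idea in a plain Django settings module; the environment variable names are placeholders for illustration and are not authentik's actual configuration keys.

```python
# Plain Django settings sketch (not authentik's settings.py).
import os

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "HOST": os.environ.get("POSTGRES_HOST", "localhost"),
        "NAME": os.environ.get("POSTGRES_DB", "app"),
        "USER": os.environ.get("POSTGRES_USER", "app"),
        "PASSWORD": os.environ.get("POSTGRES_PASSWORD", ""),
        "PORT": int(os.environ.get("POSTGRES_PORT", "5432")),
    }
}

if os.environ.get("POSTGRES_USE_PGBOUNCER", "false").lower() == "true":
    # Transaction pooling hands a different server connection to each transaction,
    # so server-side cursors cannot survive between them; Django must not use them.
    DATABASES["default"]["DISABLE_SERVER_SIDE_CURSORS"] = True
    # CONN_MAX_AGE = None keeps Django's connections open indefinitely;
    # the real pooling is delegated to PgBouncer.
    DATABASES["default"]["CONN_MAX_AGE"] = None
```

The opt-in flag keeps the default behaviour unchanged for installations that talk to PostgreSQL directly, where server-side cursors remain safe to use, which is why the patch gates both options behind a single configuration switch.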
| {"golden_diff": "diff --git a/authentik/root/settings.py b/authentik/root/settings.py\n--- a/authentik/root/settings.py\n+++ b/authentik/root/settings.py\n@@ -270,6 +270,12 @@\n }\n }\n \n+if CONFIG.y_bool(\"postgresql.use_pgbouncer\", False):\n+ # https://docs.djangoproject.com/en/4.0/ref/databases/#transaction-pooling-server-side-cursors\n+ DATABASES[\"default\"][\"DISABLE_SERVER_SIDE_CURSORS\"] = True\n+ # https://docs.djangoproject.com/en/4.0/ref/databases/#persistent-connections\n+ DATABASES[\"default\"][\"CONN_MAX_AGE\"] = None # persistent\n+\n # Email\n EMAIL_HOST = CONFIG.y(\"email.host\")\n EMAIL_PORT = int(CONFIG.y(\"email.port\"))\n", "issue": "Support HA postgresql\nWhen using a HA installation of postgresql connection problems with the api server arise during initial load of the user dashboard.\r\n\r\nPgbouncer in transaction pooling mode requires custom settings in order to function correctly.\r\nIt would be nice if the user could specify that a HA installation is used and the settings are then adjusted automatically.\r\n\r\n```\r\n# https://docs.djangoproject.com/en/4.0/ref/databases/#transaction-pooling-server-side-cursors\r\nDATABASES['default']['DISABLE_SERVER_SIDE_CURSORS'] = True\r\n# https://docs.djangoproject.com/en/4.0/ref/databases/#persistent-connections\r\nDATABASES['default']['CONN_MAX_AGE'] = None # persistent\r\n```\r\n\r\nCurrently the settings do not include these options:\r\nhttps://github.com/goauthentik/authentik/blob/89c84f10d0e6e70e51444a9fbf18980ba25008de/authentik/root/settings.py#L279-L288\r\n\n", "before_files": [{"content": "\"\"\"root settings for authentik\"\"\"\n\nimport importlib\nimport logging\nimport os\nfrom hashlib import sha512\nfrom urllib.parse import quote_plus\n\nimport structlog\nfrom celery.schedules import crontab\nfrom sentry_sdk import set_tag\n\nfrom authentik import ENV_GIT_HASH_KEY, __version__\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.logging import add_process_id\nfrom authentik.lib.sentry import sentry_init\nfrom authentik.lib.utils.reflection import get_env\nfrom authentik.stages.password import BACKEND_APP_PASSWORD, BACKEND_INBUILT, BACKEND_LDAP\n\nLOGGER = structlog.get_logger()\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nSTATIC_ROOT = BASE_DIR + \"/static\"\nSTATICFILES_DIRS = [BASE_DIR + \"/web\"]\nMEDIA_ROOT = BASE_DIR + \"/media\"\n\nDEBUG = CONFIG.y_bool(\"debug\")\nSECRET_KEY = CONFIG.y(\"secret_key\")\n\nINTERNAL_IPS = [\"127.0.0.1\"]\nALLOWED_HOSTS = [\"*\"]\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_CROSS_ORIGIN_OPENER_POLICY = None\nLOGIN_URL = \"authentik_flows:default-authentication\"\n\n# Custom user model\nAUTH_USER_MODEL = \"authentik_core.User\"\n\nCSRF_COOKIE_NAME = \"authentik_csrf\"\nCSRF_HEADER_NAME = \"HTTP_X_AUTHENTIK_CSRF\"\nLANGUAGE_COOKIE_NAME = \"authentik_language\"\nSESSION_COOKIE_NAME = \"authentik_session\"\nSESSION_COOKIE_DOMAIN = CONFIG.y(\"cookie_domain\", None)\n\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n BACKEND_INBUILT,\n BACKEND_APP_PASSWORD,\n BACKEND_LDAP,\n \"guardian.backends.ObjectPermissionBackend\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Application definition\nINSTALLED_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n 
\"django.contrib.humanize\",\n \"authentik.admin\",\n \"authentik.api\",\n \"authentik.crypto\",\n \"authentik.events\",\n \"authentik.flows\",\n \"authentik.lib\",\n \"authentik.outposts\",\n \"authentik.policies.dummy\",\n \"authentik.policies.event_matcher\",\n \"authentik.policies.expiry\",\n \"authentik.policies.expression\",\n \"authentik.policies.hibp\",\n \"authentik.policies.password\",\n \"authentik.policies.reputation\",\n \"authentik.policies\",\n \"authentik.providers.ldap\",\n \"authentik.providers.oauth2\",\n \"authentik.providers.proxy\",\n \"authentik.providers.saml\",\n \"authentik.recovery\",\n \"authentik.sources.ldap\",\n \"authentik.sources.oauth\",\n \"authentik.sources.plex\",\n \"authentik.sources.saml\",\n \"authentik.stages.authenticator_duo\",\n \"authentik.stages.authenticator_sms\",\n \"authentik.stages.authenticator_static\",\n \"authentik.stages.authenticator_totp\",\n \"authentik.stages.authenticator_validate\",\n \"authentik.stages.authenticator_webauthn\",\n \"authentik.stages.captcha\",\n \"authentik.stages.consent\",\n \"authentik.stages.deny\",\n \"authentik.stages.dummy\",\n \"authentik.stages.email\",\n \"authentik.stages.identification\",\n \"authentik.stages.invitation\",\n \"authentik.stages.password\",\n \"authentik.stages.prompt\",\n \"authentik.stages.user_delete\",\n \"authentik.stages.user_login\",\n \"authentik.stages.user_logout\",\n \"authentik.stages.user_write\",\n \"authentik.tenants\",\n \"authentik.blueprints\",\n \"rest_framework\",\n \"django_filters\",\n \"drf_spectacular\",\n \"guardian\",\n \"django_prometheus\",\n \"channels\",\n]\n\nGUARDIAN_MONKEY_PATCH = False\n\nSPECTACULAR_SETTINGS = {\n \"TITLE\": \"authentik\",\n \"DESCRIPTION\": \"Making authentication simple.\",\n \"VERSION\": __version__,\n \"COMPONENT_SPLIT_REQUEST\": True,\n \"SCHEMA_PATH_PREFIX\": \"/api/v([0-9]+(beta)?)\",\n \"SCHEMA_PATH_PREFIX_TRIM\": True,\n \"SERVERS\": [\n {\n \"url\": \"/api/v3/\",\n },\n ],\n \"CONTACT\": {\n \"email\": \"[email protected]\",\n },\n \"AUTHENTICATION_WHITELIST\": [\"authentik.api.authentication.TokenAuthentication\"],\n \"LICENSE\": {\n \"name\": \"GNU GPLv3\",\n \"url\": \"https://github.com/goauthentik/authentik/blob/main/LICENSE\",\n },\n \"ENUM_NAME_OVERRIDES\": {\n \"EventActions\": \"authentik.events.models.EventAction\",\n \"ChallengeChoices\": \"authentik.flows.challenge.ChallengeTypes\",\n \"FlowDesignationEnum\": \"authentik.flows.models.FlowDesignation\",\n \"PolicyEngineMode\": \"authentik.policies.models.PolicyEngineMode\",\n \"ProxyMode\": \"authentik.providers.proxy.models.ProxyMode\",\n \"PromptTypeEnum\": \"authentik.stages.prompt.models.FieldTypes\",\n \"LDAPAPIAccessMode\": \"authentik.providers.ldap.models.APIAccessMode\",\n },\n \"ENUM_ADD_EXPLICIT_BLANK_NULL_CHOICE\": False,\n \"POSTPROCESSING_HOOKS\": [\n \"authentik.api.schema.postprocess_schema_responses\",\n \"drf_spectacular.hooks.postprocess_schema_enums\",\n ],\n}\n\nREST_FRAMEWORK = {\n \"DEFAULT_PAGINATION_CLASS\": \"authentik.api.pagination.Pagination\",\n \"PAGE_SIZE\": 100,\n \"DEFAULT_FILTER_BACKENDS\": [\n \"rest_framework_guardian.filters.ObjectPermissionsFilter\",\n \"django_filters.rest_framework.DjangoFilterBackend\",\n \"rest_framework.filters.OrderingFilter\",\n \"rest_framework.filters.SearchFilter\",\n ],\n \"DEFAULT_PARSER_CLASSES\": [\n \"rest_framework.parsers.JSONParser\",\n ],\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.DjangoObjectPermissions\",),\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n 
\"authentik.api.authentication.TokenAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n \"DEFAULT_RENDERER_CLASSES\": [\n \"rest_framework.renderers.JSONRenderer\",\n ],\n \"DEFAULT_SCHEMA_CLASS\": \"drf_spectacular.openapi.AutoSchema\",\n \"TEST_REQUEST_DEFAULT_FORMAT\": \"json\",\n}\n\nREDIS_PROTOCOL_PREFIX = \"redis://\"\nREDIS_CELERY_TLS_REQUIREMENTS = \"\"\nif CONFIG.y_bool(\"redis.tls\", False):\n REDIS_PROTOCOL_PREFIX = \"rediss://\"\n REDIS_CELERY_TLS_REQUIREMENTS = f\"?ssl_cert_reqs={CONFIG.y('redis.tls_reqs')}\"\n_redis_url = (\n f\"{REDIS_PROTOCOL_PREFIX}:\"\n f\"{quote_plus(CONFIG.y('redis.password'))}@{quote_plus(CONFIG.y('redis.host'))}:\"\n f\"{int(CONFIG.y('redis.port'))}\"\n)\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"{_redis_url}/{CONFIG.y('redis.cache_db')}\",\n \"TIMEOUT\": int(CONFIG.y(\"redis.cache_timeout\", 300)),\n \"OPTIONS\": {\"CLIENT_CLASS\": \"django_redis.client.DefaultClient\"},\n }\n}\nDJANGO_REDIS_SCAN_ITERSIZE = 1000\nDJANGO_REDIS_IGNORE_EXCEPTIONS = True\nDJANGO_REDIS_LOG_IGNORED_EXCEPTIONS = True\nSESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\nSESSION_SERIALIZER = \"django.contrib.sessions.serializers.PickleSerializer\"\nSESSION_CACHE_ALIAS = \"default\"\n# Configured via custom SessionMiddleware\n# SESSION_COOKIE_SAMESITE = \"None\"\n# SESSION_COOKIE_SECURE = True\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\nMESSAGE_STORAGE = \"authentik.root.messages.storage.ChannelsStorage\"\n\nMIDDLEWARE = [\n \"authentik.root.middleware.LoggingMiddleware\",\n \"django_prometheus.middleware.PrometheusBeforeMiddleware\",\n \"authentik.root.middleware.SessionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"authentik.core.middleware.RequestIDMiddleware\",\n \"authentik.tenants.middleware.TenantMiddleware\",\n \"authentik.events.middleware.AuditMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"authentik.core.middleware.ImpersonateMiddleware\",\n \"django_prometheus.middleware.PrometheusAfterMiddleware\",\n]\n\nROOT_URLCONF = \"authentik.root.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [CONFIG.y(\"email.template_dir\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"authentik.tenants.utils.context_processor\",\n ],\n },\n },\n]\n\nASGI_APPLICATION = \"authentik.root.asgi.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels_redis.core.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [f\"{_redis_url}/{CONFIG.y('redis.ws_db')}\"],\n },\n },\n}\n\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django_prometheus.db.backends.postgresql\",\n \"HOST\": CONFIG.y(\"postgresql.host\"),\n \"NAME\": CONFIG.y(\"postgresql.name\"),\n \"USER\": CONFIG.y(\"postgresql.user\"),\n \"PASSWORD\": CONFIG.y(\"postgresql.password\"),\n \"PORT\": int(CONFIG.y(\"postgresql.port\")),\n }\n}\n\n# Email\nEMAIL_HOST = 
CONFIG.y(\"email.host\")\nEMAIL_PORT = int(CONFIG.y(\"email.port\"))\nEMAIL_HOST_USER = CONFIG.y(\"email.username\")\nEMAIL_HOST_PASSWORD = CONFIG.y(\"email.password\")\nEMAIL_USE_TLS = CONFIG.y_bool(\"email.use_tls\", False)\nEMAIL_USE_SSL = CONFIG.y_bool(\"email.use_ssl\", False)\nEMAIL_TIMEOUT = int(CONFIG.y(\"email.timeout\"))\nDEFAULT_FROM_EMAIL = CONFIG.y(\"email.from\")\nSERVER_EMAIL = DEFAULT_FROM_EMAIL\nEMAIL_SUBJECT_PREFIX = \"[authentik] \"\n\n# Password validation\n# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = [\"./locale\"]\n\n# Celery settings\n# Add a 10 minute timeout to all Celery tasks.\nCELERY_TASK_SOFT_TIME_LIMIT = 600\nCELERY_WORKER_MAX_TASKS_PER_CHILD = 50\nCELERY_WORKER_CONCURRENCY = 2\nCELERY_BEAT_SCHEDULE = {\n \"clean_expired_models\": {\n \"task\": \"authentik.core.tasks.clean_expired_models\",\n \"schedule\": crontab(minute=\"2-59/5\"),\n \"options\": {\"queue\": \"authentik_scheduled\"},\n },\n \"user_cleanup\": {\n \"task\": \"authentik.core.tasks.clean_temporary_users\",\n \"schedule\": crontab(minute=\"9-59/5\"),\n \"options\": {\"queue\": \"authentik_scheduled\"},\n },\n}\nCELERY_TASK_CREATE_MISSING_QUEUES = True\nCELERY_TASK_DEFAULT_QUEUE = \"authentik\"\nCELERY_BROKER_URL = (\n f\"{_redis_url}/{CONFIG.y('redis.message_queue_db')}{REDIS_CELERY_TLS_REQUIREMENTS}\"\n)\nCELERY_RESULT_BACKEND = (\n f\"{_redis_url}/{CONFIG.y('redis.message_queue_db')}{REDIS_CELERY_TLS_REQUIREMENTS}\"\n)\n\n# Sentry integration\nenv = get_env()\n_ERROR_REPORTING = CONFIG.y_bool(\"error_reporting.enabled\", False)\nif _ERROR_REPORTING:\n sentry_env = CONFIG.y(\"error_reporting.environment\", \"customer\")\n sentry_init()\n set_tag(\"authentik.uuid\", sha512(str(SECRET_KEY).encode(\"ascii\")).hexdigest()[:16])\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.1/howto/static-files/\n\nSTATIC_URL = \"/static/\"\nMEDIA_URL = \"/media/\"\n\nTEST = False\nTEST_RUNNER = \"authentik.root.test_runner.PytestTestRunner\"\n# We can't check TEST here as its set later by the test runner\nLOG_LEVEL = CONFIG.y(\"log_level\").upper() if \"TF_BUILD\" not in os.environ else \"DEBUG\"\n# We could add a custom level to stdlib logging and structlog, but it's not easy or clean\n# https://stackoverflow.com/questions/54505487/custom-log-level-not-working-with-structlog\n# Additionally, the entire code uses debug as highest level so that would have to be re-written too\nif LOG_LEVEL == \"TRACE\":\n LOG_LEVEL = \"DEBUG\"\n\nstructlog.configure_once(\n processors=[\n structlog.stdlib.add_log_level,\n structlog.stdlib.add_logger_name,\n structlog.contextvars.merge_contextvars,\n add_process_id,\n structlog.stdlib.PositionalArgumentsFormatter(),\n structlog.processors.TimeStamper(fmt=\"iso\", utc=False),\n structlog.processors.StackInfoRenderer(),\n structlog.processors.dict_tracebacks,\n structlog.stdlib.ProcessorFormatter.wrap_for_formatter,\n ],\n 
logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.make_filtering_bound_logger(\n getattr(logging, LOG_LEVEL, logging.WARNING)\n ),\n cache_logger_on_first_use=True,\n)\n\nLOG_PRE_CHAIN = [\n # Add the log level and a timestamp to the event_dict if the log entry\n # is not from structlog.\n structlog.stdlib.add_log_level,\n structlog.stdlib.add_logger_name,\n structlog.processors.TimeStamper(),\n structlog.processors.StackInfoRenderer(),\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"json\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.processors.JSONRenderer(sort_keys=True),\n \"foreign_pre_chain\": LOG_PRE_CHAIN,\n },\n \"console\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.dev.ConsoleRenderer(colors=DEBUG),\n \"foreign_pre_chain\": LOG_PRE_CHAIN,\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\" if DEBUG else \"json\",\n },\n },\n \"loggers\": {},\n}\n\n_LOGGING_HANDLER_MAP = {\n \"\": LOG_LEVEL,\n \"authentik\": LOG_LEVEL,\n \"django\": \"WARNING\",\n \"celery\": \"WARNING\",\n \"selenium\": \"WARNING\",\n \"docker\": \"WARNING\",\n \"urllib3\": \"WARNING\",\n \"websockets\": \"WARNING\",\n \"daphne\": \"WARNING\",\n \"kubernetes\": \"INFO\",\n \"asyncio\": \"WARNING\",\n \"redis\": \"WARNING\",\n \"silk\": \"INFO\",\n}\nfor handler_name, level in _LOGGING_HANDLER_MAP.items():\n # pyright: reportGeneralTypeIssues=false\n LOGGING[\"loggers\"][handler_name] = {\n \"handlers\": [\"console\"],\n \"level\": level,\n \"propagate\": False,\n }\n\n\n_DISALLOWED_ITEMS = [\n \"INSTALLED_APPS\",\n \"MIDDLEWARE\",\n \"AUTHENTICATION_BACKENDS\",\n \"CELERY_BEAT_SCHEDULE\",\n]\n# Load subapps's INSTALLED_APPS\nfor _app in INSTALLED_APPS:\n if _app.startswith(\"authentik\"):\n if \"apps\" in _app:\n _app = \".\".join(_app.split(\".\")[:-2])\n try:\n app_settings = importlib.import_module(f\"{_app}.settings\")\n INSTALLED_APPS.extend(getattr(app_settings, \"INSTALLED_APPS\", []))\n MIDDLEWARE.extend(getattr(app_settings, \"MIDDLEWARE\", []))\n AUTHENTICATION_BACKENDS.extend(getattr(app_settings, \"AUTHENTICATION_BACKENDS\", []))\n CELERY_BEAT_SCHEDULE.update(getattr(app_settings, \"CELERY_BEAT_SCHEDULE\", {}))\n for _attr in dir(app_settings):\n if not _attr.startswith(\"__\") and _attr not in _DISALLOWED_ITEMS:\n globals()[_attr] = getattr(app_settings, _attr)\n except ImportError:\n pass\n\nif DEBUG:\n CELERY_TASK_ALWAYS_EAGER = True\n os.environ[ENV_GIT_HASH_KEY] = \"dev\"\n INSTALLED_APPS.append(\"silk\")\n SILKY_PYTHON_PROFILER = True\n MIDDLEWARE = [\"silk.middleware.SilkyMiddleware\"] + MIDDLEWARE\n\nINSTALLED_APPS.append(\"authentik.core\")\n\nCONFIG.log(\"info\", \"Booting authentik\", version=__version__)\n", "path": "authentik/root/settings.py"}], "after_files": [{"content": "\"\"\"root settings for authentik\"\"\"\n\nimport importlib\nimport logging\nimport os\nfrom hashlib import sha512\nfrom urllib.parse import quote_plus\n\nimport structlog\nfrom celery.schedules import crontab\nfrom sentry_sdk import set_tag\n\nfrom authentik import ENV_GIT_HASH_KEY, __version__\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.logging import add_process_id\nfrom authentik.lib.sentry import sentry_init\nfrom authentik.lib.utils.reflection import get_env\nfrom authentik.stages.password import BACKEND_APP_PASSWORD, BACKEND_INBUILT, BACKEND_LDAP\n\nLOGGER = 
structlog.get_logger()\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nSTATIC_ROOT = BASE_DIR + \"/static\"\nSTATICFILES_DIRS = [BASE_DIR + \"/web\"]\nMEDIA_ROOT = BASE_DIR + \"/media\"\n\nDEBUG = CONFIG.y_bool(\"debug\")\nSECRET_KEY = CONFIG.y(\"secret_key\")\n\nINTERNAL_IPS = [\"127.0.0.1\"]\nALLOWED_HOSTS = [\"*\"]\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_CROSS_ORIGIN_OPENER_POLICY = None\nLOGIN_URL = \"authentik_flows:default-authentication\"\n\n# Custom user model\nAUTH_USER_MODEL = \"authentik_core.User\"\n\nCSRF_COOKIE_NAME = \"authentik_csrf\"\nCSRF_HEADER_NAME = \"HTTP_X_AUTHENTIK_CSRF\"\nLANGUAGE_COOKIE_NAME = \"authentik_language\"\nSESSION_COOKIE_NAME = \"authentik_session\"\nSESSION_COOKIE_DOMAIN = CONFIG.y(\"cookie_domain\", None)\n\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n BACKEND_INBUILT,\n BACKEND_APP_PASSWORD,\n BACKEND_LDAP,\n \"guardian.backends.ObjectPermissionBackend\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Application definition\nINSTALLED_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"authentik.admin\",\n \"authentik.api\",\n \"authentik.crypto\",\n \"authentik.events\",\n \"authentik.flows\",\n \"authentik.lib\",\n \"authentik.outposts\",\n \"authentik.policies.dummy\",\n \"authentik.policies.event_matcher\",\n \"authentik.policies.expiry\",\n \"authentik.policies.expression\",\n \"authentik.policies.hibp\",\n \"authentik.policies.password\",\n \"authentik.policies.reputation\",\n \"authentik.policies\",\n \"authentik.providers.ldap\",\n \"authentik.providers.oauth2\",\n \"authentik.providers.proxy\",\n \"authentik.providers.saml\",\n \"authentik.recovery\",\n \"authentik.sources.ldap\",\n \"authentik.sources.oauth\",\n \"authentik.sources.plex\",\n \"authentik.sources.saml\",\n \"authentik.stages.authenticator_duo\",\n \"authentik.stages.authenticator_sms\",\n \"authentik.stages.authenticator_static\",\n \"authentik.stages.authenticator_totp\",\n \"authentik.stages.authenticator_validate\",\n \"authentik.stages.authenticator_webauthn\",\n \"authentik.stages.captcha\",\n \"authentik.stages.consent\",\n \"authentik.stages.deny\",\n \"authentik.stages.dummy\",\n \"authentik.stages.email\",\n \"authentik.stages.identification\",\n \"authentik.stages.invitation\",\n \"authentik.stages.password\",\n \"authentik.stages.prompt\",\n \"authentik.stages.user_delete\",\n \"authentik.stages.user_login\",\n \"authentik.stages.user_logout\",\n \"authentik.stages.user_write\",\n \"authentik.tenants\",\n \"authentik.blueprints\",\n \"rest_framework\",\n \"django_filters\",\n \"drf_spectacular\",\n \"guardian\",\n \"django_prometheus\",\n \"channels\",\n]\n\nGUARDIAN_MONKEY_PATCH = False\n\nSPECTACULAR_SETTINGS = {\n \"TITLE\": \"authentik\",\n \"DESCRIPTION\": \"Making authentication simple.\",\n \"VERSION\": __version__,\n \"COMPONENT_SPLIT_REQUEST\": True,\n \"SCHEMA_PATH_PREFIX\": \"/api/v([0-9]+(beta)?)\",\n \"SCHEMA_PATH_PREFIX_TRIM\": True,\n \"SERVERS\": [\n {\n \"url\": \"/api/v3/\",\n },\n ],\n \"CONTACT\": {\n \"email\": \"[email protected]\",\n },\n \"AUTHENTICATION_WHITELIST\": [\"authentik.api.authentication.TokenAuthentication\"],\n \"LICENSE\": {\n \"name\": \"GNU GPLv3\",\n \"url\": 
\"https://github.com/goauthentik/authentik/blob/main/LICENSE\",\n },\n \"ENUM_NAME_OVERRIDES\": {\n \"EventActions\": \"authentik.events.models.EventAction\",\n \"ChallengeChoices\": \"authentik.flows.challenge.ChallengeTypes\",\n \"FlowDesignationEnum\": \"authentik.flows.models.FlowDesignation\",\n \"PolicyEngineMode\": \"authentik.policies.models.PolicyEngineMode\",\n \"ProxyMode\": \"authentik.providers.proxy.models.ProxyMode\",\n \"PromptTypeEnum\": \"authentik.stages.prompt.models.FieldTypes\",\n \"LDAPAPIAccessMode\": \"authentik.providers.ldap.models.APIAccessMode\",\n },\n \"ENUM_ADD_EXPLICIT_BLANK_NULL_CHOICE\": False,\n \"POSTPROCESSING_HOOKS\": [\n \"authentik.api.schema.postprocess_schema_responses\",\n \"drf_spectacular.hooks.postprocess_schema_enums\",\n ],\n}\n\nREST_FRAMEWORK = {\n \"DEFAULT_PAGINATION_CLASS\": \"authentik.api.pagination.Pagination\",\n \"PAGE_SIZE\": 100,\n \"DEFAULT_FILTER_BACKENDS\": [\n \"rest_framework_guardian.filters.ObjectPermissionsFilter\",\n \"django_filters.rest_framework.DjangoFilterBackend\",\n \"rest_framework.filters.OrderingFilter\",\n \"rest_framework.filters.SearchFilter\",\n ],\n \"DEFAULT_PARSER_CLASSES\": [\n \"rest_framework.parsers.JSONParser\",\n ],\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.DjangoObjectPermissions\",),\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"authentik.api.authentication.TokenAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n \"DEFAULT_RENDERER_CLASSES\": [\n \"rest_framework.renderers.JSONRenderer\",\n ],\n \"DEFAULT_SCHEMA_CLASS\": \"drf_spectacular.openapi.AutoSchema\",\n \"TEST_REQUEST_DEFAULT_FORMAT\": \"json\",\n}\n\nREDIS_PROTOCOL_PREFIX = \"redis://\"\nREDIS_CELERY_TLS_REQUIREMENTS = \"\"\nif CONFIG.y_bool(\"redis.tls\", False):\n REDIS_PROTOCOL_PREFIX = \"rediss://\"\n REDIS_CELERY_TLS_REQUIREMENTS = f\"?ssl_cert_reqs={CONFIG.y('redis.tls_reqs')}\"\n_redis_url = (\n f\"{REDIS_PROTOCOL_PREFIX}:\"\n f\"{quote_plus(CONFIG.y('redis.password'))}@{quote_plus(CONFIG.y('redis.host'))}:\"\n f\"{int(CONFIG.y('redis.port'))}\"\n)\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"{_redis_url}/{CONFIG.y('redis.cache_db')}\",\n \"TIMEOUT\": int(CONFIG.y(\"redis.cache_timeout\", 300)),\n \"OPTIONS\": {\"CLIENT_CLASS\": \"django_redis.client.DefaultClient\"},\n }\n}\nDJANGO_REDIS_SCAN_ITERSIZE = 1000\nDJANGO_REDIS_IGNORE_EXCEPTIONS = True\nDJANGO_REDIS_LOG_IGNORED_EXCEPTIONS = True\nSESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\nSESSION_SERIALIZER = \"django.contrib.sessions.serializers.PickleSerializer\"\nSESSION_CACHE_ALIAS = \"default\"\n# Configured via custom SessionMiddleware\n# SESSION_COOKIE_SAMESITE = \"None\"\n# SESSION_COOKIE_SECURE = True\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\nMESSAGE_STORAGE = \"authentik.root.messages.storage.ChannelsStorage\"\n\nMIDDLEWARE = [\n \"authentik.root.middleware.LoggingMiddleware\",\n \"django_prometheus.middleware.PrometheusBeforeMiddleware\",\n \"authentik.root.middleware.SessionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"authentik.core.middleware.RequestIDMiddleware\",\n \"authentik.tenants.middleware.TenantMiddleware\",\n \"authentik.events.middleware.AuditMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n 
\"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"authentik.core.middleware.ImpersonateMiddleware\",\n \"django_prometheus.middleware.PrometheusAfterMiddleware\",\n]\n\nROOT_URLCONF = \"authentik.root.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [CONFIG.y(\"email.template_dir\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"authentik.tenants.utils.context_processor\",\n ],\n },\n },\n]\n\nASGI_APPLICATION = \"authentik.root.asgi.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels_redis.core.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [f\"{_redis_url}/{CONFIG.y('redis.ws_db')}\"],\n },\n },\n}\n\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django_prometheus.db.backends.postgresql\",\n \"HOST\": CONFIG.y(\"postgresql.host\"),\n \"NAME\": CONFIG.y(\"postgresql.name\"),\n \"USER\": CONFIG.y(\"postgresql.user\"),\n \"PASSWORD\": CONFIG.y(\"postgresql.password\"),\n \"PORT\": int(CONFIG.y(\"postgresql.port\")),\n }\n}\n\nif CONFIG.y_bool(\"postgresql.use_pgbouncer\", False):\n # https://docs.djangoproject.com/en/4.0/ref/databases/#transaction-pooling-server-side-cursors\n DATABASES[\"default\"][\"DISABLE_SERVER_SIDE_CURSORS\"] = True\n # https://docs.djangoproject.com/en/4.0/ref/databases/#persistent-connections\n DATABASES[\"default\"][\"CONN_MAX_AGE\"] = None # persistent\n\n# Email\nEMAIL_HOST = CONFIG.y(\"email.host\")\nEMAIL_PORT = int(CONFIG.y(\"email.port\"))\nEMAIL_HOST_USER = CONFIG.y(\"email.username\")\nEMAIL_HOST_PASSWORD = CONFIG.y(\"email.password\")\nEMAIL_USE_TLS = CONFIG.y_bool(\"email.use_tls\", False)\nEMAIL_USE_SSL = CONFIG.y_bool(\"email.use_ssl\", False)\nEMAIL_TIMEOUT = int(CONFIG.y(\"email.timeout\"))\nDEFAULT_FROM_EMAIL = CONFIG.y(\"email.from\")\nSERVER_EMAIL = DEFAULT_FROM_EMAIL\nEMAIL_SUBJECT_PREFIX = \"[authentik] \"\n\n# Password validation\n# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = [\"./locale\"]\n\n# Celery settings\n# Add a 10 minute timeout to all Celery tasks.\nCELERY_TASK_SOFT_TIME_LIMIT = 600\nCELERY_WORKER_MAX_TASKS_PER_CHILD = 50\nCELERY_WORKER_CONCURRENCY = 2\nCELERY_BEAT_SCHEDULE = {\n \"clean_expired_models\": {\n \"task\": \"authentik.core.tasks.clean_expired_models\",\n \"schedule\": crontab(minute=\"2-59/5\"),\n \"options\": {\"queue\": \"authentik_scheduled\"},\n },\n \"user_cleanup\": {\n \"task\": \"authentik.core.tasks.clean_temporary_users\",\n \"schedule\": crontab(minute=\"9-59/5\"),\n \"options\": {\"queue\": \"authentik_scheduled\"},\n },\n}\nCELERY_TASK_CREATE_MISSING_QUEUES = True\nCELERY_TASK_DEFAULT_QUEUE = 
\"authentik\"\nCELERY_BROKER_URL = (\n f\"{_redis_url}/{CONFIG.y('redis.message_queue_db')}{REDIS_CELERY_TLS_REQUIREMENTS}\"\n)\nCELERY_RESULT_BACKEND = (\n f\"{_redis_url}/{CONFIG.y('redis.message_queue_db')}{REDIS_CELERY_TLS_REQUIREMENTS}\"\n)\n\n# Sentry integration\nenv = get_env()\n_ERROR_REPORTING = CONFIG.y_bool(\"error_reporting.enabled\", False)\nif _ERROR_REPORTING:\n sentry_env = CONFIG.y(\"error_reporting.environment\", \"customer\")\n sentry_init()\n set_tag(\"authentik.uuid\", sha512(str(SECRET_KEY).encode(\"ascii\")).hexdigest()[:16])\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.1/howto/static-files/\n\nSTATIC_URL = \"/static/\"\nMEDIA_URL = \"/media/\"\n\nTEST = False\nTEST_RUNNER = \"authentik.root.test_runner.PytestTestRunner\"\n# We can't check TEST here as its set later by the test runner\nLOG_LEVEL = CONFIG.y(\"log_level\").upper() if \"TF_BUILD\" not in os.environ else \"DEBUG\"\n# We could add a custom level to stdlib logging and structlog, but it's not easy or clean\n# https://stackoverflow.com/questions/54505487/custom-log-level-not-working-with-structlog\n# Additionally, the entire code uses debug as highest level so that would have to be re-written too\nif LOG_LEVEL == \"TRACE\":\n LOG_LEVEL = \"DEBUG\"\n\nstructlog.configure_once(\n processors=[\n structlog.stdlib.add_log_level,\n structlog.stdlib.add_logger_name,\n structlog.contextvars.merge_contextvars,\n add_process_id,\n structlog.stdlib.PositionalArgumentsFormatter(),\n structlog.processors.TimeStamper(fmt=\"iso\", utc=False),\n structlog.processors.StackInfoRenderer(),\n structlog.processors.dict_tracebacks,\n structlog.stdlib.ProcessorFormatter.wrap_for_formatter,\n ],\n logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.make_filtering_bound_logger(\n getattr(logging, LOG_LEVEL, logging.WARNING)\n ),\n cache_logger_on_first_use=True,\n)\n\nLOG_PRE_CHAIN = [\n # Add the log level and a timestamp to the event_dict if the log entry\n # is not from structlog.\n structlog.stdlib.add_log_level,\n structlog.stdlib.add_logger_name,\n structlog.processors.TimeStamper(),\n structlog.processors.StackInfoRenderer(),\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"json\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.processors.JSONRenderer(sort_keys=True),\n \"foreign_pre_chain\": LOG_PRE_CHAIN,\n },\n \"console\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.dev.ConsoleRenderer(colors=DEBUG),\n \"foreign_pre_chain\": LOG_PRE_CHAIN,\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\" if DEBUG else \"json\",\n },\n },\n \"loggers\": {},\n}\n\n_LOGGING_HANDLER_MAP = {\n \"\": LOG_LEVEL,\n \"authentik\": LOG_LEVEL,\n \"django\": \"WARNING\",\n \"celery\": \"WARNING\",\n \"selenium\": \"WARNING\",\n \"docker\": \"WARNING\",\n \"urllib3\": \"WARNING\",\n \"websockets\": \"WARNING\",\n \"daphne\": \"WARNING\",\n \"kubernetes\": \"INFO\",\n \"asyncio\": \"WARNING\",\n \"redis\": \"WARNING\",\n \"silk\": \"INFO\",\n}\nfor handler_name, level in _LOGGING_HANDLER_MAP.items():\n # pyright: reportGeneralTypeIssues=false\n LOGGING[\"loggers\"][handler_name] = {\n \"handlers\": [\"console\"],\n \"level\": level,\n \"propagate\": False,\n }\n\n\n_DISALLOWED_ITEMS = [\n \"INSTALLED_APPS\",\n \"MIDDLEWARE\",\n \"AUTHENTICATION_BACKENDS\",\n \"CELERY_BEAT_SCHEDULE\",\n]\n# 
Load subapps's INSTALLED_APPS\nfor _app in INSTALLED_APPS:\n if _app.startswith(\"authentik\"):\n if \"apps\" in _app:\n _app = \".\".join(_app.split(\".\")[:-2])\n try:\n app_settings = importlib.import_module(f\"{_app}.settings\")\n INSTALLED_APPS.extend(getattr(app_settings, \"INSTALLED_APPS\", []))\n MIDDLEWARE.extend(getattr(app_settings, \"MIDDLEWARE\", []))\n AUTHENTICATION_BACKENDS.extend(getattr(app_settings, \"AUTHENTICATION_BACKENDS\", []))\n CELERY_BEAT_SCHEDULE.update(getattr(app_settings, \"CELERY_BEAT_SCHEDULE\", {}))\n for _attr in dir(app_settings):\n if not _attr.startswith(\"__\") and _attr not in _DISALLOWED_ITEMS:\n globals()[_attr] = getattr(app_settings, _attr)\n except ImportError:\n pass\n\nif DEBUG:\n CELERY_TASK_ALWAYS_EAGER = True\n os.environ[ENV_GIT_HASH_KEY] = \"dev\"\n INSTALLED_APPS.append(\"silk\")\n SILKY_PYTHON_PROFILER = True\n MIDDLEWARE = [\"silk.middleware.SilkyMiddleware\"] + MIDDLEWARE\n\nINSTALLED_APPS.append(\"authentik.core\")\n\nCONFIG.log(\"info\", \"Booting authentik\", version=__version__)\n", "path": "authentik/root/settings.py"}]} |
gh_patches_debug_1432 | rasdani/github-patches | git_diff | django-extensions__django-extensions-1150 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
validate_templates raises false errors
Since 1.9.8 I have been getting this problem on our internal CI system, even though the files exist on the server at the reported paths.
Locally no error is raised.
```
/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django/contrib/auth/templates/registration/password_reset_subject.txt: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django/contrib/auth/templates/registration/password_reset_subject.txt
/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/widgets/foreignkey_searchinput.html: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/widgets/foreignkey_searchinput.html
/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/label.dot: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/label.dot
/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/digraph.dot: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/digraph.dot
/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/relation.dot: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/relation.dot
CommandError: 5 errors found
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_extensions/management/commands/validate_templates.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import os
3 import fnmatch
4
5 from django.apps import apps
6 from django.conf import settings
7 from django.core.management.base import BaseCommand, CommandError
8 from django.core.management.color import color_style
9 from django.template.loader import get_template
10
11 from django_extensions.compat import get_template_setting
12 from django_extensions.management.utils import signalcommand
13
14
15 #
16 # TODO: Render the template with fake request object ?
17 #
18
19
20 class Command(BaseCommand):
21 args = ''
22 help = "Validate templates on syntax and compile errors"
23 ignores = set([
24 ".DS_Store",
25 "*.swp",
26 "*~",
27 ])
28
29 def add_arguments(self, parser):
30 super(Command, self).add_arguments(parser)
31 parser.add_argument(
32 '--no-apps', action='store_true', dest='no_apps',
33 default=False, help="Do not automatically include apps.")
34 parser.add_argument(
35 '--break', '-b', action='store_true', dest='break',
36 default=False, help="Break on first error.")
37 parser.add_argument(
38 '--include', '-i', action='append', dest='includes',
39 default=[], help="Append these paths to TEMPLATE DIRS")
40 parser.add_argument(
41 '--ignore-app', action='append', dest='ignore_apps',
42 default=[], help="Ignore these apps")
43
44 def ignore_filename(self, filename):
45 filename = os.path.basename(filename)
46 for ignore_pattern in self.ignores:
47 if fnmatch.fnmatch(filename, ignore_pattern):
48 return True
49 return False
50
51 @signalcommand
52 def handle(self, *args, **options):
53 if hasattr(settings, 'VALIDATE_TEMPLATES_IGNORES'):
54 self.ignores = getattr(settings, 'VALIDATE_TEMPLATES_IGNORES')
55
56 style = color_style()
57 template_dirs = set(get_template_setting('DIRS'))
58 template_dirs |= set(options.get('includes', []))
59 template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', []))
60
61 if not options['no_apps']:
62 ignore_apps = options['ignore_apps']
63 if not ignore_apps and hasattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS'):
64 ignore_apps = getattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS')
65 for app in apps.get_app_configs():
66 if app.name in ignore_apps:
67 continue
68 app_template_dir = os.path.join(app.path, 'templates')
69 if os.path.isdir(app_template_dir):
70 template_dirs.add(app_template_dir)
71
72 # This is unsafe:
73 # https://docs.djangoproject.com/en/1.10/topics/settings/#altering-settings-at-runtime
74 if hasattr(settings, 'TEMPLATES'):
75 settings.TEMPLATES[0]['DIRS'] = list(template_dirs)
76 else:
77 settings.TEMPLATE_DIRS = list(template_dirs)
78 settings.TEMPLATE_DEBUG = True
79 verbosity = int(options.get('verbosity', 1))
80 errors = 0
81
82 for template_dir in template_dirs:
83 for root, dirs, filenames in os.walk(template_dir):
84 for filename in filenames:
85 if self.ignore_filename(filename):
86 continue
87
88 filepath = os.path.realpath(os.path.join(root, filename))
89 if verbosity > 1:
90 print(filepath)
91 try:
92 get_template(filepath)
93 except Exception as e:
94 errors += 1
95 print("%s: %s" % (filepath, style.ERROR("%s %s" % (e.__class__.__name__, str(e)))))
96 if errors and options.get('break', False):
97 raise CommandError("Errors found")
98
99 if errors:
100 raise CommandError("%s errors found" % errors)
101 print("%s errors found" % errors)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django_extensions/management/commands/validate_templates.py b/django_extensions/management/commands/validate_templates.py
--- a/django_extensions/management/commands/validate_templates.py
+++ b/django_extensions/management/commands/validate_templates.py
@@ -85,7 +85,7 @@
if self.ignore_filename(filename):
continue
- filepath = os.path.realpath(os.path.join(root, filename))
+ filepath = os.path.join(root, filename)
if verbosity > 1:
print(filepath)
try:
| {"golden_diff": "diff --git a/django_extensions/management/commands/validate_templates.py b/django_extensions/management/commands/validate_templates.py\n--- a/django_extensions/management/commands/validate_templates.py\n+++ b/django_extensions/management/commands/validate_templates.py\n@@ -85,7 +85,7 @@\n if self.ignore_filename(filename):\n continue\n \n- filepath = os.path.realpath(os.path.join(root, filename))\n+ filepath = os.path.join(root, filename)\n if verbosity > 1:\n print(filepath)\n try:\n", "issue": "validate_template raised false problems \nSince 1.9.8 I got this problem on our internal ci system but the files exists on the server on the path.\r\nLocally there is no error raised.\r\n\r\n\r\n```\r\n/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django/contrib/auth/templates/registration/password_reset_subject.txt: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django/contrib/auth/templates/registration/password_reset_subject.txt\r\n/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/widgets/foreignkey_searchinput.html: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/widgets/foreignkey_searchinput.html\r\n/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/label.dot: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/label.dot\r\n/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/digraph.dot: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/digraph.dot\r\n/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/relation.dot: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/relation.dot\r\nCommandError: 5 errors found\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport fnmatch\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core.management.color import color_style\nfrom django.template.loader import get_template\n\nfrom django_extensions.compat import get_template_setting\nfrom django_extensions.management.utils import signalcommand\n\n\n#\n# TODO: Render the template with fake request object ?\n#\n\n\nclass Command(BaseCommand):\n args = ''\n help = \"Validate templates on syntax and compile errors\"\n ignores = set([\n \".DS_Store\",\n \"*.swp\",\n \"*~\",\n ])\n\n def add_arguments(self, parser):\n super(Command, self).add_arguments(parser)\n parser.add_argument(\n '--no-apps', action='store_true', dest='no_apps',\n default=False, help=\"Do not automatically include apps.\")\n parser.add_argument(\n '--break', '-b', action='store_true', dest='break',\n default=False, help=\"Break on first error.\")\n parser.add_argument(\n '--include', '-i', 
action='append', dest='includes',\n default=[], help=\"Append these paths to TEMPLATE DIRS\")\n parser.add_argument(\n '--ignore-app', action='append', dest='ignore_apps',\n default=[], help=\"Ignore these apps\")\n\n def ignore_filename(self, filename):\n filename = os.path.basename(filename)\n for ignore_pattern in self.ignores:\n if fnmatch.fnmatch(filename, ignore_pattern):\n return True\n return False\n\n @signalcommand\n def handle(self, *args, **options):\n if hasattr(settings, 'VALIDATE_TEMPLATES_IGNORES'):\n self.ignores = getattr(settings, 'VALIDATE_TEMPLATES_IGNORES')\n\n style = color_style()\n template_dirs = set(get_template_setting('DIRS'))\n template_dirs |= set(options.get('includes', []))\n template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', []))\n\n if not options['no_apps']:\n ignore_apps = options['ignore_apps']\n if not ignore_apps and hasattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS'):\n ignore_apps = getattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS')\n for app in apps.get_app_configs():\n if app.name in ignore_apps:\n continue\n app_template_dir = os.path.join(app.path, 'templates')\n if os.path.isdir(app_template_dir):\n template_dirs.add(app_template_dir)\n\n # This is unsafe:\n # https://docs.djangoproject.com/en/1.10/topics/settings/#altering-settings-at-runtime\n if hasattr(settings, 'TEMPLATES'):\n settings.TEMPLATES[0]['DIRS'] = list(template_dirs)\n else:\n settings.TEMPLATE_DIRS = list(template_dirs)\n settings.TEMPLATE_DEBUG = True\n verbosity = int(options.get('verbosity', 1))\n errors = 0\n\n for template_dir in template_dirs:\n for root, dirs, filenames in os.walk(template_dir):\n for filename in filenames:\n if self.ignore_filename(filename):\n continue\n\n filepath = os.path.realpath(os.path.join(root, filename))\n if verbosity > 1:\n print(filepath)\n try:\n get_template(filepath)\n except Exception as e:\n errors += 1\n print(\"%s: %s\" % (filepath, style.ERROR(\"%s %s\" % (e.__class__.__name__, str(e)))))\n if errors and options.get('break', False):\n raise CommandError(\"Errors found\")\n\n if errors:\n raise CommandError(\"%s errors found\" % errors)\n print(\"%s errors found\" % errors)\n", "path": "django_extensions/management/commands/validate_templates.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport fnmatch\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core.management.color import color_style\nfrom django.template.loader import get_template\n\nfrom django_extensions.compat import get_template_setting\nfrom django_extensions.management.utils import signalcommand\n\n\n#\n# TODO: Render the template with fake request object ?\n#\n\n\nclass Command(BaseCommand):\n args = ''\n help = \"Validate templates on syntax and compile errors\"\n ignores = set([\n \".DS_Store\",\n \"*.swp\",\n \"*~\",\n ])\n\n def add_arguments(self, parser):\n super(Command, self).add_arguments(parser)\n parser.add_argument(\n '--no-apps', action='store_true', dest='no_apps',\n default=False, help=\"Do not automatically include apps.\")\n parser.add_argument(\n '--break', '-b', action='store_true', dest='break',\n default=False, help=\"Break on first error.\")\n parser.add_argument(\n '--include', '-i', action='append', dest='includes',\n default=[], help=\"Append these paths to TEMPLATE DIRS\")\n parser.add_argument(\n '--ignore-app', action='append', dest='ignore_apps',\n default=[], help=\"Ignore these 
apps\")\n\n def ignore_filename(self, filename):\n filename = os.path.basename(filename)\n for ignore_pattern in self.ignores:\n if fnmatch.fnmatch(filename, ignore_pattern):\n return True\n return False\n\n @signalcommand\n def handle(self, *args, **options):\n if hasattr(settings, 'VALIDATE_TEMPLATES_IGNORES'):\n self.ignores = getattr(settings, 'VALIDATE_TEMPLATES_IGNORES')\n\n style = color_style()\n template_dirs = set(get_template_setting('DIRS'))\n template_dirs |= set(options.get('includes', []))\n template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', []))\n\n if not options['no_apps']:\n ignore_apps = options['ignore_apps']\n if not ignore_apps and hasattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS'):\n ignore_apps = getattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS')\n for app in apps.get_app_configs():\n if app.name in ignore_apps:\n continue\n app_template_dir = os.path.join(app.path, 'templates')\n if os.path.isdir(app_template_dir):\n template_dirs.add(app_template_dir)\n\n # This is unsafe:\n # https://docs.djangoproject.com/en/1.10/topics/settings/#altering-settings-at-runtime\n if hasattr(settings, 'TEMPLATES'):\n settings.TEMPLATES[0]['DIRS'] = list(template_dirs)\n else:\n settings.TEMPLATE_DIRS = list(template_dirs)\n settings.TEMPLATE_DEBUG = True\n verbosity = int(options.get('verbosity', 1))\n errors = 0\n\n for template_dir in template_dirs:\n for root, dirs, filenames in os.walk(template_dir):\n for filename in filenames:\n if self.ignore_filename(filename):\n continue\n\n filepath = os.path.join(root, filename)\n if verbosity > 1:\n print(filepath)\n try:\n get_template(filepath)\n except Exception as e:\n errors += 1\n print(\"%s: %s\" % (filepath, style.ERROR(\"%s %s\" % (e.__class__.__name__, str(e)))))\n if errors and options.get('break', False):\n raise CommandError(\"Errors found\")\n\n if errors:\n raise CommandError(\"%s errors found\" % errors)\n print(\"%s errors found\" % errors)\n", "path": "django_extensions/management/commands/validate_templates.py"}]} |
gh_patches_debug_1433 | rasdani/github-patches | git_diff | xonsh__xonsh-4879 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
xpip doesn't detect/support "pip install --user" installs of xonsh
## xonfig
<details>
```
+------------------+----------------------+
| xonsh | 0.9.27 |
| Git SHA | 71fe9014 |
| Commit Date | Jan 29 08:58:58 2021 |
| Python | 3.9.5 |
| PLY | 3.11 |
| have readline | True |
| prompt toolkit | 3.0.19 |
| shell type | prompt_toolkit |
| pygments | 2.9.0 |
| on posix | True |
| on linux | True |
| distro | ubuntu |
| on darwin | False |
| on windows | False |
| on cygwin | False |
| on msys2 | False |
| is superuser | False |
| default encoding | utf-8 |
| xonsh encoding | utf-8 |
| encoding errors | surrogateescape |
| on jupyter | False |
| jupyter kernel | None |
| xontrib 1 | apt_tabcomplete |
| xontrib 2 | direnv |
| xontrib 3 | kitty |
| xontrib 4 | linuxbrew |
+------------------+----------------------+
```
</details>
## Expected Behavior
After installing xonsh via `pip3 install --user xonsh` (and ensuring that `~/.local/bin` is on `$PATH`, etc.), xonsh works and runs just fine. Since `xpip` is supposed to manage the Python environment where xonsh itself is installed, I would expect it to wrap a non-root `pip`, ideally invoked in a way that installs into the user's directory.
## Current Behavior
```
$ which xpip
sudo /usr/bin/python3 -m pip
```
Instead, `xpip` wraps a `sudo` invocation that will install things globally systemwide, which is not at all how xonsh itself was installed. And, if the user tries to do something "smart" like `xpip install --user xontrib-whatever`, I'm not sure quite what it will do but surely nothing good.
## Steps to Reproduce
1. Install xonsh via `pip3 install --user xonsh`
2. Run `xpip` to install something like a xontrib
3. Sadness and an unexpected `sudo` that might do undesired things to your system
## For community
⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/aliases.py`
Content:
```
1 """Aliases for the xonsh shell."""
2 import argparse
3 import collections.abc as cabc
4 import functools
5 import inspect
6 import os
7 import re
8 import sys
9 import types
10 import typing as tp
11
12 import xonsh.completers._aliases as xca
13 import xonsh.history.main as xhm
14 import xonsh.xoreutils.which as xxw
15 from xonsh.ast import isexpression
16 from xonsh.built_ins import XSH
17 from xonsh.cli_utils import Annotated, Arg, ArgParserAlias
18 from xonsh.dirstack import _get_cwd, cd, dirs, popd, pushd
19 from xonsh.environ import locate_binary, make_args_env
20 from xonsh.foreign_shells import foreign_shell_data
21 from xonsh.jobs import bg, clean_jobs, disown, fg, jobs
22 from xonsh.lazyasd import lazyobject
23 from xonsh.platform import (
24 IN_APPIMAGE,
25 ON_ANACONDA,
26 ON_DARWIN,
27 ON_DRAGONFLY,
28 ON_FREEBSD,
29 ON_NETBSD,
30 ON_OPENBSD,
31 ON_WINDOWS,
32 )
33 from xonsh.timings import timeit_alias
34 from xonsh.tools import (
35 ALIAS_KWARG_NAMES,
36 XonshError,
37 adjust_shlvl,
38 argvquote,
39 escape_windows_cmd_string,
40 print_color,
41 strip_simple_quotes,
42 swap_values,
43 to_repr_pretty_,
44 to_shlvl,
45 unthreadable,
46 )
47 from xonsh.xontribs import xontribs_main
48
49
50 @lazyobject
51 def EXEC_ALIAS_RE():
52 return re.compile(r"@\(|\$\(|!\(|\$\[|!\[|\&\&|\|\||\s+and\s+|\s+or\s+|[>|<]")
53
54
55 class Aliases(cabc.MutableMapping):
56 """Represents a location to hold and look up aliases."""
57
58 def __init__(self, *args, **kwargs):
59 self._raw = {}
60 self.update(*args, **kwargs)
61
62 @staticmethod
63 def _get_func_name(func):
64 name = func.__name__
65
66 # Strip leading underscore
67 if name.startswith("_"):
68 name = name[1:]
69 return name
70
71 def _register(self, func, name="", dash_case=True):
72 name = name or self._get_func_name(func)
73
74 if dash_case:
75 name = name.replace("_", "-")
76
77 self[name] = func
78 return func
79
80 @tp.overload
81 def register(self, func: types.FunctionType) -> types.FunctionType:
82 """simple usage"""
83
84 @tp.overload
85 def register(
86 self, name: str, *, dash_case: bool = True
87 ) -> tp.Callable[[types.FunctionType], types.FunctionType]:
88 ...
89
90 def register(self, func_or_name, name=None, dash_case=True):
91 """Decorator to register the given function by name."""
92
93 if isinstance(func_or_name, types.FunctionType):
94 return self._register(func_or_name, name, dash_case)
95
96 def wrapper(func):
97 return self._register(func, func_or_name, dash_case)
98
99 return wrapper
100
101 def get(self, key, default=None):
102 """Returns the (possibly modified) value. If the key is not present,
103 then `default` is returned.
104 If the value is callable, it is returned without modification. If it
105 is an iterable of strings it will be evaluated recursively to expand
106 other aliases, resulting in a new list or a "partially applied"
107 callable.
108 """
109 val = self._raw.get(key)
110 if val is None:
111 return default
112 elif isinstance(val, cabc.Iterable) or callable(val):
113 return self.eval_alias(val, seen_tokens={key})
114 else:
115 msg = "alias of {!r} has an inappropriate type: {!r}"
116 raise TypeError(msg.format(key, val))
117
118 def eval_alias(self, value, seen_tokens=frozenset(), acc_args=()):
119 """
120 "Evaluates" the alias ``value``, by recursively looking up the leftmost
121 token and "expanding" if it's also an alias.
122
123 A value like ``["cmd", "arg"]`` might transform like this:
124 ``> ["cmd", "arg"] -> ["ls", "-al", "arg"] -> callable()``
125 where ``cmd=ls -al`` and ``ls`` is an alias with its value being a
126 callable. The resulting callable will be "partially applied" with
127 ``["-al", "arg"]``.
128 """
129 # Beware of mutability: default values for keyword args are evaluated
130 # only once.
131 if callable(value):
132 return partial_eval_alias(value, acc_args=acc_args)
133 else:
134 expand_path = XSH.expand_path
135 token, *rest = map(expand_path, value)
136 if token in seen_tokens or token not in self._raw:
137 # ^ Making sure things like `egrep=egrep --color=auto` works,
138 # and that `l` evals to `ls --color=auto -CF` if `l=ls -CF`
139 # and `ls=ls --color=auto`
140 rtn = [token]
141 rtn.extend(rest)
142 rtn.extend(acc_args)
143 return rtn
144 else:
145 seen_tokens = seen_tokens | {token}
146 acc_args = rest + list(acc_args)
147 return self.eval_alias(self._raw[token], seen_tokens, acc_args)
148
149 def expand_alias(self, line: str, cursor_index: int) -> str:
150 """Expands any aliases present in line if alias does not point to a
151 builtin function and if alias is only a single command.
152 The command won't be expanded if the cursor's inside/behind it.
153 """
154 word = (line.split(maxsplit=1) or [""])[0]
155 if word in XSH.aliases and isinstance(self.get(word), cabc.Sequence): # type: ignore
156 word_idx = line.find(word)
157 word_edge = word_idx + len(word)
158 if cursor_index > word_edge:
159 # the cursor isn't inside/behind the word
160 expansion = " ".join(self.get(word))
161 line = line[:word_idx] + expansion + line[word_edge:]
162 return line
163
164 #
165 # Mutable mapping interface
166 #
167
168 def __getitem__(self, key):
169 return self._raw[key]
170
171 def __setitem__(self, key, val):
172 if isinstance(val, str):
173 f = "<exec-alias:" + key + ">"
174 if EXEC_ALIAS_RE.search(val) is not None:
175 # We have a sub-command (e.g. $(cmd)) or IO redirect (e.g. >>)
176 self._raw[key] = ExecAlias(val, filename=f)
177 elif isexpression(val):
178 # expansion substitution
179 lexer = XSH.execer.parser.lexer
180 self._raw[key] = list(map(strip_simple_quotes, lexer.split(val)))
181 else:
182 # need to exec alias
183 self._raw[key] = ExecAlias(val, filename=f)
184 else:
185 self._raw[key] = val
186
187 def _common_or(self, other):
188 new_dict = self._raw.copy()
189 for key in dict(other):
190 new_dict[key] = other[key]
191 return Aliases(new_dict)
192
193 def __or__(self, other):
194 return self._common_or(other)
195
196 def __ror__(self, other):
197 return self._common_or(other)
198
199 def __ior__(self, other):
200 for key in dict(other):
201 self[key] = other[key]
202 return self
203
204 def __delitem__(self, key):
205 del self._raw[key]
206
207 def update(self, *args, **kwargs):
208 for key, val in dict(*args, **kwargs).items():
209 self[key] = val
210
211 def __iter__(self):
212 yield from self._raw
213
214 def __len__(self):
215 return len(self._raw)
216
217 def __str__(self):
218 return str(self._raw)
219
220 def __repr__(self):
221 return "{}.{}({})".format(
222 self.__class__.__module__, self.__class__.__name__, self._raw
223 )
224
225 _repr_pretty_ = to_repr_pretty_
226
227
228 class ExecAlias:
229 """Provides a callable alias for xonsh source code."""
230
231 def __init__(self, src, filename="<exec-alias>"):
232 """
233 Parameters
234 ----------
235 src : str
236 Source code that will be
237 """
238 self.src = src
239 self.filename = filename
240
241 def __call__(
242 self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
243 ):
244 execer = XSH.execer
245 frame = stack[0][0] # execute as though we are at the call site
246
247 alias_args = {"args": args}
248 for i, a in enumerate(args):
249 alias_args[f"arg{i}"] = a
250
251 with XSH.env.swap(alias_args):
252 execer.exec(
253 self.src,
254 glbs=frame.f_globals,
255 locs=frame.f_locals,
256 filename=self.filename,
257 )
258 if XSH.history is not None:
259 return XSH.history.last_cmd_rtn
260
261 def __repr__(self):
262 return f"ExecAlias({self.src!r}, filename={self.filename!r})"
263
264
265 class PartialEvalAliasBase:
266 """Partially evaluated alias."""
267
268 def __init__(self, f, acc_args=()):
269 """
270 Parameters
271 ----------
272 f : callable
273 A function to dispatch to.
274 acc_args : sequence of strings, optional
275 Additional arguments to prepent to the argument list passed in
276 when the alias is called.
277 """
278 self.f = f
279 self.acc_args = acc_args
280 self.__name__ = getattr(f, "__name__", self.__class__.__name__)
281
282 def __call__(
283 self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
284 ):
285 args = list(self.acc_args) + args
286 return self.f(args, stdin, stdout, stderr, spec, stack)
287
288 def __repr__(self):
289 return "{name}({f!r}, acc_args={acc_args!r})".format(
290 name=self.__class__.__name__, f=self.f, acc_args=self.acc_args
291 )
292
293
294 class PartialEvalAlias0(PartialEvalAliasBase):
295 def __call__(
296 self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
297 ):
298 args = list(self.acc_args) + args
299 if args:
300 msg = "callable alias {f!r} takes no arguments, but {args!f} provided. "
301 msg += "Of these {acc_args!r} were partially applied."
302 raise XonshError(msg.format(f=self.f, args=args, acc_args=self.acc_args))
303 return self.f()
304
305
306 class PartialEvalAlias1(PartialEvalAliasBase):
307 def __call__(
308 self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
309 ):
310 args = list(self.acc_args) + args
311 return self.f(args)
312
313
314 class PartialEvalAlias2(PartialEvalAliasBase):
315 def __call__(
316 self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
317 ):
318 args = list(self.acc_args) + args
319 return self.f(args, stdin)
320
321
322 class PartialEvalAlias3(PartialEvalAliasBase):
323 def __call__(
324 self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
325 ):
326 args = list(self.acc_args) + args
327 return self.f(args, stdin, stdout)
328
329
330 class PartialEvalAlias4(PartialEvalAliasBase):
331 def __call__(
332 self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
333 ):
334 args = list(self.acc_args) + args
335 return self.f(args, stdin, stdout, stderr)
336
337
338 class PartialEvalAlias5(PartialEvalAliasBase):
339 def __call__(
340 self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
341 ):
342 args = list(self.acc_args) + args
343 return self.f(args, stdin, stdout, stderr, spec)
344
345
346 class PartialEvalAlias6(PartialEvalAliasBase):
347 def __call__(
348 self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
349 ):
350 args = list(self.acc_args) + args
351 return self.f(args, stdin, stdout, stderr, spec, stack)
352
353
354 PARTIAL_EVAL_ALIASES = (
355 PartialEvalAlias0,
356 PartialEvalAlias1,
357 PartialEvalAlias2,
358 PartialEvalAlias3,
359 PartialEvalAlias4,
360 PartialEvalAlias5,
361 PartialEvalAlias6,
362 )
363
364
365 def partial_eval_alias(f, acc_args=()):
366 """Dispatches the appropriate eval alias based on the number of args to the original callable alias
367 and how many arguments to apply.
368 """
369 # no partial needed if no extra args
370 if not acc_args:
371 return f
372 # need to dispatch
373 numargs = 0
374 for name, param in inspect.signature(f).parameters.items():
375 if (
376 param.kind == param.POSITIONAL_ONLY
377 or param.kind == param.POSITIONAL_OR_KEYWORD
378 ):
379 numargs += 1
380 elif name in ALIAS_KWARG_NAMES and param.kind == param.KEYWORD_ONLY:
381 numargs += 1
382 if numargs < 7:
383 return PARTIAL_EVAL_ALIASES[numargs](f, acc_args=acc_args)
384 else:
385 e = "Expected proxy with 6 or fewer arguments for {}, not {}"
386 raise XonshError(e.format(", ".join(ALIAS_KWARG_NAMES), numargs))
387
388
389 #
390 # Actual aliases below
391 #
392
393
394 def xonsh_exit(args, stdin=None):
395 """Sends signal to exit shell."""
396 if not clean_jobs():
397 # Do not exit if jobs not cleaned up
398 return None, None
399 XSH.exit = True
400 print() # gimme a newline
401 return None, None
402
403
404 def xonsh_reset(args, stdin=None):
405 """Clears __xonsh__.ctx"""
406 XSH.ctx.clear()
407
408
409 def source_foreign_fn(
410 shell: str,
411 files_or_code: Annotated[tp.List[str], Arg(nargs="+")],
412 interactive=True,
413 login=False,
414 envcmd=None,
415 aliascmd=None,
416 extra_args="",
417 safe=True,
418 prevcmd="",
419 postcmd="",
420 funcscmd="",
421 sourcer=None,
422 use_tmpfile=False,
423 seterrprevcmd=None,
424 seterrpostcmd=None,
425 overwrite_aliases=False,
426 suppress_skip_message=False,
427 show=False,
428 dryrun=False,
429 _stderr=None,
430 ):
431 """Sources a file written in a foreign shell language.
432
433 Parameters
434 ----------
435 shell
436 Name or path to the foreign shell
437 files_or_code
438 file paths to source or code in the target language.
439 interactive : -n, --non-interactive
440 whether the sourced shell should be interactive
441 login : -l, --login
442 whether the sourced shell should be login
443 envcmd : --envcmd
444 command to print environment
445 aliascmd : --aliascmd
446 command to print aliases
447 extra_args : --extra-args
448 extra arguments needed to run the shell
449 safe : -u, --unsafe
450 whether the source shell should be run safely, and not raise any errors, even if they occur.
451 prevcmd : -p, --prevcmd
452 command(s) to run before any other commands, replaces traditional source.
453 postcmd : --postcmd
454 command(s) to run after all other commands
455 funcscmd : --funcscmd
456 code to find locations of all native functions in the shell language.
457 sourcer : --sourcer
458 the source command in the target shell language.
459 If this is not set, a default value will attempt to be
460 looked up based on the shell name.
461 use_tmpfile : --use-tmpfile
462 whether the commands for source shell should be written to a temporary file.
463 seterrprevcmd : --seterrprevcmd
464 command(s) to set exit-on-error before any other commands.
465 seterrpostcmd : --seterrpostcmd
466 command(s) to set exit-on-error after all other commands.
467 overwrite_aliases : --overwrite-aliases
468 flag for whether or not sourced aliases should replace the current xonsh aliases.
469 suppress_skip_message : --suppress-skip-message
470 flag for whether or not skip messages should be suppressed.
471 show : --show
472 show the script output.
473 dryrun : -d, --dry-run
474 Will not actually source the file.
475 """
476 extra_args = tuple(extra_args.split())
477 env = XSH.env
478 suppress_skip_message = (
479 env.get("FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE")
480 if not suppress_skip_message
481 else suppress_skip_message
482 )
483 files: tp.Tuple[str, ...] = ()
484 if prevcmd:
485 pass # don't change prevcmd if given explicitly
486 elif os.path.isfile(files_or_code[0]):
487 if not sourcer:
488 return (None, "xonsh: error: `sourcer` command is not mentioned.\n", 1)
489 # we have filenames to source
490 prevcmd = "".join([f"{sourcer} {f}\n" for f in files_or_code])
491 files = tuple(files_or_code)
492 elif not prevcmd:
493 prevcmd = " ".join(files_or_code) # code to run, no files
494 foreign_shell_data.cache_clear() # make sure that we don't get prev src
495 fsenv, fsaliases = foreign_shell_data(
496 shell=shell,
497 login=login,
498 interactive=interactive,
499 envcmd=envcmd,
500 aliascmd=aliascmd,
501 extra_args=extra_args,
502 safe=safe,
503 prevcmd=prevcmd,
504 postcmd=postcmd,
505 funcscmd=funcscmd or None, # the default is None in the called function
506 sourcer=sourcer,
507 use_tmpfile=use_tmpfile,
508 seterrprevcmd=seterrprevcmd,
509 seterrpostcmd=seterrpostcmd,
510 show=show,
511 dryrun=dryrun,
512 files=files,
513 )
514 if fsenv is None:
515 if dryrun:
516 return
517 else:
518 msg = f"xonsh: error: Source failed: {prevcmd!r}\n"
519 msg += "xonsh: error: Possible reasons: File not found or syntax error\n"
520 return (None, msg, 1)
521 # apply results
522 denv = env.detype()
523 for k, v in fsenv.items():
524 if k == "SHLVL": # ignore $SHLVL as sourcing should not change $SHLVL
525 continue
526 if k in denv and v == denv[k]:
527 continue # no change from original
528 env[k] = v
529 # Remove any env-vars that were unset by the script.
530 for k in denv:
531 if k not in fsenv:
532 env.pop(k, None)
533 # Update aliases
534 baliases = XSH.aliases
535 for k, v in fsaliases.items():
536 if k in baliases and v == baliases[k]:
537 continue # no change from original
538 elif overwrite_aliases or k not in baliases:
539 baliases[k] = v
540 elif suppress_skip_message:
541 pass
542 else:
543 msg = (
544 "Skipping application of {0!r} alias from {1!r} "
545 "since it shares a name with an existing xonsh alias. "
546 'Use "--overwrite-alias" option to apply it anyway.'
547 'You may prevent this message with "--suppress-skip-message" or '
548 '"$FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE = True".'
549 )
550 print(msg.format(k, shell), file=_stderr)
551
552
553 source_foreign = ArgParserAlias(
554 func=source_foreign_fn, has_args=True, prog="source-foreign"
555 )
556
557
558 @unthreadable
559 def source_alias(args, stdin=None):
560 """Executes the contents of the provided files in the current context.
561 If sourced file isn't found in cwd, search for file along $PATH to source
562 instead.
563 """
564 env = XSH.env
565 encoding = env.get("XONSH_ENCODING")
566 errors = env.get("XONSH_ENCODING_ERRORS")
567 for i, fname in enumerate(args):
568 fpath = fname
569 if not os.path.isfile(fpath):
570 fpath = locate_binary(fname)
571 if fpath is None:
572 if env.get("XONSH_DEBUG"):
573 print(f"source: {fname}: No such file", file=sys.stderr)
574 if i == 0:
575 raise RuntimeError(
576 "must source at least one file, " + fname + " does not exist."
577 )
578 break
579 _, fext = os.path.splitext(fpath)
580 if fext and fext != ".xsh" and fext != ".py":
581 raise RuntimeError(
582 "attempting to source non-xonsh file! If you are "
583 "trying to source a file in another language, "
584 "then please use the appropriate source command. "
585 "For example, source-bash script.sh"
586 )
587 with open(fpath, encoding=encoding, errors=errors) as fp:
588 src = fp.read()
589 if not src.endswith("\n"):
590 src += "\n"
591 ctx = XSH.ctx
592 updates = {"__file__": fpath, "__name__": os.path.abspath(fpath)}
593 with env.swap(**make_args_env(args[i + 1 :])), swap_values(ctx, updates):
594 try:
595 XSH.builtins.execx(src, "exec", ctx, filename=fpath)
596 except Exception:
597 print_color(
598 "{RED}You may be attempting to source non-xonsh file! "
599 "{RESET}If you are trying to source a file in "
600 "another language, then please use the appropriate "
601 "source command. For example, {GREEN}source-bash "
602 "script.sh{RESET}",
603 file=sys.stderr,
604 )
605 raise
606
607
608 def source_cmd_fn(
609 files: Annotated[tp.List[str], Arg(nargs="+")],
610 login=False,
611 aliascmd=None,
612 extra_args="",
613 safe=True,
614 postcmd="",
615 funcscmd="",
616 seterrprevcmd=None,
617 overwrite_aliases=False,
618 suppress_skip_message=False,
619 show=False,
620 dryrun=False,
621 _stderr=None,
622 ):
623 """
624 Source cmd.exe files
625
626 Parameters
627 ----------
628 files
629 paths to source files.
630 login : -l, --login
631 whether the sourced shell should be login
632 envcmd : --envcmd
633 command to print environment
634 aliascmd : --aliascmd
635 command to print aliases
636 extra_args : --extra-args
637 extra arguments needed to run the shell
638 safe : -s, --safe
639 whether the source shell should be run safely, and not raise any errors, even if they occur.
640 postcmd : --postcmd
641 command(s) to run after all other commands
642 funcscmd : --funcscmd
643 code to find locations of all native functions in the shell language.
644 seterrprevcmd : --seterrprevcmd
645 command(s) to set exit-on-error before any other commands.
646 overwrite_aliases : --overwrite-aliases
647 flag for whether or not sourced aliases should replace the current xonsh aliases.
648 suppress_skip_message : --suppress-skip-message
649 flag for whether or not skip messages should be suppressed.
650 show : --show
651 show the script output.
652 dryrun : -d, --dry-run
653 Will not actually source the file.
654 """
655 args = list(files)
656 fpath = locate_binary(args[0])
657 args[0] = fpath if fpath else args[0]
658 if not os.path.isfile(args[0]):
659 return (None, f"xonsh: error: File not found: {args[0]}\n", 1)
660 prevcmd = "call "
661 prevcmd += " ".join([argvquote(arg, force=True) for arg in args])
662 prevcmd = escape_windows_cmd_string(prevcmd)
663 with XSH.env.swap(PROMPT="$P$G"):
664 return source_foreign_fn(
665 shell="cmd",
666 files_or_code=args,
667 interactive=True,
668 sourcer="call",
669 envcmd="set",
670 seterrpostcmd="if errorlevel 1 exit 1",
671 use_tmpfile=True,
672 prevcmd=prevcmd,
673 # from this function
674 login=login,
675 aliascmd=aliascmd,
676 extra_args=extra_args,
677 safe=safe,
678 postcmd=postcmd,
679 funcscmd=funcscmd,
680 seterrprevcmd=seterrprevcmd,
681 overwrite_aliases=overwrite_aliases,
682 suppress_skip_message=suppress_skip_message,
683 show=show,
684 dryrun=dryrun,
685 )
686
687
688 source_cmd = ArgParserAlias(func=source_cmd_fn, has_args=True, prog="source-cmd")
689
690
691 def xexec_fn(
692 command: Annotated[tp.List[str], Arg(nargs=argparse.REMAINDER)],
693 login=False,
694 clean=False,
695 name="",
696 _stdin=None,
697 ):
698 """exec (also aliased as xexec) uses the os.execvpe() function to
699 replace the xonsh process with the specified program.
700
701 This provides the functionality of the bash 'exec' builtin::
702
703 >>> exec bash -l -i
704 bash $
705
706 Parameters
707 ----------
708 command
709 program to launch along its arguments
710 login : -l, --login
711 the shell places a dash at the
712 beginning of the zeroth argument passed to command to simulate login
713 shell.
714 clean : -c, --clean
715 causes command to be executed with an empty environment.
716 name : -a, --name
717 the shell passes name as the zeroth argument
718 to the executed command.
719
720 Notes
721 -----
722 This command **is not** the same as the Python builtin function
723 exec(). That function is for running Python code. This command,
724 which shares the same name as the sh-lang statement, is for launching
725 a command directly in the same process. In the event of a name conflict,
726 please use the xexec command directly or dive into subprocess mode
727 explicitly with ![exec command]. For more details, please see
728 http://xon.sh/faq.html#exec.
729 """
730 if len(command) == 0:
731 return (None, "xonsh: exec: no command specified\n", 1)
732
733 cmd = command[0]
734 if name:
735 command[0] = name
736 if login:
737 command[0] = f"-{command[0]}"
738
739 denv = {}
740 if not clean:
741 denv = XSH.env.detype()
742
743 # decrement $SHLVL to mirror bash's behaviour
744 if "SHLVL" in denv:
745 old_shlvl = to_shlvl(denv["SHLVL"])
746 denv["SHLVL"] = str(adjust_shlvl(old_shlvl, -1))
747
748 try:
749 os.execvpe(cmd, command, denv)
750 except FileNotFoundError as e:
751 return (
752 None,
753 "xonsh: exec: file not found: {}: {}" "\n".format(e.args[1], command[0]),
754 1,
755 )
756
757
758 xexec = ArgParserAlias(func=xexec_fn, has_args=True, prog="xexec")
759
760
761 @lazyobject
762 def xonfig():
763 """Runs the xonsh configuration utility."""
764 from xonsh.xonfig import xonfig_main # lazy import
765
766 return xonfig_main
767
768
769 @unthreadable
770 def trace(args, stdin=None, stdout=None, stderr=None, spec=None):
771 """Runs the xonsh tracer utility."""
772 from xonsh.tracer import tracermain # lazy import
773
774 try:
775 return tracermain(args, stdin=stdin, stdout=stdout, stderr=stderr, spec=spec)
776 except SystemExit:
777 pass
778
779
780 def showcmd(args, stdin=None):
781 """usage: showcmd [-h|--help|cmd args]
782
783 Displays the command and arguments as a list of strings that xonsh would
784 run in subprocess mode. This is useful for determining how xonsh evaluates
785 your commands and arguments prior to running these commands.
786
787 optional arguments:
788 -h, --help show this help message and exit
789
790 Examples
791 --------
792 >>> showcmd echo $USER "can't" hear "the sea"
793 ['echo', 'I', "can't", 'hear', 'the sea']
794 """
795 if len(args) == 0 or (len(args) == 1 and args[0] in {"-h", "--help"}):
796 print(showcmd.__doc__.rstrip().replace("\n ", "\n"))
797 else:
798 sys.displayhook(args)
799
800
801 def detect_xpip_alias():
802 """
803 Determines the correct invocation to get xonsh's pip
804 """
805 if not getattr(sys, "executable", None):
806 return lambda args, stdin=None: (
807 "",
808 "Sorry, unable to run pip on your system (missing sys.executable)",
809 1,
810 )
811
812 basecmd = [sys.executable, "-m", "pip"]
813 try:
814 if ON_WINDOWS or IN_APPIMAGE:
815 # XXX: Does windows have an installation mode that requires UAC?
816 return basecmd
817 elif not os.access(os.path.dirname(sys.executable), os.W_OK):
818 return ["sudo"] + basecmd
819 else:
820 return basecmd
821 except Exception:
822 # Something freaky happened, return something that'll probably work
823 return basecmd
824
825
826 def make_default_aliases():
827 """Creates a new default aliases dictionary."""
828 default_aliases = {
829 "cd": cd,
830 "pushd": pushd,
831 "popd": popd,
832 "dirs": dirs,
833 "jobs": jobs,
834 "fg": fg,
835 "bg": bg,
836 "disown": disown,
837 "EOF": xonsh_exit,
838 "exit": xonsh_exit,
839 "quit": xonsh_exit,
840 "exec": xexec,
841 "xexec": xexec,
842 "source": source_alias,
843 "source-zsh": ArgParserAlias(
844 func=functools.partial(source_foreign_fn, "zsh", sourcer="source"),
845 has_args=True,
846 prog="source-zsh",
847 ),
848 "source-bash": ArgParserAlias(
849 func=functools.partial(source_foreign_fn, "bash", sourcer="source"),
850 has_args=True,
851 prog="source-bash",
852 ),
853 "source-cmd": source_cmd,
854 "source-foreign": source_foreign,
855 "history": xhm.history_main,
856 "trace": trace,
857 "timeit": timeit_alias,
858 "xonfig": xonfig,
859 "scp-resume": ["rsync", "--partial", "-h", "--progress", "--rsh=ssh"],
860 "showcmd": showcmd,
861 "ipynb": ["jupyter", "notebook", "--no-browser"],
862 "which": xxw.which,
863 "xontrib": xontribs_main,
864 "completer": xca.completer_alias,
865 "xpip": detect_xpip_alias(),
866 "xonsh-reset": xonsh_reset,
867 }
868 if ON_WINDOWS:
869 # Borrow builtin commands from cmd.exe.
870 windows_cmd_aliases = {
871 "cls",
872 "copy",
873 "del",
874 "dir",
875 "echo",
876 "erase",
877 "md",
878 "mkdir",
879 "mklink",
880 "move",
881 "rd",
882 "ren",
883 "rename",
884 "rmdir",
885 "time",
886 "type",
887 "vol",
888 }
889 for alias in windows_cmd_aliases:
890 default_aliases[alias] = ["cmd", "/c", alias]
891 default_aliases["call"] = ["source-cmd"]
892 default_aliases["source-bat"] = ["source-cmd"]
893 default_aliases["clear"] = "cls"
894 if ON_ANACONDA:
895 # Add aliases specific to the Anaconda python distribution.
896 default_aliases["activate"] = ["source-cmd", "activate.bat"]
897 default_aliases["deactivate"] = ["source-cmd", "deactivate.bat"]
898 if not locate_binary("sudo"):
899 import xonsh.winutils as winutils
900
901 def sudo(args):
902 if len(args) < 1:
903 print(
904 "You need to provide an executable to run as " "Administrator."
905 )
906 return
907 cmd = args[0]
908 if locate_binary(cmd):
909 return winutils.sudo(cmd, args[1:])
910 elif cmd.lower() in windows_cmd_aliases:
911 args = ["/D", "/C", "CD", _get_cwd(), "&&"] + args
912 return winutils.sudo("cmd", args)
913 else:
914 msg = 'Cannot find the path for executable "{0}".'
915 print(msg.format(cmd))
916
917 default_aliases["sudo"] = sudo
918 elif ON_DARWIN:
919 default_aliases["ls"] = ["ls", "-G"]
920 elif ON_FREEBSD or ON_DRAGONFLY:
921 default_aliases["grep"] = ["grep", "--color=auto"]
922 default_aliases["egrep"] = ["egrep", "--color=auto"]
923 default_aliases["fgrep"] = ["fgrep", "--color=auto"]
924 default_aliases["ls"] = ["ls", "-G"]
925 elif ON_NETBSD:
926 default_aliases["grep"] = ["grep", "--color=auto"]
927 default_aliases["egrep"] = ["egrep", "--color=auto"]
928 default_aliases["fgrep"] = ["fgrep", "--color=auto"]
929 elif ON_OPENBSD:
930 pass
931 else:
932 default_aliases["grep"] = ["grep", "--color=auto"]
933 default_aliases["egrep"] = ["egrep", "--color=auto"]
934 default_aliases["fgrep"] = ["fgrep", "--color=auto"]
935 default_aliases["ls"] = ["ls", "--color=auto", "-v"]
936 return default_aliases
937
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/aliases.py b/xonsh/aliases.py
--- a/xonsh/aliases.py
+++ b/xonsh/aliases.py
@@ -815,7 +815,7 @@
# XXX: Does windows have an installation mode that requires UAC?
return basecmd
elif not os.access(os.path.dirname(sys.executable), os.W_OK):
- return ["sudo"] + basecmd
+ return basecmd.extend(["--user"])
else:
return basecmd
except Exception:
| {"golden_diff": "diff --git a/xonsh/aliases.py b/xonsh/aliases.py\n--- a/xonsh/aliases.py\n+++ b/xonsh/aliases.py\n@@ -815,7 +815,7 @@\n # XXX: Does windows have an installation mode that requires UAC?\n return basecmd\n elif not os.access(os.path.dirname(sys.executable), os.W_OK):\n- return [\"sudo\"] + basecmd\n+ return basecmd.extend([\"--user\"])\n else:\n return basecmd\n except Exception:\n", "issue": "xpip doesn't detect/support \"pip install --user\" installs of xonsh\n## xonfig\r\n\r\n<details>\r\n\r\n```\r\n+------------------+----------------------+\r\n| xonsh | 0.9.27 |\r\n| Git SHA | 71fe9014 |\r\n| Commit Date | Jan 29 08:58:58 2021 |\r\n| Python | 3.9.5 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | 3.0.19 |\r\n| shell type | prompt_toolkit |\r\n| pygments | 2.9.0 |\r\n| on posix | True |\r\n| on linux | True |\r\n| distro | ubuntu |\r\n| on darwin | False |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n| on jupyter | False |\r\n| jupyter kernel | None |\r\n| xontrib 1 | apt_tabcomplete |\r\n| xontrib 2 | direnv |\r\n| xontrib 3 | kitty |\r\n| xontrib 4 | linuxbrew |\r\n+------------------+----------------------+\r\n```\r\n\r\n</details>\r\n\r\n## Expected Behavior\r\nAfter installing xonsh via `pip3 install --user xonsh` (and ensuring that `~/.local/bin` is on `$PATH`, etc), xonsh works and runs just fine. Since `xpip` is supposed to manage the Python environment where xonsh itself is defined, I would expect it to wrap a non-root `pip`, ideally invoked in a way to install in the user's dir.\r\n\r\n## Current Behavior\r\n```\r\n$ which xpip\r\nsudo /usr/bin/python3 -m pip\r\n```\r\nInstead, `xpip` wraps a `sudo` invocation that will install things globally systemwide, which is not at all how xonsh itself was installed. And, if the user tries to do something \"smart\" like `xpip install --user xontrib-whatever`, I'm not sure quite what it will do but surely nothing good.\r\n\r\n## Steps to Reproduce\r\n1. Install xonsh via `pip3 install --user xonsh`\r\n2. Run `xpip` to install something like a xonfig\r\n3. 
Sadness and an unexpected `sudo` that might do undesired things to your system\r\n\r\n## For community\r\n\u2b07\ufe0f **Please click the \ud83d\udc4d reaction instead of leaving a `+1` or \ud83d\udc4d comment**\n", "before_files": [{"content": "\"\"\"Aliases for the xonsh shell.\"\"\"\nimport argparse\nimport collections.abc as cabc\nimport functools\nimport inspect\nimport os\nimport re\nimport sys\nimport types\nimport typing as tp\n\nimport xonsh.completers._aliases as xca\nimport xonsh.history.main as xhm\nimport xonsh.xoreutils.which as xxw\nfrom xonsh.ast import isexpression\nfrom xonsh.built_ins import XSH\nfrom xonsh.cli_utils import Annotated, Arg, ArgParserAlias\nfrom xonsh.dirstack import _get_cwd, cd, dirs, popd, pushd\nfrom xonsh.environ import locate_binary, make_args_env\nfrom xonsh.foreign_shells import foreign_shell_data\nfrom xonsh.jobs import bg, clean_jobs, disown, fg, jobs\nfrom xonsh.lazyasd import lazyobject\nfrom xonsh.platform import (\n IN_APPIMAGE,\n ON_ANACONDA,\n ON_DARWIN,\n ON_DRAGONFLY,\n ON_FREEBSD,\n ON_NETBSD,\n ON_OPENBSD,\n ON_WINDOWS,\n)\nfrom xonsh.timings import timeit_alias\nfrom xonsh.tools import (\n ALIAS_KWARG_NAMES,\n XonshError,\n adjust_shlvl,\n argvquote,\n escape_windows_cmd_string,\n print_color,\n strip_simple_quotes,\n swap_values,\n to_repr_pretty_,\n to_shlvl,\n unthreadable,\n)\nfrom xonsh.xontribs import xontribs_main\n\n\n@lazyobject\ndef EXEC_ALIAS_RE():\n return re.compile(r\"@\\(|\\$\\(|!\\(|\\$\\[|!\\[|\\&\\&|\\|\\||\\s+and\\s+|\\s+or\\s+|[>|<]\")\n\n\nclass Aliases(cabc.MutableMapping):\n \"\"\"Represents a location to hold and look up aliases.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self._raw = {}\n self.update(*args, **kwargs)\n\n @staticmethod\n def _get_func_name(func):\n name = func.__name__\n\n # Strip leading underscore\n if name.startswith(\"_\"):\n name = name[1:]\n return name\n\n def _register(self, func, name=\"\", dash_case=True):\n name = name or self._get_func_name(func)\n\n if dash_case:\n name = name.replace(\"_\", \"-\")\n\n self[name] = func\n return func\n\n @tp.overload\n def register(self, func: types.FunctionType) -> types.FunctionType:\n \"\"\"simple usage\"\"\"\n\n @tp.overload\n def register(\n self, name: str, *, dash_case: bool = True\n ) -> tp.Callable[[types.FunctionType], types.FunctionType]:\n ...\n\n def register(self, func_or_name, name=None, dash_case=True):\n \"\"\"Decorator to register the given function by name.\"\"\"\n\n if isinstance(func_or_name, types.FunctionType):\n return self._register(func_or_name, name, dash_case)\n\n def wrapper(func):\n return self._register(func, func_or_name, dash_case)\n\n return wrapper\n\n def get(self, key, default=None):\n \"\"\"Returns the (possibly modified) value. If the key is not present,\n then `default` is returned.\n If the value is callable, it is returned without modification. 
If it\n is an iterable of strings it will be evaluated recursively to expand\n other aliases, resulting in a new list or a \"partially applied\"\n callable.\n \"\"\"\n val = self._raw.get(key)\n if val is None:\n return default\n elif isinstance(val, cabc.Iterable) or callable(val):\n return self.eval_alias(val, seen_tokens={key})\n else:\n msg = \"alias of {!r} has an inappropriate type: {!r}\"\n raise TypeError(msg.format(key, val))\n\n def eval_alias(self, value, seen_tokens=frozenset(), acc_args=()):\n \"\"\"\n \"Evaluates\" the alias ``value``, by recursively looking up the leftmost\n token and \"expanding\" if it's also an alias.\n\n A value like ``[\"cmd\", \"arg\"]`` might transform like this:\n ``> [\"cmd\", \"arg\"] -> [\"ls\", \"-al\", \"arg\"] -> callable()``\n where ``cmd=ls -al`` and ``ls`` is an alias with its value being a\n callable. The resulting callable will be \"partially applied\" with\n ``[\"-al\", \"arg\"]``.\n \"\"\"\n # Beware of mutability: default values for keyword args are evaluated\n # only once.\n if callable(value):\n return partial_eval_alias(value, acc_args=acc_args)\n else:\n expand_path = XSH.expand_path\n token, *rest = map(expand_path, value)\n if token in seen_tokens or token not in self._raw:\n # ^ Making sure things like `egrep=egrep --color=auto` works,\n # and that `l` evals to `ls --color=auto -CF` if `l=ls -CF`\n # and `ls=ls --color=auto`\n rtn = [token]\n rtn.extend(rest)\n rtn.extend(acc_args)\n return rtn\n else:\n seen_tokens = seen_tokens | {token}\n acc_args = rest + list(acc_args)\n return self.eval_alias(self._raw[token], seen_tokens, acc_args)\n\n def expand_alias(self, line: str, cursor_index: int) -> str:\n \"\"\"Expands any aliases present in line if alias does not point to a\n builtin function and if alias is only a single command.\n The command won't be expanded if the cursor's inside/behind it.\n \"\"\"\n word = (line.split(maxsplit=1) or [\"\"])[0]\n if word in XSH.aliases and isinstance(self.get(word), cabc.Sequence): # type: ignore\n word_idx = line.find(word)\n word_edge = word_idx + len(word)\n if cursor_index > word_edge:\n # the cursor isn't inside/behind the word\n expansion = \" \".join(self.get(word))\n line = line[:word_idx] + expansion + line[word_edge:]\n return line\n\n #\n # Mutable mapping interface\n #\n\n def __getitem__(self, key):\n return self._raw[key]\n\n def __setitem__(self, key, val):\n if isinstance(val, str):\n f = \"<exec-alias:\" + key + \">\"\n if EXEC_ALIAS_RE.search(val) is not None:\n # We have a sub-command (e.g. $(cmd)) or IO redirect (e.g. 
>>)\n self._raw[key] = ExecAlias(val, filename=f)\n elif isexpression(val):\n # expansion substitution\n lexer = XSH.execer.parser.lexer\n self._raw[key] = list(map(strip_simple_quotes, lexer.split(val)))\n else:\n # need to exec alias\n self._raw[key] = ExecAlias(val, filename=f)\n else:\n self._raw[key] = val\n\n def _common_or(self, other):\n new_dict = self._raw.copy()\n for key in dict(other):\n new_dict[key] = other[key]\n return Aliases(new_dict)\n\n def __or__(self, other):\n return self._common_or(other)\n\n def __ror__(self, other):\n return self._common_or(other)\n\n def __ior__(self, other):\n for key in dict(other):\n self[key] = other[key]\n return self\n\n def __delitem__(self, key):\n del self._raw[key]\n\n def update(self, *args, **kwargs):\n for key, val in dict(*args, **kwargs).items():\n self[key] = val\n\n def __iter__(self):\n yield from self._raw\n\n def __len__(self):\n return len(self._raw)\n\n def __str__(self):\n return str(self._raw)\n\n def __repr__(self):\n return \"{}.{}({})\".format(\n self.__class__.__module__, self.__class__.__name__, self._raw\n )\n\n _repr_pretty_ = to_repr_pretty_\n\n\nclass ExecAlias:\n \"\"\"Provides a callable alias for xonsh source code.\"\"\"\n\n def __init__(self, src, filename=\"<exec-alias>\"):\n \"\"\"\n Parameters\n ----------\n src : str\n Source code that will be\n \"\"\"\n self.src = src\n self.filename = filename\n\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n execer = XSH.execer\n frame = stack[0][0] # execute as though we are at the call site\n\n alias_args = {\"args\": args}\n for i, a in enumerate(args):\n alias_args[f\"arg{i}\"] = a\n\n with XSH.env.swap(alias_args):\n execer.exec(\n self.src,\n glbs=frame.f_globals,\n locs=frame.f_locals,\n filename=self.filename,\n )\n if XSH.history is not None:\n return XSH.history.last_cmd_rtn\n\n def __repr__(self):\n return f\"ExecAlias({self.src!r}, filename={self.filename!r})\"\n\n\nclass PartialEvalAliasBase:\n \"\"\"Partially evaluated alias.\"\"\"\n\n def __init__(self, f, acc_args=()):\n \"\"\"\n Parameters\n ----------\n f : callable\n A function to dispatch to.\n acc_args : sequence of strings, optional\n Additional arguments to prepent to the argument list passed in\n when the alias is called.\n \"\"\"\n self.f = f\n self.acc_args = acc_args\n self.__name__ = getattr(f, \"__name__\", self.__class__.__name__)\n\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec, stack)\n\n def __repr__(self):\n return \"{name}({f!r}, acc_args={acc_args!r})\".format(\n name=self.__class__.__name__, f=self.f, acc_args=self.acc_args\n )\n\n\nclass PartialEvalAlias0(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n if args:\n msg = \"callable alias {f!r} takes no arguments, but {args!f} provided. 
\"\n msg += \"Of these {acc_args!r} were partially applied.\"\n raise XonshError(msg.format(f=self.f, args=args, acc_args=self.acc_args))\n return self.f()\n\n\nclass PartialEvalAlias1(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args)\n\n\nclass PartialEvalAlias2(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin)\n\n\nclass PartialEvalAlias3(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout)\n\n\nclass PartialEvalAlias4(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr)\n\n\nclass PartialEvalAlias5(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec)\n\n\nclass PartialEvalAlias6(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec, stack)\n\n\nPARTIAL_EVAL_ALIASES = (\n PartialEvalAlias0,\n PartialEvalAlias1,\n PartialEvalAlias2,\n PartialEvalAlias3,\n PartialEvalAlias4,\n PartialEvalAlias5,\n PartialEvalAlias6,\n)\n\n\ndef partial_eval_alias(f, acc_args=()):\n \"\"\"Dispatches the appropriate eval alias based on the number of args to the original callable alias\n and how many arguments to apply.\n \"\"\"\n # no partial needed if no extra args\n if not acc_args:\n return f\n # need to dispatch\n numargs = 0\n for name, param in inspect.signature(f).parameters.items():\n if (\n param.kind == param.POSITIONAL_ONLY\n or param.kind == param.POSITIONAL_OR_KEYWORD\n ):\n numargs += 1\n elif name in ALIAS_KWARG_NAMES and param.kind == param.KEYWORD_ONLY:\n numargs += 1\n if numargs < 7:\n return PARTIAL_EVAL_ALIASES[numargs](f, acc_args=acc_args)\n else:\n e = \"Expected proxy with 6 or fewer arguments for {}, not {}\"\n raise XonshError(e.format(\", \".join(ALIAS_KWARG_NAMES), numargs))\n\n\n#\n# Actual aliases below\n#\n\n\ndef xonsh_exit(args, stdin=None):\n \"\"\"Sends signal to exit shell.\"\"\"\n if not clean_jobs():\n # Do not exit if jobs not cleaned up\n return None, None\n XSH.exit = True\n print() # gimme a newline\n return None, None\n\n\ndef xonsh_reset(args, stdin=None):\n \"\"\"Clears __xonsh__.ctx\"\"\"\n XSH.ctx.clear()\n\n\ndef source_foreign_fn(\n shell: str,\n files_or_code: Annotated[tp.List[str], Arg(nargs=\"+\")],\n interactive=True,\n login=False,\n envcmd=None,\n aliascmd=None,\n extra_args=\"\",\n safe=True,\n prevcmd=\"\",\n postcmd=\"\",\n funcscmd=\"\",\n sourcer=None,\n use_tmpfile=False,\n seterrprevcmd=None,\n seterrpostcmd=None,\n overwrite_aliases=False,\n suppress_skip_message=False,\n show=False,\n dryrun=False,\n _stderr=None,\n):\n \"\"\"Sources a file written in a foreign shell language.\n\n Parameters\n ----------\n shell\n Name or path to the foreign shell\n files_or_code\n file paths to source or code in the target language.\n interactive : -n, --non-interactive\n whether the sourced shell should be interactive\n login : 
-l, --login\n whether the sourced shell should be login\n envcmd : --envcmd\n command to print environment\n aliascmd : --aliascmd\n command to print aliases\n extra_args : --extra-args\n extra arguments needed to run the shell\n safe : -u, --unsafe\n whether the source shell should be run safely, and not raise any errors, even if they occur.\n prevcmd : -p, --prevcmd\n command(s) to run before any other commands, replaces traditional source.\n postcmd : --postcmd\n command(s) to run after all other commands\n funcscmd : --funcscmd\n code to find locations of all native functions in the shell language.\n sourcer : --sourcer\n the source command in the target shell language.\n If this is not set, a default value will attempt to be\n looked up based on the shell name.\n use_tmpfile : --use-tmpfile\n whether the commands for source shell should be written to a temporary file.\n seterrprevcmd : --seterrprevcmd\n command(s) to set exit-on-error before any other commands.\n seterrpostcmd : --seterrpostcmd\n command(s) to set exit-on-error after all other commands.\n overwrite_aliases : --overwrite-aliases\n flag for whether or not sourced aliases should replace the current xonsh aliases.\n suppress_skip_message : --suppress-skip-message\n flag for whether or not skip messages should be suppressed.\n show : --show\n show the script output.\n dryrun : -d, --dry-run\n Will not actually source the file.\n \"\"\"\n extra_args = tuple(extra_args.split())\n env = XSH.env\n suppress_skip_message = (\n env.get(\"FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE\")\n if not suppress_skip_message\n else suppress_skip_message\n )\n files: tp.Tuple[str, ...] = ()\n if prevcmd:\n pass # don't change prevcmd if given explicitly\n elif os.path.isfile(files_or_code[0]):\n if not sourcer:\n return (None, \"xonsh: error: `sourcer` command is not mentioned.\\n\", 1)\n # we have filenames to source\n prevcmd = \"\".join([f\"{sourcer} {f}\\n\" for f in files_or_code])\n files = tuple(files_or_code)\n elif not prevcmd:\n prevcmd = \" \".join(files_or_code) # code to run, no files\n foreign_shell_data.cache_clear() # make sure that we don't get prev src\n fsenv, fsaliases = foreign_shell_data(\n shell=shell,\n login=login,\n interactive=interactive,\n envcmd=envcmd,\n aliascmd=aliascmd,\n extra_args=extra_args,\n safe=safe,\n prevcmd=prevcmd,\n postcmd=postcmd,\n funcscmd=funcscmd or None, # the default is None in the called function\n sourcer=sourcer,\n use_tmpfile=use_tmpfile,\n seterrprevcmd=seterrprevcmd,\n seterrpostcmd=seterrpostcmd,\n show=show,\n dryrun=dryrun,\n files=files,\n )\n if fsenv is None:\n if dryrun:\n return\n else:\n msg = f\"xonsh: error: Source failed: {prevcmd!r}\\n\"\n msg += \"xonsh: error: Possible reasons: File not found or syntax error\\n\"\n return (None, msg, 1)\n # apply results\n denv = env.detype()\n for k, v in fsenv.items():\n if k == \"SHLVL\": # ignore $SHLVL as sourcing should not change $SHLVL\n continue\n if k in denv and v == denv[k]:\n continue # no change from original\n env[k] = v\n # Remove any env-vars that were unset by the script.\n for k in denv:\n if k not in fsenv:\n env.pop(k, None)\n # Update aliases\n baliases = XSH.aliases\n for k, v in fsaliases.items():\n if k in baliases and v == baliases[k]:\n continue # no change from original\n elif overwrite_aliases or k not in baliases:\n baliases[k] = v\n elif suppress_skip_message:\n pass\n else:\n msg = (\n \"Skipping application of {0!r} alias from {1!r} \"\n \"since it shares a name with an existing xonsh alias. 
\"\n 'Use \"--overwrite-alias\" option to apply it anyway.'\n 'You may prevent this message with \"--suppress-skip-message\" or '\n '\"$FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE = True\".'\n )\n print(msg.format(k, shell), file=_stderr)\n\n\nsource_foreign = ArgParserAlias(\n func=source_foreign_fn, has_args=True, prog=\"source-foreign\"\n)\n\n\n@unthreadable\ndef source_alias(args, stdin=None):\n \"\"\"Executes the contents of the provided files in the current context.\n If sourced file isn't found in cwd, search for file along $PATH to source\n instead.\n \"\"\"\n env = XSH.env\n encoding = env.get(\"XONSH_ENCODING\")\n errors = env.get(\"XONSH_ENCODING_ERRORS\")\n for i, fname in enumerate(args):\n fpath = fname\n if not os.path.isfile(fpath):\n fpath = locate_binary(fname)\n if fpath is None:\n if env.get(\"XONSH_DEBUG\"):\n print(f\"source: {fname}: No such file\", file=sys.stderr)\n if i == 0:\n raise RuntimeError(\n \"must source at least one file, \" + fname + \" does not exist.\"\n )\n break\n _, fext = os.path.splitext(fpath)\n if fext and fext != \".xsh\" and fext != \".py\":\n raise RuntimeError(\n \"attempting to source non-xonsh file! If you are \"\n \"trying to source a file in another language, \"\n \"then please use the appropriate source command. \"\n \"For example, source-bash script.sh\"\n )\n with open(fpath, encoding=encoding, errors=errors) as fp:\n src = fp.read()\n if not src.endswith(\"\\n\"):\n src += \"\\n\"\n ctx = XSH.ctx\n updates = {\"__file__\": fpath, \"__name__\": os.path.abspath(fpath)}\n with env.swap(**make_args_env(args[i + 1 :])), swap_values(ctx, updates):\n try:\n XSH.builtins.execx(src, \"exec\", ctx, filename=fpath)\n except Exception:\n print_color(\n \"{RED}You may be attempting to source non-xonsh file! \"\n \"{RESET}If you are trying to source a file in \"\n \"another language, then please use the appropriate \"\n \"source command. 
For example, {GREEN}source-bash \"\n \"script.sh{RESET}\",\n file=sys.stderr,\n )\n raise\n\n\ndef source_cmd_fn(\n files: Annotated[tp.List[str], Arg(nargs=\"+\")],\n login=False,\n aliascmd=None,\n extra_args=\"\",\n safe=True,\n postcmd=\"\",\n funcscmd=\"\",\n seterrprevcmd=None,\n overwrite_aliases=False,\n suppress_skip_message=False,\n show=False,\n dryrun=False,\n _stderr=None,\n):\n \"\"\"\n Source cmd.exe files\n\n Parameters\n ----------\n files\n paths to source files.\n login : -l, --login\n whether the sourced shell should be login\n envcmd : --envcmd\n command to print environment\n aliascmd : --aliascmd\n command to print aliases\n extra_args : --extra-args\n extra arguments needed to run the shell\n safe : -s, --safe\n whether the source shell should be run safely, and not raise any errors, even if they occur.\n postcmd : --postcmd\n command(s) to run after all other commands\n funcscmd : --funcscmd\n code to find locations of all native functions in the shell language.\n seterrprevcmd : --seterrprevcmd\n command(s) to set exit-on-error before any other commands.\n overwrite_aliases : --overwrite-aliases\n flag for whether or not sourced aliases should replace the current xonsh aliases.\n suppress_skip_message : --suppress-skip-message\n flag for whether or not skip messages should be suppressed.\n show : --show\n show the script output.\n dryrun : -d, --dry-run\n Will not actually source the file.\n \"\"\"\n args = list(files)\n fpath = locate_binary(args[0])\n args[0] = fpath if fpath else args[0]\n if not os.path.isfile(args[0]):\n return (None, f\"xonsh: error: File not found: {args[0]}\\n\", 1)\n prevcmd = \"call \"\n prevcmd += \" \".join([argvquote(arg, force=True) for arg in args])\n prevcmd = escape_windows_cmd_string(prevcmd)\n with XSH.env.swap(PROMPT=\"$P$G\"):\n return source_foreign_fn(\n shell=\"cmd\",\n files_or_code=args,\n interactive=True,\n sourcer=\"call\",\n envcmd=\"set\",\n seterrpostcmd=\"if errorlevel 1 exit 1\",\n use_tmpfile=True,\n prevcmd=prevcmd,\n # from this function\n login=login,\n aliascmd=aliascmd,\n extra_args=extra_args,\n safe=safe,\n postcmd=postcmd,\n funcscmd=funcscmd,\n seterrprevcmd=seterrprevcmd,\n overwrite_aliases=overwrite_aliases,\n suppress_skip_message=suppress_skip_message,\n show=show,\n dryrun=dryrun,\n )\n\n\nsource_cmd = ArgParserAlias(func=source_cmd_fn, has_args=True, prog=\"source-cmd\")\n\n\ndef xexec_fn(\n command: Annotated[tp.List[str], Arg(nargs=argparse.REMAINDER)],\n login=False,\n clean=False,\n name=\"\",\n _stdin=None,\n):\n \"\"\"exec (also aliased as xexec) uses the os.execvpe() function to\n replace the xonsh process with the specified program.\n\n This provides the functionality of the bash 'exec' builtin::\n\n >>> exec bash -l -i\n bash $\n\n Parameters\n ----------\n command\n program to launch along its arguments\n login : -l, --login\n the shell places a dash at the\n beginning of the zeroth argument passed to command to simulate login\n shell.\n clean : -c, --clean\n causes command to be executed with an empty environment.\n name : -a, --name\n the shell passes name as the zeroth argument\n to the executed command.\n\n Notes\n -----\n This command **is not** the same as the Python builtin function\n exec(). That function is for running Python code. This command,\n which shares the same name as the sh-lang statement, is for launching\n a command directly in the same process. 
In the event of a name conflict,\n please use the xexec command directly or dive into subprocess mode\n explicitly with ![exec command]. For more details, please see\n http://xon.sh/faq.html#exec.\n \"\"\"\n if len(command) == 0:\n return (None, \"xonsh: exec: no command specified\\n\", 1)\n\n cmd = command[0]\n if name:\n command[0] = name\n if login:\n command[0] = f\"-{command[0]}\"\n\n denv = {}\n if not clean:\n denv = XSH.env.detype()\n\n # decrement $SHLVL to mirror bash's behaviour\n if \"SHLVL\" in denv:\n old_shlvl = to_shlvl(denv[\"SHLVL\"])\n denv[\"SHLVL\"] = str(adjust_shlvl(old_shlvl, -1))\n\n try:\n os.execvpe(cmd, command, denv)\n except FileNotFoundError as e:\n return (\n None,\n \"xonsh: exec: file not found: {}: {}\" \"\\n\".format(e.args[1], command[0]),\n 1,\n )\n\n\nxexec = ArgParserAlias(func=xexec_fn, has_args=True, prog=\"xexec\")\n\n\n@lazyobject\ndef xonfig():\n \"\"\"Runs the xonsh configuration utility.\"\"\"\n from xonsh.xonfig import xonfig_main # lazy import\n\n return xonfig_main\n\n\n@unthreadable\ndef trace(args, stdin=None, stdout=None, stderr=None, spec=None):\n \"\"\"Runs the xonsh tracer utility.\"\"\"\n from xonsh.tracer import tracermain # lazy import\n\n try:\n return tracermain(args, stdin=stdin, stdout=stdout, stderr=stderr, spec=spec)\n except SystemExit:\n pass\n\n\ndef showcmd(args, stdin=None):\n \"\"\"usage: showcmd [-h|--help|cmd args]\n\n Displays the command and arguments as a list of strings that xonsh would\n run in subprocess mode. This is useful for determining how xonsh evaluates\n your commands and arguments prior to running these commands.\n\n optional arguments:\n -h, --help show this help message and exit\n\n Examples\n --------\n >>> showcmd echo $USER \"can't\" hear \"the sea\"\n ['echo', 'I', \"can't\", 'hear', 'the sea']\n \"\"\"\n if len(args) == 0 or (len(args) == 1 and args[0] in {\"-h\", \"--help\"}):\n print(showcmd.__doc__.rstrip().replace(\"\\n \", \"\\n\"))\n else:\n sys.displayhook(args)\n\n\ndef detect_xpip_alias():\n \"\"\"\n Determines the correct invocation to get xonsh's pip\n \"\"\"\n if not getattr(sys, \"executable\", None):\n return lambda args, stdin=None: (\n \"\",\n \"Sorry, unable to run pip on your system (missing sys.executable)\",\n 1,\n )\n\n basecmd = [sys.executable, \"-m\", \"pip\"]\n try:\n if ON_WINDOWS or IN_APPIMAGE:\n # XXX: Does windows have an installation mode that requires UAC?\n return basecmd\n elif not os.access(os.path.dirname(sys.executable), os.W_OK):\n return [\"sudo\"] + basecmd\n else:\n return basecmd\n except Exception:\n # Something freaky happened, return something that'll probably work\n return basecmd\n\n\ndef make_default_aliases():\n \"\"\"Creates a new default aliases dictionary.\"\"\"\n default_aliases = {\n \"cd\": cd,\n \"pushd\": pushd,\n \"popd\": popd,\n \"dirs\": dirs,\n \"jobs\": jobs,\n \"fg\": fg,\n \"bg\": bg,\n \"disown\": disown,\n \"EOF\": xonsh_exit,\n \"exit\": xonsh_exit,\n \"quit\": xonsh_exit,\n \"exec\": xexec,\n \"xexec\": xexec,\n \"source\": source_alias,\n \"source-zsh\": ArgParserAlias(\n func=functools.partial(source_foreign_fn, \"zsh\", sourcer=\"source\"),\n has_args=True,\n prog=\"source-zsh\",\n ),\n \"source-bash\": ArgParserAlias(\n func=functools.partial(source_foreign_fn, \"bash\", sourcer=\"source\"),\n has_args=True,\n prog=\"source-bash\",\n ),\n \"source-cmd\": source_cmd,\n \"source-foreign\": source_foreign,\n \"history\": xhm.history_main,\n \"trace\": trace,\n \"timeit\": timeit_alias,\n \"xonfig\": xonfig,\n \"scp-resume\": 
[\"rsync\", \"--partial\", \"-h\", \"--progress\", \"--rsh=ssh\"],\n \"showcmd\": showcmd,\n \"ipynb\": [\"jupyter\", \"notebook\", \"--no-browser\"],\n \"which\": xxw.which,\n \"xontrib\": xontribs_main,\n \"completer\": xca.completer_alias,\n \"xpip\": detect_xpip_alias(),\n \"xonsh-reset\": xonsh_reset,\n }\n if ON_WINDOWS:\n # Borrow builtin commands from cmd.exe.\n windows_cmd_aliases = {\n \"cls\",\n \"copy\",\n \"del\",\n \"dir\",\n \"echo\",\n \"erase\",\n \"md\",\n \"mkdir\",\n \"mklink\",\n \"move\",\n \"rd\",\n \"ren\",\n \"rename\",\n \"rmdir\",\n \"time\",\n \"type\",\n \"vol\",\n }\n for alias in windows_cmd_aliases:\n default_aliases[alias] = [\"cmd\", \"/c\", alias]\n default_aliases[\"call\"] = [\"source-cmd\"]\n default_aliases[\"source-bat\"] = [\"source-cmd\"]\n default_aliases[\"clear\"] = \"cls\"\n if ON_ANACONDA:\n # Add aliases specific to the Anaconda python distribution.\n default_aliases[\"activate\"] = [\"source-cmd\", \"activate.bat\"]\n default_aliases[\"deactivate\"] = [\"source-cmd\", \"deactivate.bat\"]\n if not locate_binary(\"sudo\"):\n import xonsh.winutils as winutils\n\n def sudo(args):\n if len(args) < 1:\n print(\n \"You need to provide an executable to run as \" \"Administrator.\"\n )\n return\n cmd = args[0]\n if locate_binary(cmd):\n return winutils.sudo(cmd, args[1:])\n elif cmd.lower() in windows_cmd_aliases:\n args = [\"/D\", \"/C\", \"CD\", _get_cwd(), \"&&\"] + args\n return winutils.sudo(\"cmd\", args)\n else:\n msg = 'Cannot find the path for executable \"{0}\".'\n print(msg.format(cmd))\n\n default_aliases[\"sudo\"] = sudo\n elif ON_DARWIN:\n default_aliases[\"ls\"] = [\"ls\", \"-G\"]\n elif ON_FREEBSD or ON_DRAGONFLY:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n default_aliases[\"ls\"] = [\"ls\", \"-G\"]\n elif ON_NETBSD:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n elif ON_OPENBSD:\n pass\n else:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n default_aliases[\"ls\"] = [\"ls\", \"--color=auto\", \"-v\"]\n return default_aliases\n", "path": "xonsh/aliases.py"}], "after_files": [{"content": "\"\"\"Aliases for the xonsh shell.\"\"\"\nimport argparse\nimport collections.abc as cabc\nimport functools\nimport inspect\nimport os\nimport re\nimport sys\nimport types\nimport typing as tp\n\nimport xonsh.completers._aliases as xca\nimport xonsh.history.main as xhm\nimport xonsh.xoreutils.which as xxw\nfrom xonsh.ast import isexpression\nfrom xonsh.built_ins import XSH\nfrom xonsh.cli_utils import Annotated, Arg, ArgParserAlias\nfrom xonsh.dirstack import _get_cwd, cd, dirs, popd, pushd\nfrom xonsh.environ import locate_binary, make_args_env\nfrom xonsh.foreign_shells import foreign_shell_data\nfrom xonsh.jobs import bg, clean_jobs, disown, fg, jobs\nfrom xonsh.lazyasd import lazyobject\nfrom xonsh.platform import (\n IN_APPIMAGE,\n ON_ANACONDA,\n ON_DARWIN,\n ON_DRAGONFLY,\n ON_FREEBSD,\n ON_NETBSD,\n ON_OPENBSD,\n ON_WINDOWS,\n)\nfrom xonsh.timings import timeit_alias\nfrom xonsh.tools import (\n ALIAS_KWARG_NAMES,\n XonshError,\n adjust_shlvl,\n argvquote,\n escape_windows_cmd_string,\n print_color,\n strip_simple_quotes,\n swap_values,\n 
to_repr_pretty_,\n to_shlvl,\n unthreadable,\n)\nfrom xonsh.xontribs import xontribs_main\n\n\n@lazyobject\ndef EXEC_ALIAS_RE():\n return re.compile(r\"@\\(|\\$\\(|!\\(|\\$\\[|!\\[|\\&\\&|\\|\\||\\s+and\\s+|\\s+or\\s+|[>|<]\")\n\n\nclass Aliases(cabc.MutableMapping):\n \"\"\"Represents a location to hold and look up aliases.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self._raw = {}\n self.update(*args, **kwargs)\n\n @staticmethod\n def _get_func_name(func):\n name = func.__name__\n\n # Strip leading underscore\n if name.startswith(\"_\"):\n name = name[1:]\n return name\n\n def _register(self, func, name=\"\", dash_case=True):\n name = name or self._get_func_name(func)\n\n if dash_case:\n name = name.replace(\"_\", \"-\")\n\n self[name] = func\n return func\n\n @tp.overload\n def register(self, func: types.FunctionType) -> types.FunctionType:\n \"\"\"simple usage\"\"\"\n\n @tp.overload\n def register(\n self, name: str, *, dash_case: bool = True\n ) -> tp.Callable[[types.FunctionType], types.FunctionType]:\n ...\n\n def register(self, func_or_name, name=None, dash_case=True):\n \"\"\"Decorator to register the given function by name.\"\"\"\n\n if isinstance(func_or_name, types.FunctionType):\n return self._register(func_or_name, name, dash_case)\n\n def wrapper(func):\n return self._register(func, func_or_name, dash_case)\n\n return wrapper\n\n def get(self, key, default=None):\n \"\"\"Returns the (possibly modified) value. If the key is not present,\n then `default` is returned.\n If the value is callable, it is returned without modification. If it\n is an iterable of strings it will be evaluated recursively to expand\n other aliases, resulting in a new list or a \"partially applied\"\n callable.\n \"\"\"\n val = self._raw.get(key)\n if val is None:\n return default\n elif isinstance(val, cabc.Iterable) or callable(val):\n return self.eval_alias(val, seen_tokens={key})\n else:\n msg = \"alias of {!r} has an inappropriate type: {!r}\"\n raise TypeError(msg.format(key, val))\n\n def eval_alias(self, value, seen_tokens=frozenset(), acc_args=()):\n \"\"\"\n \"Evaluates\" the alias ``value``, by recursively looking up the leftmost\n token and \"expanding\" if it's also an alias.\n\n A value like ``[\"cmd\", \"arg\"]`` might transform like this:\n ``> [\"cmd\", \"arg\"] -> [\"ls\", \"-al\", \"arg\"] -> callable()``\n where ``cmd=ls -al`` and ``ls`` is an alias with its value being a\n callable. 
The resulting callable will be \"partially applied\" with\n ``[\"-al\", \"arg\"]``.\n \"\"\"\n # Beware of mutability: default values for keyword args are evaluated\n # only once.\n if callable(value):\n return partial_eval_alias(value, acc_args=acc_args)\n else:\n expand_path = XSH.expand_path\n token, *rest = map(expand_path, value)\n if token in seen_tokens or token not in self._raw:\n # ^ Making sure things like `egrep=egrep --color=auto` works,\n # and that `l` evals to `ls --color=auto -CF` if `l=ls -CF`\n # and `ls=ls --color=auto`\n rtn = [token]\n rtn.extend(rest)\n rtn.extend(acc_args)\n return rtn\n else:\n seen_tokens = seen_tokens | {token}\n acc_args = rest + list(acc_args)\n return self.eval_alias(self._raw[token], seen_tokens, acc_args)\n\n def expand_alias(self, line: str, cursor_index: int) -> str:\n \"\"\"Expands any aliases present in line if alias does not point to a\n builtin function and if alias is only a single command.\n The command won't be expanded if the cursor's inside/behind it.\n \"\"\"\n word = (line.split(maxsplit=1) or [\"\"])[0]\n if word in XSH.aliases and isinstance(self.get(word), cabc.Sequence): # type: ignore\n word_idx = line.find(word)\n word_edge = word_idx + len(word)\n if cursor_index > word_edge:\n # the cursor isn't inside/behind the word\n expansion = \" \".join(self.get(word))\n line = line[:word_idx] + expansion + line[word_edge:]\n return line\n\n #\n # Mutable mapping interface\n #\n\n def __getitem__(self, key):\n return self._raw[key]\n\n def __setitem__(self, key, val):\n if isinstance(val, str):\n f = \"<exec-alias:\" + key + \">\"\n if EXEC_ALIAS_RE.search(val) is not None:\n # We have a sub-command (e.g. $(cmd)) or IO redirect (e.g. >>)\n self._raw[key] = ExecAlias(val, filename=f)\n elif isexpression(val):\n # expansion substitution\n lexer = XSH.execer.parser.lexer\n self._raw[key] = list(map(strip_simple_quotes, lexer.split(val)))\n else:\n # need to exec alias\n self._raw[key] = ExecAlias(val, filename=f)\n else:\n self._raw[key] = val\n\n def _common_or(self, other):\n new_dict = self._raw.copy()\n for key in dict(other):\n new_dict[key] = other[key]\n return Aliases(new_dict)\n\n def __or__(self, other):\n return self._common_or(other)\n\n def __ror__(self, other):\n return self._common_or(other)\n\n def __ior__(self, other):\n for key in dict(other):\n self[key] = other[key]\n return self\n\n def __delitem__(self, key):\n del self._raw[key]\n\n def update(self, *args, **kwargs):\n for key, val in dict(*args, **kwargs).items():\n self[key] = val\n\n def __iter__(self):\n yield from self._raw\n\n def __len__(self):\n return len(self._raw)\n\n def __str__(self):\n return str(self._raw)\n\n def __repr__(self):\n return \"{}.{}({})\".format(\n self.__class__.__module__, self.__class__.__name__, self._raw\n )\n\n _repr_pretty_ = to_repr_pretty_\n\n\nclass ExecAlias:\n \"\"\"Provides a callable alias for xonsh source code.\"\"\"\n\n def __init__(self, src, filename=\"<exec-alias>\"):\n \"\"\"\n Parameters\n ----------\n src : str\n Source code that will be\n \"\"\"\n self.src = src\n self.filename = filename\n\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n execer = XSH.execer\n frame = stack[0][0] # execute as though we are at the call site\n\n alias_args = {\"args\": args}\n for i, a in enumerate(args):\n alias_args[f\"arg{i}\"] = a\n\n with XSH.env.swap(alias_args):\n execer.exec(\n self.src,\n glbs=frame.f_globals,\n locs=frame.f_locals,\n filename=self.filename,\n )\n if 
XSH.history is not None:\n return XSH.history.last_cmd_rtn\n\n def __repr__(self):\n return f\"ExecAlias({self.src!r}, filename={self.filename!r})\"\n\n\nclass PartialEvalAliasBase:\n \"\"\"Partially evaluated alias.\"\"\"\n\n def __init__(self, f, acc_args=()):\n \"\"\"\n Parameters\n ----------\n f : callable\n A function to dispatch to.\n acc_args : sequence of strings, optional\n Additional arguments to prepent to the argument list passed in\n when the alias is called.\n \"\"\"\n self.f = f\n self.acc_args = acc_args\n self.__name__ = getattr(f, \"__name__\", self.__class__.__name__)\n\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec, stack)\n\n def __repr__(self):\n return \"{name}({f!r}, acc_args={acc_args!r})\".format(\n name=self.__class__.__name__, f=self.f, acc_args=self.acc_args\n )\n\n\nclass PartialEvalAlias0(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n if args:\n msg = \"callable alias {f!r} takes no arguments, but {args!f} provided. \"\n msg += \"Of these {acc_args!r} were partially applied.\"\n raise XonshError(msg.format(f=self.f, args=args, acc_args=self.acc_args))\n return self.f()\n\n\nclass PartialEvalAlias1(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args)\n\n\nclass PartialEvalAlias2(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin)\n\n\nclass PartialEvalAlias3(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout)\n\n\nclass PartialEvalAlias4(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr)\n\n\nclass PartialEvalAlias5(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec)\n\n\nclass PartialEvalAlias6(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec, stack)\n\n\nPARTIAL_EVAL_ALIASES = (\n PartialEvalAlias0,\n PartialEvalAlias1,\n PartialEvalAlias2,\n PartialEvalAlias3,\n PartialEvalAlias4,\n PartialEvalAlias5,\n PartialEvalAlias6,\n)\n\n\ndef partial_eval_alias(f, acc_args=()):\n \"\"\"Dispatches the appropriate eval alias based on the number of args to the original callable alias\n and how many arguments to apply.\n \"\"\"\n # no partial needed if no extra args\n if not acc_args:\n return f\n # need to dispatch\n numargs = 0\n for name, param in inspect.signature(f).parameters.items():\n if (\n param.kind == param.POSITIONAL_ONLY\n or param.kind == param.POSITIONAL_OR_KEYWORD\n ):\n numargs += 1\n elif name in ALIAS_KWARG_NAMES and param.kind == param.KEYWORD_ONLY:\n numargs += 1\n if numargs < 7:\n return PARTIAL_EVAL_ALIASES[numargs](f, acc_args=acc_args)\n else:\n e = \"Expected proxy with 
6 or fewer arguments for {}, not {}\"\n raise XonshError(e.format(\", \".join(ALIAS_KWARG_NAMES), numargs))\n\n\n#\n# Actual aliases below\n#\n\n\ndef xonsh_exit(args, stdin=None):\n \"\"\"Sends signal to exit shell.\"\"\"\n if not clean_jobs():\n # Do not exit if jobs not cleaned up\n return None, None\n XSH.exit = True\n print() # gimme a newline\n return None, None\n\n\ndef xonsh_reset(args, stdin=None):\n \"\"\"Clears __xonsh__.ctx\"\"\"\n XSH.ctx.clear()\n\n\ndef source_foreign_fn(\n shell: str,\n files_or_code: Annotated[tp.List[str], Arg(nargs=\"+\")],\n interactive=True,\n login=False,\n envcmd=None,\n aliascmd=None,\n extra_args=\"\",\n safe=True,\n prevcmd=\"\",\n postcmd=\"\",\n funcscmd=\"\",\n sourcer=None,\n use_tmpfile=False,\n seterrprevcmd=None,\n seterrpostcmd=None,\n overwrite_aliases=False,\n suppress_skip_message=False,\n show=False,\n dryrun=False,\n _stderr=None,\n):\n \"\"\"Sources a file written in a foreign shell language.\n\n Parameters\n ----------\n shell\n Name or path to the foreign shell\n files_or_code\n file paths to source or code in the target language.\n interactive : -n, --non-interactive\n whether the sourced shell should be interactive\n login : -l, --login\n whether the sourced shell should be login\n envcmd : --envcmd\n command to print environment\n aliascmd : --aliascmd\n command to print aliases\n extra_args : --extra-args\n extra arguments needed to run the shell\n safe : -u, --unsafe\n whether the source shell should be run safely, and not raise any errors, even if they occur.\n prevcmd : -p, --prevcmd\n command(s) to run before any other commands, replaces traditional source.\n postcmd : --postcmd\n command(s) to run after all other commands\n funcscmd : --funcscmd\n code to find locations of all native functions in the shell language.\n sourcer : --sourcer\n the source command in the target shell language.\n If this is not set, a default value will attempt to be\n looked up based on the shell name.\n use_tmpfile : --use-tmpfile\n whether the commands for source shell should be written to a temporary file.\n seterrprevcmd : --seterrprevcmd\n command(s) to set exit-on-error before any other commands.\n seterrpostcmd : --seterrpostcmd\n command(s) to set exit-on-error after all other commands.\n overwrite_aliases : --overwrite-aliases\n flag for whether or not sourced aliases should replace the current xonsh aliases.\n suppress_skip_message : --suppress-skip-message\n flag for whether or not skip messages should be suppressed.\n show : --show\n show the script output.\n dryrun : -d, --dry-run\n Will not actually source the file.\n \"\"\"\n extra_args = tuple(extra_args.split())\n env = XSH.env\n suppress_skip_message = (\n env.get(\"FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE\")\n if not suppress_skip_message\n else suppress_skip_message\n )\n files: tp.Tuple[str, ...] 
= ()\n if prevcmd:\n pass # don't change prevcmd if given explicitly\n elif os.path.isfile(files_or_code[0]):\n if not sourcer:\n return (None, \"xonsh: error: `sourcer` command is not mentioned.\\n\", 1)\n # we have filenames to source\n prevcmd = \"\".join([f\"{sourcer} {f}\\n\" for f in files_or_code])\n files = tuple(files_or_code)\n elif not prevcmd:\n prevcmd = \" \".join(files_or_code) # code to run, no files\n foreign_shell_data.cache_clear() # make sure that we don't get prev src\n fsenv, fsaliases = foreign_shell_data(\n shell=shell,\n login=login,\n interactive=interactive,\n envcmd=envcmd,\n aliascmd=aliascmd,\n extra_args=extra_args,\n safe=safe,\n prevcmd=prevcmd,\n postcmd=postcmd,\n funcscmd=funcscmd or None, # the default is None in the called function\n sourcer=sourcer,\n use_tmpfile=use_tmpfile,\n seterrprevcmd=seterrprevcmd,\n seterrpostcmd=seterrpostcmd,\n show=show,\n dryrun=dryrun,\n files=files,\n )\n if fsenv is None:\n if dryrun:\n return\n else:\n msg = f\"xonsh: error: Source failed: {prevcmd!r}\\n\"\n msg += \"xonsh: error: Possible reasons: File not found or syntax error\\n\"\n return (None, msg, 1)\n # apply results\n denv = env.detype()\n for k, v in fsenv.items():\n if k == \"SHLVL\": # ignore $SHLVL as sourcing should not change $SHLVL\n continue\n if k in denv and v == denv[k]:\n continue # no change from original\n env[k] = v\n # Remove any env-vars that were unset by the script.\n for k in denv:\n if k not in fsenv:\n env.pop(k, None)\n # Update aliases\n baliases = XSH.aliases\n for k, v in fsaliases.items():\n if k in baliases and v == baliases[k]:\n continue # no change from original\n elif overwrite_aliases or k not in baliases:\n baliases[k] = v\n elif suppress_skip_message:\n pass\n else:\n msg = (\n \"Skipping application of {0!r} alias from {1!r} \"\n \"since it shares a name with an existing xonsh alias. \"\n 'Use \"--overwrite-alias\" option to apply it anyway.'\n 'You may prevent this message with \"--suppress-skip-message\" or '\n '\"$FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE = True\".'\n )\n print(msg.format(k, shell), file=_stderr)\n\n\nsource_foreign = ArgParserAlias(\n func=source_foreign_fn, has_args=True, prog=\"source-foreign\"\n)\n\n\n@unthreadable\ndef source_alias(args, stdin=None):\n \"\"\"Executes the contents of the provided files in the current context.\n If sourced file isn't found in cwd, search for file along $PATH to source\n instead.\n \"\"\"\n env = XSH.env\n encoding = env.get(\"XONSH_ENCODING\")\n errors = env.get(\"XONSH_ENCODING_ERRORS\")\n for i, fname in enumerate(args):\n fpath = fname\n if not os.path.isfile(fpath):\n fpath = locate_binary(fname)\n if fpath is None:\n if env.get(\"XONSH_DEBUG\"):\n print(f\"source: {fname}: No such file\", file=sys.stderr)\n if i == 0:\n raise RuntimeError(\n \"must source at least one file, \" + fname + \" does not exist.\"\n )\n break\n _, fext = os.path.splitext(fpath)\n if fext and fext != \".xsh\" and fext != \".py\":\n raise RuntimeError(\n \"attempting to source non-xonsh file! If you are \"\n \"trying to source a file in another language, \"\n \"then please use the appropriate source command. 
\"\n \"For example, source-bash script.sh\"\n )\n with open(fpath, encoding=encoding, errors=errors) as fp:\n src = fp.read()\n if not src.endswith(\"\\n\"):\n src += \"\\n\"\n ctx = XSH.ctx\n updates = {\"__file__\": fpath, \"__name__\": os.path.abspath(fpath)}\n with env.swap(**make_args_env(args[i + 1 :])), swap_values(ctx, updates):\n try:\n XSH.builtins.execx(src, \"exec\", ctx, filename=fpath)\n except Exception:\n print_color(\n \"{RED}You may be attempting to source non-xonsh file! \"\n \"{RESET}If you are trying to source a file in \"\n \"another language, then please use the appropriate \"\n \"source command. For example, {GREEN}source-bash \"\n \"script.sh{RESET}\",\n file=sys.stderr,\n )\n raise\n\n\ndef source_cmd_fn(\n files: Annotated[tp.List[str], Arg(nargs=\"+\")],\n login=False,\n aliascmd=None,\n extra_args=\"\",\n safe=True,\n postcmd=\"\",\n funcscmd=\"\",\n seterrprevcmd=None,\n overwrite_aliases=False,\n suppress_skip_message=False,\n show=False,\n dryrun=False,\n _stderr=None,\n):\n \"\"\"\n Source cmd.exe files\n\n Parameters\n ----------\n files\n paths to source files.\n login : -l, --login\n whether the sourced shell should be login\n envcmd : --envcmd\n command to print environment\n aliascmd : --aliascmd\n command to print aliases\n extra_args : --extra-args\n extra arguments needed to run the shell\n safe : -s, --safe\n whether the source shell should be run safely, and not raise any errors, even if they occur.\n postcmd : --postcmd\n command(s) to run after all other commands\n funcscmd : --funcscmd\n code to find locations of all native functions in the shell language.\n seterrprevcmd : --seterrprevcmd\n command(s) to set exit-on-error before any other commands.\n overwrite_aliases : --overwrite-aliases\n flag for whether or not sourced aliases should replace the current xonsh aliases.\n suppress_skip_message : --suppress-skip-message\n flag for whether or not skip messages should be suppressed.\n show : --show\n show the script output.\n dryrun : -d, --dry-run\n Will not actually source the file.\n \"\"\"\n args = list(files)\n fpath = locate_binary(args[0])\n args[0] = fpath if fpath else args[0]\n if not os.path.isfile(args[0]):\n return (None, f\"xonsh: error: File not found: {args[0]}\\n\", 1)\n prevcmd = \"call \"\n prevcmd += \" \".join([argvquote(arg, force=True) for arg in args])\n prevcmd = escape_windows_cmd_string(prevcmd)\n with XSH.env.swap(PROMPT=\"$P$G\"):\n return source_foreign_fn(\n shell=\"cmd\",\n files_or_code=args,\n interactive=True,\n sourcer=\"call\",\n envcmd=\"set\",\n seterrpostcmd=\"if errorlevel 1 exit 1\",\n use_tmpfile=True,\n prevcmd=prevcmd,\n # from this function\n login=login,\n aliascmd=aliascmd,\n extra_args=extra_args,\n safe=safe,\n postcmd=postcmd,\n funcscmd=funcscmd,\n seterrprevcmd=seterrprevcmd,\n overwrite_aliases=overwrite_aliases,\n suppress_skip_message=suppress_skip_message,\n show=show,\n dryrun=dryrun,\n )\n\n\nsource_cmd = ArgParserAlias(func=source_cmd_fn, has_args=True, prog=\"source-cmd\")\n\n\ndef xexec_fn(\n command: Annotated[tp.List[str], Arg(nargs=argparse.REMAINDER)],\n login=False,\n clean=False,\n name=\"\",\n _stdin=None,\n):\n \"\"\"exec (also aliased as xexec) uses the os.execvpe() function to\n replace the xonsh process with the specified program.\n\n This provides the functionality of the bash 'exec' builtin::\n\n >>> exec bash -l -i\n bash $\n\n Parameters\n ----------\n command\n program to launch along its arguments\n login : -l, --login\n the shell places a dash at the\n beginning of 
the zeroth argument passed to command to simulate login\n shell.\n clean : -c, --clean\n causes command to be executed with an empty environment.\n name : -a, --name\n the shell passes name as the zeroth argument\n to the executed command.\n\n Notes\n -----\n This command **is not** the same as the Python builtin function\n exec(). That function is for running Python code. This command,\n which shares the same name as the sh-lang statement, is for launching\n a command directly in the same process. In the event of a name conflict,\n please use the xexec command directly or dive into subprocess mode\n explicitly with ![exec command]. For more details, please see\n http://xon.sh/faq.html#exec.\n \"\"\"\n if len(command) == 0:\n return (None, \"xonsh: exec: no command specified\\n\", 1)\n\n cmd = command[0]\n if name:\n command[0] = name\n if login:\n command[0] = f\"-{command[0]}\"\n\n denv = {}\n if not clean:\n denv = XSH.env.detype()\n\n # decrement $SHLVL to mirror bash's behaviour\n if \"SHLVL\" in denv:\n old_shlvl = to_shlvl(denv[\"SHLVL\"])\n denv[\"SHLVL\"] = str(adjust_shlvl(old_shlvl, -1))\n\n try:\n os.execvpe(cmd, command, denv)\n except FileNotFoundError as e:\n return (\n None,\n \"xonsh: exec: file not found: {}: {}\" \"\\n\".format(e.args[1], command[0]),\n 1,\n )\n\n\nxexec = ArgParserAlias(func=xexec_fn, has_args=True, prog=\"xexec\")\n\n\n@lazyobject\ndef xonfig():\n \"\"\"Runs the xonsh configuration utility.\"\"\"\n from xonsh.xonfig import xonfig_main # lazy import\n\n return xonfig_main\n\n\n@unthreadable\ndef trace(args, stdin=None, stdout=None, stderr=None, spec=None):\n \"\"\"Runs the xonsh tracer utility.\"\"\"\n from xonsh.tracer import tracermain # lazy import\n\n try:\n return tracermain(args, stdin=stdin, stdout=stdout, stderr=stderr, spec=spec)\n except SystemExit:\n pass\n\n\ndef showcmd(args, stdin=None):\n \"\"\"usage: showcmd [-h|--help|cmd args]\n\n Displays the command and arguments as a list of strings that xonsh would\n run in subprocess mode. 
This is useful for determining how xonsh evaluates\n your commands and arguments prior to running these commands.\n\n optional arguments:\n -h, --help show this help message and exit\n\n Examples\n --------\n >>> showcmd echo $USER \"can't\" hear \"the sea\"\n ['echo', 'I', \"can't\", 'hear', 'the sea']\n \"\"\"\n if len(args) == 0 or (len(args) == 1 and args[0] in {\"-h\", \"--help\"}):\n print(showcmd.__doc__.rstrip().replace(\"\\n \", \"\\n\"))\n else:\n sys.displayhook(args)\n\n\ndef detect_xpip_alias():\n \"\"\"\n Determines the correct invocation to get xonsh's pip\n \"\"\"\n if not getattr(sys, \"executable\", None):\n return lambda args, stdin=None: (\n \"\",\n \"Sorry, unable to run pip on your system (missing sys.executable)\",\n 1,\n )\n\n basecmd = [sys.executable, \"-m\", \"pip\"]\n try:\n if ON_WINDOWS or IN_APPIMAGE:\n # XXX: Does windows have an installation mode that requires UAC?\n return basecmd\n elif not os.access(os.path.dirname(sys.executable), os.W_OK):\n return basecmd.extend([\"--user\"])\n else:\n return basecmd\n except Exception:\n # Something freaky happened, return something that'll probably work\n return basecmd\n\n\ndef make_default_aliases():\n \"\"\"Creates a new default aliases dictionary.\"\"\"\n default_aliases = {\n \"cd\": cd,\n \"pushd\": pushd,\n \"popd\": popd,\n \"dirs\": dirs,\n \"jobs\": jobs,\n \"fg\": fg,\n \"bg\": bg,\n \"disown\": disown,\n \"EOF\": xonsh_exit,\n \"exit\": xonsh_exit,\n \"quit\": xonsh_exit,\n \"exec\": xexec,\n \"xexec\": xexec,\n \"source\": source_alias,\n \"source-zsh\": ArgParserAlias(\n func=functools.partial(source_foreign_fn, \"zsh\", sourcer=\"source\"),\n has_args=True,\n prog=\"source-zsh\",\n ),\n \"source-bash\": ArgParserAlias(\n func=functools.partial(source_foreign_fn, \"bash\", sourcer=\"source\"),\n has_args=True,\n prog=\"source-bash\",\n ),\n \"source-cmd\": source_cmd,\n \"source-foreign\": source_foreign,\n \"history\": xhm.history_main,\n \"trace\": trace,\n \"timeit\": timeit_alias,\n \"xonfig\": xonfig,\n \"scp-resume\": [\"rsync\", \"--partial\", \"-h\", \"--progress\", \"--rsh=ssh\"],\n \"showcmd\": showcmd,\n \"ipynb\": [\"jupyter\", \"notebook\", \"--no-browser\"],\n \"which\": xxw.which,\n \"xontrib\": xontribs_main,\n \"completer\": xca.completer_alias,\n \"xpip\": detect_xpip_alias(),\n \"xonsh-reset\": xonsh_reset,\n }\n if ON_WINDOWS:\n # Borrow builtin commands from cmd.exe.\n windows_cmd_aliases = {\n \"cls\",\n \"copy\",\n \"del\",\n \"dir\",\n \"echo\",\n \"erase\",\n \"md\",\n \"mkdir\",\n \"mklink\",\n \"move\",\n \"rd\",\n \"ren\",\n \"rename\",\n \"rmdir\",\n \"time\",\n \"type\",\n \"vol\",\n }\n for alias in windows_cmd_aliases:\n default_aliases[alias] = [\"cmd\", \"/c\", alias]\n default_aliases[\"call\"] = [\"source-cmd\"]\n default_aliases[\"source-bat\"] = [\"source-cmd\"]\n default_aliases[\"clear\"] = \"cls\"\n if ON_ANACONDA:\n # Add aliases specific to the Anaconda python distribution.\n default_aliases[\"activate\"] = [\"source-cmd\", \"activate.bat\"]\n default_aliases[\"deactivate\"] = [\"source-cmd\", \"deactivate.bat\"]\n if not locate_binary(\"sudo\"):\n import xonsh.winutils as winutils\n\n def sudo(args):\n if len(args) < 1:\n print(\n \"You need to provide an executable to run as \" \"Administrator.\"\n )\n return\n cmd = args[0]\n if locate_binary(cmd):\n return winutils.sudo(cmd, args[1:])\n elif cmd.lower() in windows_cmd_aliases:\n args = [\"/D\", \"/C\", \"CD\", _get_cwd(), \"&&\"] + args\n return winutils.sudo(\"cmd\", args)\n else:\n msg = 'Cannot find the 
path for executable \"{0}\".'\n print(msg.format(cmd))\n\n default_aliases[\"sudo\"] = sudo\n elif ON_DARWIN:\n default_aliases[\"ls\"] = [\"ls\", \"-G\"]\n elif ON_FREEBSD or ON_DRAGONFLY:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n default_aliases[\"ls\"] = [\"ls\", \"-G\"]\n elif ON_NETBSD:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n elif ON_OPENBSD:\n pass\n else:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n default_aliases[\"ls\"] = [\"ls\", \"--color=auto\", \"-v\"]\n return default_aliases\n", "path": "xonsh/aliases.py"}]} |
gh_patches_debug_1434 | rasdani/github-patches | git_diff | pypi__warehouse-8550 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/pypi/{package}/{version}/json: yanking an older release updates latest release's yanked_reason field
**Describe the bug**
Yanking an older version of a package leads to an unexpected side effect for the latest version's package info provided via the JSON endpoint. In particular, the `yanked_reason` field gets updated.
**Expected behavior**
When yanking a version of a package, no other version's `yanked_reason` field should be updated.
**To Reproduce**
1. Create new package on test.pypi.org
2. Release version `0.2.0`.
3. Release version `0.3.0`.
4. Yank version `0.2.0`.
5. Check the JSON endpoint of package version `0.3.0`.
```console
$ curl -sL https://test.pypi.org/pypi/abn-test-rss-yank/0.3.0/json | jq '.info.yanked'
false
$ curl -sL https://test.pypi.org/pypi/abn-test-rss-yank/0.3.0/json | jq '.info.yanked_reason'
"Testing Yank"
```
**My Platform**
N/A
**Additional context**
* Test package: https://test.pypi.org/project/abn-test-rss-yank/
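* A plausible explanation for the behavior above is that the JSON serializer builds per-release data in a loop over all releases and then reads the leftover loop variable, bound to the last release iterated, instead of the requested release when filling in `yanked_reason`. The snippet below is a minimal, self-contained sketch of that bug class using hypothetical data rather than Warehouse's actual code:

```python
# Minimal sketch (hypothetical data, not Warehouse code): a leftover loop
# variable leaks the yanked release's reason into another release's payload.
releases = [
    {"version": "0.3.0", "yanked": False, "yanked_reason": None},
    {"version": "0.2.0", "yanked": True, "yanked_reason": "Testing Yank"},
]

def serialize(requested_version):
    for r in releases:  # stand-in for building the per-release file listings
        pass            # after the loop, r stays bound to the *last* release (0.2.0)
    release = next(x for x in releases if x["version"] == requested_version)
    return {
        "yanked": release["yanked"],
        "yanked_reason": r["yanked_reason"],  # bug: should read release, not r
    }

print(serialize("0.3.0"))  # {'yanked': False, 'yanked_reason': 'Testing Yank'}
```

Running the sketch reproduces the mismatch seen in the curl output above: `yanked` is `False` while `yanked_reason` is `"Testing Yank"`.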
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/legacy/api/json.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from collections import OrderedDict
14
15 from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
16 from pyramid.view import view_config
17 from sqlalchemy.orm import Load
18 from sqlalchemy.orm.exc import NoResultFound
19
20 from warehouse.cache.http import cache_control
21 from warehouse.cache.origin import origin_cache
22 from warehouse.packaging.models import File, Project, Release
23
24 # Generate appropriate CORS headers for the JSON endpoint.
25 # We want to allow Cross-Origin requests here so that users can interact
26 # with these endpoints via XHR/Fetch APIs in the browser.
27 _CORS_HEADERS = {
28 "Access-Control-Allow-Origin": "*",
29 "Access-Control-Allow-Headers": ", ".join(
30 [
31 "Content-Type",
32 "If-Match",
33 "If-Modified-Since",
34 "If-None-Match",
35 "If-Unmodified-Since",
36 ]
37 ),
38 "Access-Control-Allow-Methods": "GET",
39 "Access-Control-Max-Age": "86400", # 1 day.
40 "Access-Control-Expose-Headers": ", ".join(["X-PyPI-Last-Serial"]),
41 }
42
43 _CACHE_DECORATOR = [
44 cache_control(15 * 60), # 15 minutes
45 origin_cache(
46 1 * 24 * 60 * 60, # 1 day
47 stale_while_revalidate=5 * 60, # 5 minutes
48 stale_if_error=1 * 24 * 60 * 60, # 1 day
49 ),
50 ]
51
52
53 @view_config(
54 route_name="legacy.api.json.project",
55 context=Project,
56 renderer="json",
57 decorator=_CACHE_DECORATOR,
58 )
59 def json_project(project, request):
60 if project.name != request.matchdict.get("name", project.name):
61 return HTTPMovedPermanently(
62 request.current_route_path(name=project.name), headers=_CORS_HEADERS
63 )
64
65 try:
66 release = (
67 request.db.query(Release)
68 .filter(Release.project == project, Release.yanked.is_(False))
69 .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())
70 .limit(1)
71 .one()
72 )
73 except NoResultFound:
74 return HTTPNotFound(headers=_CORS_HEADERS)
75
76 return json_release(release, request)
77
78
79 @view_config(
80 route_name="legacy.api.json.project_slash",
81 context=Project,
82 decorator=_CACHE_DECORATOR,
83 )
84 def json_project_slash(project, request):
85 return HTTPMovedPermanently(
86 # Respond with redirect to url without trailing slash
87 request.route_path("legacy.api.json.project", name=project.name),
88 headers=_CORS_HEADERS,
89 )
90
91
92 @view_config(
93 route_name="legacy.api.json.release",
94 context=Release,
95 renderer="json",
96 decorator=_CACHE_DECORATOR,
97 )
98 def json_release(release, request):
99 project = release.project
100
101 if project.name != request.matchdict.get("name", project.name):
102 return HTTPMovedPermanently(
103 request.current_route_path(name=project.name), headers=_CORS_HEADERS
104 )
105
106 # Apply CORS headers.
107 request.response.headers.update(_CORS_HEADERS)
108
109 # Get the latest serial number for this project.
110 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
111
112 # Get all of the releases and files for this project.
113 release_files = (
114 request.db.query(Release, File)
115 .options(
116 Load(Release).load_only(
117 "version", "requires_python", "yanked", "yanked_reason"
118 )
119 )
120 .outerjoin(File)
121 .filter(Release.project == project)
122 .order_by(Release._pypi_ordering.desc(), File.filename)
123 .all()
124 )
125
126 # Map our releases + files into a dictionary that maps each release to a
127 # list of all its files.
128 releases = {}
129 for r, file_ in release_files:
130 files = releases.setdefault(r, [])
131 if file_ is not None:
132 files.append(file_)
133
134 # Serialize our database objects to match the way that PyPI legacy
135 # presented this data.
136 releases = {
137 r.version: [
138 {
139 "filename": f.filename,
140 "packagetype": f.packagetype,
141 "python_version": f.python_version,
142 "has_sig": f.has_signature,
143 "comment_text": f.comment_text,
144 "md5_digest": f.md5_digest,
145 "digests": {"md5": f.md5_digest, "sha256": f.sha256_digest},
146 "size": f.size,
147 # TODO: Remove this once we've had a long enough time with it
148 # here to consider it no longer in use.
149 "downloads": -1,
150 "upload_time": f.upload_time.strftime("%Y-%m-%dT%H:%M:%S"),
151 "upload_time_iso_8601": f.upload_time.isoformat() + "Z",
152 "url": request.route_url("packaging.file", path=f.path),
153 "requires_python": r.requires_python if r.requires_python else None,
154 "yanked": r.yanked,
155 "yanked_reason": r.yanked_reason or None,
156 }
157 for f in fs
158 ]
159 for r, fs in releases.items()
160 }
161
162 return {
163 "info": {
164 "name": project.name,
165 "version": release.version,
166 "summary": release.summary,
167 "description_content_type": release.description.content_type,
168 "description": release.description.raw,
169 "keywords": release.keywords,
170 "license": release.license,
171 "classifiers": list(release.classifiers),
172 "author": release.author,
173 "author_email": release.author_email,
174 "maintainer": release.maintainer,
175 "maintainer_email": release.maintainer_email,
176 "requires_python": release.requires_python,
177 "platform": release.platform,
178 "downloads": {"last_day": -1, "last_week": -1, "last_month": -1},
179 "package_url": request.route_url("packaging.project", name=project.name),
180 "project_url": request.route_url("packaging.project", name=project.name),
181 "project_urls": OrderedDict(release.urls) if release.urls else None,
182 "release_url": request.route_url(
183 "packaging.release", name=project.name, version=release.version
184 ),
185 "requires_dist": (
186 list(release.requires_dist) if release.requires_dist else None
187 ),
188 "docs_url": project.documentation_url,
189 "bugtrack_url": None,
190 "home_page": release.home_page,
191 "download_url": release.download_url,
192 "yanked": release.yanked,
193 "yanked_reason": r.yanked_reason or None,
194 },
195 "urls": releases[release.version],
196 "releases": releases,
197 "last_serial": project.last_serial,
198 }
199
200
201 @view_config(
202 route_name="legacy.api.json.release_slash",
203 context=Release,
204 decorator=_CACHE_DECORATOR,
205 )
206 def json_release_slash(release, request):
207 return HTTPMovedPermanently(
208 # Respond with redirect to url without trailing slash
209 request.route_path(
210 "legacy.api.json.release",
211 name=release.project.name,
212 version=release.version,
213 ),
214 headers=_CORS_HEADERS,
215 )
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py
--- a/warehouse/legacy/api/json.py
+++ b/warehouse/legacy/api/json.py
@@ -190,7 +190,7 @@
"home_page": release.home_page,
"download_url": release.download_url,
"yanked": release.yanked,
- "yanked_reason": r.yanked_reason or None,
+ "yanked_reason": release.yanked_reason or None,
},
"urls": releases[release.version],
"releases": releases,
| {"golden_diff": "diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py\n--- a/warehouse/legacy/api/json.py\n+++ b/warehouse/legacy/api/json.py\n@@ -190,7 +190,7 @@\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n \"yanked\": release.yanked,\n- \"yanked_reason\": r.yanked_reason or None,\n+ \"yanked_reason\": release.yanked_reason or None,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n", "issue": "/pypi/{package}/{version}/json: yanking an older release updates latest release's yanked_reason field\n**Describe the bug**\r\nYanking an older version of a package leads to unexpected side-effect for latest version's package info provided via the JSON endpoint. In particular, the `yanked_reason` field gets updated.\r\n\r\n**Expected behavior**\r\nWhen yanking a version of a package, no other verision's `yanked_reason` field should be updated.\r\n\r\n**To Reproduce**\r\n1. Create new package on test.pypi.org\r\n2. Release version `0.2.0`.\r\n3. Release version `0.3.0`.\r\n4. Yank version `0.2.0`.\r\n5. Check json endpoint of package version `0.3.0`.\r\n\r\n```console\r\n$ curl -sL https://test.pypi.org/pypi/abn-test-rss-yank/0.3.0/json | jq '.info.yanked'\r\nfalse\r\n$ curl -sL https://test.pypi.org/pypi/abn-test-rss-yank/0.3.0/json | jq '.info.yanked_reason'\r\n\"Testing Yank\"\r\n```\r\n\r\n**My Platform**\r\nN/A\r\n\r\n**Additional context**\r\n* Test package: https://test.pypi.org/project/abn-test-rss-yank/\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import Load\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import File, Project, Release\n\n# Generate appropriate CORS headers for the JSON endpoint.\n# We want to allow Cross-Origin requests here so that users can interact\n# with these endpoints via XHR/Fetch APIs in the browser.\n_CORS_HEADERS = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \", \".join(\n [\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n \"If-Unmodified-Since\",\n ]\n ),\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Access-Control-Max-Age\": \"86400\", # 1 day.\n \"Access-Control-Expose-Headers\": \", \".join([\"X-PyPI-Last-Serial\"]),\n}\n\n_CACHE_DECORATOR = [\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n]\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n context=Project,\n renderer=\"json\",\n decorator=_CACHE_DECORATOR,\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return 
HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project, Release.yanked.is_(False))\n .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n return HTTPNotFound(headers=_CORS_HEADERS)\n\n return json_release(release, request)\n\n\n@view_config(\n route_name=\"legacy.api.json.project_slash\",\n context=Project,\n decorator=_CACHE_DECORATOR,\n)\ndef json_project_slash(project, request):\n return HTTPMovedPermanently(\n # Respond with redirect to url without trailing slash\n request.route_path(\"legacy.api.json.project\", name=project.name),\n headers=_CORS_HEADERS,\n )\n\n\n@view_config(\n route_name=\"legacy.api.json.release\",\n context=Release,\n renderer=\"json\",\n decorator=_CACHE_DECORATOR,\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n # Apply CORS headers.\n request.response.headers.update(_CORS_HEADERS)\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .options(\n Load(Release).load_only(\n \"version\", \"requires_python\", \"yanked\", \"yanked_reason\"\n )\n )\n .outerjoin(File)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = {}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\"md5\": f.md5_digest, \"sha256\": f.sha256_digest},\n \"size\": f.size,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"upload_time_iso_8601\": f.upload_time.isoformat() + \"Z\",\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n \"requires_python\": r.requires_python if r.requires_python else None,\n \"yanked\": r.yanked,\n \"yanked_reason\": r.yanked_reason or None,\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description_content_type\": release.description.content_type,\n \"description\": release.description.raw,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\"last_day\": -1, \"last_week\": -1, \"last_month\": -1},\n 
\"package_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_urls\": OrderedDict(release.urls) if release.urls else None,\n \"release_url\": request.route_url(\n \"packaging.release\", name=project.name, version=release.version\n ),\n \"requires_dist\": (\n list(release.requires_dist) if release.requires_dist else None\n ),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": None,\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n \"yanked\": release.yanked,\n \"yanked_reason\": r.yanked_reason or None,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n \"last_serial\": project.last_serial,\n }\n\n\n@view_config(\n route_name=\"legacy.api.json.release_slash\",\n context=Release,\n decorator=_CACHE_DECORATOR,\n)\ndef json_release_slash(release, request):\n return HTTPMovedPermanently(\n # Respond with redirect to url without trailing slash\n request.route_path(\n \"legacy.api.json.release\",\n name=release.project.name,\n version=release.version,\n ),\n headers=_CORS_HEADERS,\n )\n", "path": "warehouse/legacy/api/json.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import Load\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import File, Project, Release\n\n# Generate appropriate CORS headers for the JSON endpoint.\n# We want to allow Cross-Origin requests here so that users can interact\n# with these endpoints via XHR/Fetch APIs in the browser.\n_CORS_HEADERS = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \", \".join(\n [\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n \"If-Unmodified-Since\",\n ]\n ),\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Access-Control-Max-Age\": \"86400\", # 1 day.\n \"Access-Control-Expose-Headers\": \", \".join([\"X-PyPI-Last-Serial\"]),\n}\n\n_CACHE_DECORATOR = [\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n]\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n context=Project,\n renderer=\"json\",\n decorator=_CACHE_DECORATOR,\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project, Release.yanked.is_(False))\n .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n 
except NoResultFound:\n return HTTPNotFound(headers=_CORS_HEADERS)\n\n return json_release(release, request)\n\n\n@view_config(\n route_name=\"legacy.api.json.project_slash\",\n context=Project,\n decorator=_CACHE_DECORATOR,\n)\ndef json_project_slash(project, request):\n return HTTPMovedPermanently(\n # Respond with redirect to url without trailing slash\n request.route_path(\"legacy.api.json.project\", name=project.name),\n headers=_CORS_HEADERS,\n )\n\n\n@view_config(\n route_name=\"legacy.api.json.release\",\n context=Release,\n renderer=\"json\",\n decorator=_CACHE_DECORATOR,\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n # Apply CORS headers.\n request.response.headers.update(_CORS_HEADERS)\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .options(\n Load(Release).load_only(\n \"version\", \"requires_python\", \"yanked\", \"yanked_reason\"\n )\n )\n .outerjoin(File)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = {}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\"md5\": f.md5_digest, \"sha256\": f.sha256_digest},\n \"size\": f.size,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"upload_time_iso_8601\": f.upload_time.isoformat() + \"Z\",\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n \"requires_python\": r.requires_python if r.requires_python else None,\n \"yanked\": r.yanked,\n \"yanked_reason\": r.yanked_reason or None,\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description_content_type\": release.description.content_type,\n \"description\": release.description.raw,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\"last_day\": -1, \"last_week\": -1, \"last_month\": -1},\n \"package_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_urls\": OrderedDict(release.urls) if release.urls else None,\n \"release_url\": request.route_url(\n \"packaging.release\", name=project.name, 
version=release.version\n ),\n \"requires_dist\": (\n list(release.requires_dist) if release.requires_dist else None\n ),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": None,\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n \"yanked\": release.yanked,\n \"yanked_reason\": release.yanked_reason or None,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n \"last_serial\": project.last_serial,\n }\n\n\n@view_config(\n route_name=\"legacy.api.json.release_slash\",\n context=Release,\n decorator=_CACHE_DECORATOR,\n)\ndef json_release_slash(release, request):\n return HTTPMovedPermanently(\n # Respond with redirect to url without trailing slash\n request.route_path(\n \"legacy.api.json.release\",\n name=release.project.name,\n version=release.version,\n ),\n headers=_CORS_HEADERS,\n )\n", "path": "warehouse/legacy/api/json.py"}]} |
gh_patches_debug_1435 | rasdani/github-patches | git_diff | translate__pootle-5160 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ensure tests can be run with `--reuse-db`
When iterating over a test that requires DB access (or a few of them), currently a site-wide setup is made, which in such a scenario ends up being relatively time-consuming and tedious.
Ideally one could use [pytest-django's `--reuse-db` flag](http://pytest-django.readthedocs.org/en/latest/database.html#reuse-db-reuse-the-testing-database-between-test-runs) to considerably reduce setup time on test iterations; however, in the current state of things this feature cannot be used due to the way the Pootle test DB environment is set up.
Let's try to fix that so we can benefit from `--reuse-db`.
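One possible direction, sketched below rather than prescribed, is to make the session-scoped DB setup fixture in `pytest_pootle/plugin.py` honor pytest-django's `--reuse-db` option, which is exposed on the pytest config under the `reuse_db` name:

```python
# Sketch only: skip the expensive site-wide DB setup when --reuse-db is passed.
# tests_use_db and post_db_setup are session fixtures defined alongside this one
# in pytest_pootle/plugin.py.
import pytest


@pytest.fixture(autouse=True, scope="session")
def setup_db_if_needed(request, tests_use_db):
    """Set up the site DB only if tests need it and --reuse-db is not in effect."""
    if tests_use_db and not request.config.getvalue("reuse_db"):
        return request.getfuncargvalue("post_db_setup")
```

With `--reuse-db` the test database created on a previous run is kept, so the site-wide setup only needs to happen again when `--create-db` forces a fresh database.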
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytest_pootle/plugin.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import os
10 import shutil
11 from pkgutil import iter_modules
12
13 import pytest
14
15 from . import fixtures
16 from .env import PootleTestEnv
17 from .fixtures import models as fixtures_models
18 from .fixtures.core import management as fixtures_core_management
19 from .fixtures.core import utils as fixtures_core_utils
20 from .fixtures import formats as fixtures_formats
21 from .fixtures import pootle_fs as fixtures_fs
22
23
24 def _load_fixtures(*modules):
25 for mod in modules:
26 path = mod.__path__
27 prefix = '%s.' % mod.__name__
28
29 for loader_, name, is_pkg in iter_modules(path, prefix):
30 if not is_pkg:
31 yield name
32
33
34 @pytest.fixture
35 def po_test_dir(request, tmpdir):
36 po_dir = str(tmpdir.mkdir("po"))
37
38 def rm_po_dir():
39 if os.path.exists(po_dir):
40 shutil.rmtree(po_dir)
41
42 request.addfinalizer(rm_po_dir)
43 return po_dir
44
45
46 @pytest.fixture
47 def po_directory(request, po_test_dir, settings):
48 """Sets up a tmp directory for PO files."""
49 from pootle_store.models import fs
50
51 translation_directory = settings.POOTLE_TRANSLATION_DIRECTORY
52
53 # Adjust locations
54 settings.POOTLE_TRANSLATION_DIRECTORY = po_test_dir
55 fs.location = po_test_dir
56
57 def _cleanup():
58 settings.POOTLE_TRANSLATION_DIRECTORY = translation_directory
59
60 request.addfinalizer(_cleanup)
61
62
63 @pytest.fixture(scope='session')
64 def tests_use_db(request):
65 return bool(
66 [item for item in request.node.items
67 if item.get_marker('django_db')])
68
69
70 @pytest.fixture(scope='session')
71 def tests_use_vfolders(request):
72 return bool(
73 [item for item in request.node.items
74 if item.get_marker('pootle_vfolders')])
75
76
77 @pytest.fixture(scope='session')
78 def tests_use_migration(request, tests_use_db):
79 return bool(
80 tests_use_db
81 and [item for item in request.node.items
82 if item.get_marker('django_migration')])
83
84
85 @pytest.fixture(autouse=True, scope='session')
86 def setup_db_if_needed(request, tests_use_db):
87 """Sets up the site DB only if tests requested to use the DB (autouse)."""
88 if tests_use_db:
89 return request.getfuncargvalue('post_db_setup')
90
91
92 @pytest.fixture(scope='session')
93 def post_db_setup(translations_directory, django_db_setup, django_db_blocker,
94 tests_use_db, tests_use_vfolders, request):
95 """Sets up the site DB for the test session."""
96 if tests_use_db:
97 with django_db_blocker.unblock():
98 PootleTestEnv().setup(
99 vfolders=tests_use_vfolders)
100
101
102 @pytest.fixture(scope='session')
103 def django_db_use_migrations(tests_use_migration):
104 return tests_use_migration
105
106
107 pytest_plugins = tuple(
108 _load_fixtures(
109 fixtures,
110 fixtures_core_management,
111 fixtures_core_utils,
112 fixtures_formats,
113 fixtures_models,
114 fixtures_fs))
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytest_pootle/plugin.py b/pytest_pootle/plugin.py
--- a/pytest_pootle/plugin.py
+++ b/pytest_pootle/plugin.py
@@ -85,7 +85,7 @@
@pytest.fixture(autouse=True, scope='session')
def setup_db_if_needed(request, tests_use_db):
"""Sets up the site DB only if tests requested to use the DB (autouse)."""
- if tests_use_db:
+ if tests_use_db and not request.config.getvalue('reuse_db'):
return request.getfuncargvalue('post_db_setup')
| {"golden_diff": "diff --git a/pytest_pootle/plugin.py b/pytest_pootle/plugin.py\n--- a/pytest_pootle/plugin.py\n+++ b/pytest_pootle/plugin.py\n@@ -85,7 +85,7 @@\n @pytest.fixture(autouse=True, scope='session')\n def setup_db_if_needed(request, tests_use_db):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n- if tests_use_db:\n+ if tests_use_db and not request.config.getvalue('reuse_db'):\n return request.getfuncargvalue('post_db_setup')\n", "issue": "Ensure tests can be run with `--reuse-db`\nWhen iterating over a test that require DB access (or a few of them), currently a site-wide setup is made which in such scenario ends up being relatively time-consuming and tedious.\n\nIdeally one could use [pytest-django's `--reuse-db` flag](http://pytest-django.readthedocs.org/en/latest/database.html#reuse-db-reuse-the-testing-database-between-test-runs) to considerably reduce setup time on test iterations, however at the current state of things such feature cannot be used due to the way the Pootle test DB environment is setup.\n\nLet's try to fix that so we can benefit from `--reuse-db`.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nimport shutil\nfrom pkgutil import iter_modules\n\nimport pytest\n\nfrom . import fixtures\nfrom .env import PootleTestEnv\nfrom .fixtures import models as fixtures_models\nfrom .fixtures.core import management as fixtures_core_management\nfrom .fixtures.core import utils as fixtures_core_utils\nfrom .fixtures import formats as fixtures_formats\nfrom .fixtures import pootle_fs as fixtures_fs\n\n\ndef _load_fixtures(*modules):\n for mod in modules:\n path = mod.__path__\n prefix = '%s.' 
% mod.__name__\n\n for loader_, name, is_pkg in iter_modules(path, prefix):\n if not is_pkg:\n yield name\n\n\[email protected]\ndef po_test_dir(request, tmpdir):\n po_dir = str(tmpdir.mkdir(\"po\"))\n\n def rm_po_dir():\n if os.path.exists(po_dir):\n shutil.rmtree(po_dir)\n\n request.addfinalizer(rm_po_dir)\n return po_dir\n\n\[email protected]\ndef po_directory(request, po_test_dir, settings):\n \"\"\"Sets up a tmp directory for PO files.\"\"\"\n from pootle_store.models import fs\n\n translation_directory = settings.POOTLE_TRANSLATION_DIRECTORY\n\n # Adjust locations\n settings.POOTLE_TRANSLATION_DIRECTORY = po_test_dir\n fs.location = po_test_dir\n\n def _cleanup():\n settings.POOTLE_TRANSLATION_DIRECTORY = translation_directory\n\n request.addfinalizer(_cleanup)\n\n\[email protected](scope='session')\ndef tests_use_db(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('django_db')])\n\n\[email protected](scope='session')\ndef tests_use_vfolders(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('pootle_vfolders')])\n\n\[email protected](scope='session')\ndef tests_use_migration(request, tests_use_db):\n return bool(\n tests_use_db\n and [item for item in request.node.items\n if item.get_marker('django_migration')])\n\n\[email protected](autouse=True, scope='session')\ndef setup_db_if_needed(request, tests_use_db):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n if tests_use_db:\n return request.getfuncargvalue('post_db_setup')\n\n\[email protected](scope='session')\ndef post_db_setup(translations_directory, django_db_setup, django_db_blocker,\n tests_use_db, tests_use_vfolders, request):\n \"\"\"Sets up the site DB for the test session.\"\"\"\n if tests_use_db:\n with django_db_blocker.unblock():\n PootleTestEnv().setup(\n vfolders=tests_use_vfolders)\n\n\[email protected](scope='session')\ndef django_db_use_migrations(tests_use_migration):\n return tests_use_migration\n\n\npytest_plugins = tuple(\n _load_fixtures(\n fixtures,\n fixtures_core_management,\n fixtures_core_utils,\n fixtures_formats,\n fixtures_models,\n fixtures_fs))\n", "path": "pytest_pootle/plugin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nimport shutil\nfrom pkgutil import iter_modules\n\nimport pytest\n\nfrom . import fixtures\nfrom .env import PootleTestEnv\nfrom .fixtures import models as fixtures_models\nfrom .fixtures.core import management as fixtures_core_management\nfrom .fixtures.core import utils as fixtures_core_utils\nfrom .fixtures import formats as fixtures_formats\nfrom .fixtures import pootle_fs as fixtures_fs\n\n\ndef _load_fixtures(*modules):\n for mod in modules:\n path = mod.__path__\n prefix = '%s.' 
% mod.__name__\n\n for loader_, name, is_pkg in iter_modules(path, prefix):\n if not is_pkg:\n yield name\n\n\[email protected]\ndef po_test_dir(request, tmpdir):\n po_dir = str(tmpdir.mkdir(\"po\"))\n\n def rm_po_dir():\n if os.path.exists(po_dir):\n shutil.rmtree(po_dir)\n\n request.addfinalizer(rm_po_dir)\n return po_dir\n\n\[email protected]\ndef po_directory(request, po_test_dir, settings):\n \"\"\"Sets up a tmp directory for PO files.\"\"\"\n from pootle_store.models import fs\n\n translation_directory = settings.POOTLE_TRANSLATION_DIRECTORY\n\n # Adjust locations\n settings.POOTLE_TRANSLATION_DIRECTORY = po_test_dir\n fs.location = po_test_dir\n\n def _cleanup():\n settings.POOTLE_TRANSLATION_DIRECTORY = translation_directory\n\n request.addfinalizer(_cleanup)\n\n\[email protected](scope='session')\ndef tests_use_db(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('django_db')])\n\n\[email protected](scope='session')\ndef tests_use_vfolders(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('pootle_vfolders')])\n\n\[email protected](scope='session')\ndef tests_use_migration(request, tests_use_db):\n return bool(\n tests_use_db\n and [item for item in request.node.items\n if item.get_marker('django_migration')])\n\n\[email protected](autouse=True, scope='session')\ndef setup_db_if_needed(request, tests_use_db):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n if tests_use_db and not request.config.getvalue('reuse_db'):\n return request.getfuncargvalue('post_db_setup')\n\n\[email protected](scope='session')\ndef post_db_setup(translations_directory, django_db_setup, django_db_blocker,\n tests_use_db, tests_use_vfolders, request):\n \"\"\"Sets up the site DB for the test session.\"\"\"\n if tests_use_db:\n with django_db_blocker.unblock():\n PootleTestEnv().setup(\n vfolders=tests_use_vfolders)\n\n\[email protected](scope='session')\ndef django_db_use_migrations(tests_use_migration):\n return tests_use_migration\n\n\npytest_plugins = tuple(\n _load_fixtures(\n fixtures,\n fixtures_core_management,\n fixtures_core_utils,\n fixtures_formats,\n fixtures_models,\n fixtures_fs))\n", "path": "pytest_pootle/plugin.py"}]} |
gh_patches_debug_1436 | rasdani/github-patches | git_diff | pypi__warehouse-6294 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyPI accepts packages with dependencies on local versions (e.g., 0.1.0+local)
PyPI accepts packages with dependencies on local versions (e.g., 0.1.0+local). I'm not sure if this is intentional or not, since PyPI will reject packages whose version is a local version.
I tested that this was the case using this test package:
```
import setuptools
import os
import re
setuptools.setup(
name="test-pypi-version-specifier-main",
version="0.0.2",
author="Edward Z. Yang",
author_email="[email protected]",
description="Testing package",
long_description="Yarr",
long_description_content_type="text/markdown",
url="https://localhost/",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"test-pypi-version-specifier-dep==0.0.1+cuda9",
],
)
```
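
For reference, the snippet below is an illustrative helper (an assumed name, not Warehouse's actual validation code) showing how `packaging` can detect a PEP 440 local version label inside a requirement's pinned version, which is the kind of check the upload path could perform on `Requires-Dist` entries:

```python
# Illustrative sketch (assumed helper, not Warehouse code): detect PEP 440
# local version labels (the "+cuda9" part) inside a requirement specifier.
import packaging.requirements
import packaging.version


def has_local_version_pin(requirement_string):
    """Return True if any version in the specifier carries a local label."""
    req = packaging.requirements.Requirement(requirement_string)
    for spec in req.specifier:
        try:
            version = packaging.version.Version(spec.version)
        except packaging.version.InvalidVersion:
            continue  # e.g. wildcard specifiers such as "==1.*"
        if version.local is not None:
            return True
    return False


print(has_local_version_pin("test-pypi-version-specifier-dep==0.0.1+cuda9"))  # True
print(has_local_version_pin("requests>=2.0"))                                 # False
```

`packaging.version.Version("0.0.1+cuda9").local` evaluates to `"cuda9"`, which is what makes the detection possible.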
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/forklift/legacy.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import email
14 import hashlib
15 import hmac
16 import os.path
17 import re
18 import tarfile
19 import tempfile
20 import zipfile
21
22 from cgi import FieldStorage, parse_header
23 from itertools import chain
24
25 import packaging.requirements
26 import packaging.specifiers
27 import packaging.utils
28 import packaging.version
29 import pkg_resources
30 import requests
31 import stdlib_list
32 import wtforms
33 import wtforms.validators
34
35 from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPGone
36 from pyramid.response import Response
37 from pyramid.view import view_config
38 from sqlalchemy import exists, func, orm
39 from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
40
41 from warehouse import forms
42 from warehouse.admin.squats import Squat
43 from warehouse.classifiers.models import Classifier
44 from warehouse.metrics import IMetricsService
45 from warehouse.packaging.interfaces import IFileStorage
46 from warehouse.packaging.models import (
47 BlacklistedProject,
48 Dependency,
49 DependencyKind,
50 Description,
51 File,
52 Filename,
53 JournalEntry,
54 Project,
55 Release,
56 Role,
57 )
58 from warehouse.utils import http, readme
59
60 MAX_FILESIZE = 60 * 1024 * 1024 # 60M
61 MAX_SIGSIZE = 8 * 1024 # 8K
62
63 PATH_HASHER = "blake2_256"
64
65
66 def namespace_stdlib_list(module_list):
67 for module_name in module_list:
68 parts = module_name.split(".")
69 for i, part in enumerate(parts):
70 yield ".".join(parts[: i + 1])
71
72
73 STDLIB_PROHIBITTED = {
74 packaging.utils.canonicalize_name(s.rstrip("-_.").lstrip("-_."))
75 for s in chain.from_iterable(
76 namespace_stdlib_list(stdlib_list.stdlib_list(version))
77 for version in stdlib_list.short_versions
78 )
79 }
80
81 # Wheel platform checking
82
83 # Note: defining new platform ABI compatibility tags that don't
84 # have a python.org binary release to anchor them is a
85 # complex task that needs more than just OS+architecture info.
86 # For Linux specifically, the platform ABI is defined by each
87 # individual distro version, so wheels built on one version may
88 # not even work on older versions of the same distro, let alone
89 # a completely different distro.
90 #
91 # That means new entries should only be added given an
92 # accompanying ABI spec that explains how to build a
93 # compatible binary (see the manylinux specs as examples).
94
95 # These platforms can be handled by a simple static list:
96 _allowed_platforms = {
97 "any",
98 "win32",
99 "win_amd64",
100 "win_ia64",
101 "manylinux1_x86_64",
102 "manylinux1_i686",
103 "manylinux2010_x86_64",
104 "manylinux2010_i686",
105 "linux_armv6l",
106 "linux_armv7l",
107 }
108 # macosx is a little more complicated:
109 _macosx_platform_re = re.compile(r"macosx_10_(\d+)+_(?P<arch>.*)")
110 _macosx_arches = {
111 "ppc",
112 "ppc64",
113 "i386",
114 "x86_64",
115 "intel",
116 "fat",
117 "fat32",
118 "fat64",
119 "universal",
120 }
121
122
123 # Actual checking code;
124 def _valid_platform_tag(platform_tag):
125 if platform_tag in _allowed_platforms:
126 return True
127 m = _macosx_platform_re.match(platform_tag)
128 if m and m.group("arch") in _macosx_arches:
129 return True
130 return False
131
132
133 _error_message_order = ["metadata_version", "name", "version"]
134
135
136 _dist_file_regexes = {
137 # True/False is for legacy or not.
138 True: re.compile(r".+?\.(exe|tar\.gz|bz2|rpm|deb|zip|tgz|egg|dmg|msi|whl)$", re.I),
139 False: re.compile(r".+?\.(tar\.gz|zip|whl|egg)$", re.I),
140 }
141
142
143 _wheel_file_re = re.compile(
144 r"""
145 ^
146 (?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
147 (
148 (-(?P<build>\d.*?))?
149 -(?P<pyver>.+?)
150 -(?P<abi>.+?)
151 -(?P<plat>.+?)
152 (?:\.whl|\.dist-info)
153 )
154 $
155 """,
156 re.VERBOSE,
157 )
158
159
160 _project_name_re = re.compile(
161 r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
162 )
163
164
165 _legacy_specifier_re = re.compile(r"^(?P<name>\S+)(?: \((?P<specifier>\S+)\))?$")
166
167
168 _valid_description_content_types = {"text/plain", "text/x-rst", "text/markdown"}
169
170 _valid_markdown_variants = {"CommonMark", "GFM"}
171
172
173 def _exc_with_message(exc, message):
174 # The crappy old API that PyPI offered uses the status to pass down
175 # messages to the client. So this function will make that easier to do.
176 resp = exc(message)
177 resp.status = "{} {}".format(resp.status_code, message)
178 return resp
179
180
181 def _validate_pep440_version(form, field):
182 parsed = packaging.version.parse(field.data)
183
184 # Check that this version is a valid PEP 440 version at all.
185 if not isinstance(parsed, packaging.version.Version):
186 raise wtforms.validators.ValidationError(
187 "Start and end with a letter or numeral containing only "
188 "ASCII numeric and '.', '_' and '-'."
189 )
190
191 # Check that this version does not have a PEP 440 local segment attached
192 # to it.
193 if parsed.local is not None:
194 raise wtforms.validators.ValidationError("Can't use PEP 440 local versions.")
195
196
197 def _parse_legacy_requirement(requirement):
198 parsed = _legacy_specifier_re.search(requirement)
199 if parsed is None:
200 raise ValueError("Invalid requirement.")
201 return parsed.groupdict()["name"], parsed.groupdict()["specifier"]
202
203
204 def _validate_pep440_specifier(specifier):
205 try:
206 packaging.specifiers.SpecifierSet(specifier)
207 except packaging.specifiers.InvalidSpecifier:
208 raise wtforms.validators.ValidationError(
209 "Invalid specifier in requirement."
210 ) from None
211
212
213 def _validate_pep440_specifier_field(form, field):
214 return _validate_pep440_specifier(field.data)
215
216
217 def _validate_legacy_non_dist_req(requirement):
218 try:
219 req = packaging.requirements.Requirement(requirement.replace("_", ""))
220 except packaging.requirements.InvalidRequirement:
221 raise wtforms.validators.ValidationError(
222 "Invalid requirement: {!r}".format(requirement)
223 ) from None
224
225 if req.url is not None:
226 raise wtforms.validators.ValidationError(
227 "Can't direct dependency: {!r}".format(requirement)
228 )
229
230 if any(
231 not identifier.isalnum() or identifier[0].isdigit()
232 for identifier in req.name.split(".")
233 ):
234 raise wtforms.validators.ValidationError("Use a valid Python identifier.")
235
236
237 def _validate_legacy_non_dist_req_list(form, field):
238 for datum in field.data:
239 _validate_legacy_non_dist_req(datum)
240
241
242 def _validate_legacy_dist_req(requirement):
243 try:
244 req = packaging.requirements.Requirement(requirement)
245 except packaging.requirements.InvalidRequirement:
246 raise wtforms.validators.ValidationError(
247 "Invalid requirement: {!r}.".format(requirement)
248 ) from None
249
250 if req.url is not None:
251 raise wtforms.validators.ValidationError(
252 "Can't have direct dependency: {!r}".format(requirement)
253 )
254
255
256 def _validate_legacy_dist_req_list(form, field):
257 for datum in field.data:
258 _validate_legacy_dist_req(datum)
259
260
261 def _validate_requires_external(requirement):
262 name, specifier = _parse_legacy_requirement(requirement)
263
264 # TODO: Is it really reasonable to parse the specifier using PEP 440?
265 if specifier is not None:
266 _validate_pep440_specifier(specifier)
267
268
269 def _validate_requires_external_list(form, field):
270 for datum in field.data:
271 _validate_requires_external(datum)
272
273
274 def _validate_project_url(value):
275 try:
276 label, url = value.split(", ", 1)
277 except ValueError:
278 raise wtforms.validators.ValidationError(
279 "Use both a label and an URL."
280 ) from None
281
282 if not label:
283 raise wtforms.validators.ValidationError("Use a label.")
284
285 if len(label) > 32:
286 raise wtforms.validators.ValidationError("Use 32 characters or less.")
287
288 if not url:
289 raise wtforms.validators.ValidationError("Use an URL.")
290
291 if not http.is_valid_uri(url, require_authority=False):
292 raise wtforms.validators.ValidationError("Use valid URL.")
293
294
295 def _validate_project_url_list(form, field):
296 for datum in field.data:
297 _validate_project_url(datum)
298
299
300 def _validate_rfc822_email_field(form, field):
301 email_validator = wtforms.validators.Email(message="Use a valid email address")
302 addresses = email.utils.getaddresses([field.data])
303
304 for real_name, address in addresses:
305 email_validator(form, type("field", (), {"data": address}))
306
307
308 def _validate_description_content_type(form, field):
309 def _raise(message):
310 raise wtforms.validators.ValidationError(
311 f"Invalid description content type: {message}"
312 )
313
314 content_type, parameters = parse_header(field.data)
315 if content_type not in _valid_description_content_types:
316 _raise("type/subtype is not valid")
317
318 charset = parameters.get("charset")
319 if charset and charset != "UTF-8":
320 _raise("Use a valid charset")
321
322 variant = parameters.get("variant")
323 if (
324 content_type == "text/markdown"
325 and variant
326 and variant not in _valid_markdown_variants
327 ):
328 _raise(
329 "Use a valid variant, expected one of {}".format(
330 ", ".join(_valid_markdown_variants)
331 )
332 )
333
334
335 def _construct_dependencies(form, types):
336 for name, kind in types.items():
337 for item in getattr(form, name).data:
338 yield Dependency(kind=kind.value, specifier=item)
339
340
341 class ListField(wtforms.Field):
342 def process_formdata(self, valuelist):
343 self.data = [v.strip() for v in valuelist if v.strip()]
344
345
346 # TODO: Eventually this whole validation thing should move to the packaging
347 # library and we should just call that. However until PEP 426 is done
348 # that library won't have an API for this.
349 class MetadataForm(forms.Form):
350
351 # Metadata version
352 metadata_version = wtforms.StringField(
353 description="Metadata-Version",
354 validators=[
355 wtforms.validators.DataRequired(),
356 wtforms.validators.AnyOf(
357 # Note: This isn't really Metadata 2.0, however bdist_wheel
358 # claims it is producing a Metadata 2.0 metadata when in
359 # reality it's more like 1.2 with some extensions.
360 ["1.0", "1.1", "1.2", "2.0", "2.1"],
361 message="Use a known metadata version.",
362 ),
363 ],
364 )
365
366 # Identity Project and Release
367 name = wtforms.StringField(
368 description="Name",
369 validators=[
370 wtforms.validators.DataRequired(),
371 wtforms.validators.Regexp(
372 _project_name_re,
373 re.IGNORECASE,
374 message=(
375 "Start and end with a letter or numeral containing "
376 "only ASCII numeric and '.', '_' and '-'."
377 ),
378 ),
379 ],
380 )
381 version = wtforms.StringField(
382 description="Version",
383 validators=[
384 wtforms.validators.DataRequired(),
385 wtforms.validators.Regexp(
386 r"^(?!\s).*(?<!\s)$",
387 message="Can't have leading or trailing whitespace.",
388 ),
389 _validate_pep440_version,
390 ],
391 )
392
393 # Additional Release metadata
394 summary = wtforms.StringField(
395 description="Summary",
396 validators=[
397 wtforms.validators.Optional(),
398 wtforms.validators.Length(max=512),
399 wtforms.validators.Regexp(
400 r"^.+$", # Rely on the fact that . doesn't match a newline.
401 message="Use a single line only.",
402 ),
403 ],
404 )
405 description = wtforms.StringField(
406 description="Description", validators=[wtforms.validators.Optional()]
407 )
408 author = wtforms.StringField(
409 description="Author", validators=[wtforms.validators.Optional()]
410 )
411 description_content_type = wtforms.StringField(
412 description="Description-Content-Type",
413 validators=[wtforms.validators.Optional(), _validate_description_content_type],
414 )
415 author_email = wtforms.StringField(
416 description="Author-email",
417 validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],
418 )
419 maintainer = wtforms.StringField(
420 description="Maintainer", validators=[wtforms.validators.Optional()]
421 )
422 maintainer_email = wtforms.StringField(
423 description="Maintainer-email",
424 validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],
425 )
426 license = wtforms.StringField(
427 description="License", validators=[wtforms.validators.Optional()]
428 )
429 keywords = wtforms.StringField(
430 description="Keywords", validators=[wtforms.validators.Optional()]
431 )
432 classifiers = wtforms.fields.SelectMultipleField(description="Classifier")
433 platform = wtforms.StringField(
434 description="Platform", validators=[wtforms.validators.Optional()]
435 )
436
437 # URLs
438 home_page = wtforms.StringField(
439 description="Home-Page",
440 validators=[wtforms.validators.Optional(), forms.URIValidator()],
441 )
442 download_url = wtforms.StringField(
443 description="Download-URL",
444 validators=[wtforms.validators.Optional(), forms.URIValidator()],
445 )
446
447 # Dependency Information
448 requires_python = wtforms.StringField(
449 description="Requires-Python",
450 validators=[wtforms.validators.Optional(), _validate_pep440_specifier_field],
451 )
452
453 # File information
454 pyversion = wtforms.StringField(validators=[wtforms.validators.Optional()])
455 filetype = wtforms.StringField(
456 validators=[
457 wtforms.validators.DataRequired(),
458 wtforms.validators.AnyOf(
459 [
460 "bdist_dmg",
461 "bdist_dumb",
462 "bdist_egg",
463 "bdist_msi",
464 "bdist_rpm",
465 "bdist_wheel",
466 "bdist_wininst",
467 "sdist",
468 ],
469 message="Use a known file type.",
470 ),
471 ]
472 )
473 comment = wtforms.StringField(validators=[wtforms.validators.Optional()])
474 md5_digest = wtforms.StringField(validators=[wtforms.validators.Optional()])
475 sha256_digest = wtforms.StringField(
476 validators=[
477 wtforms.validators.Optional(),
478 wtforms.validators.Regexp(
479 r"^[A-F0-9]{64}$",
480 re.IGNORECASE,
481 message="Use a valid, hex-encoded, SHA256 message digest.",
482 ),
483 ]
484 )
485 blake2_256_digest = wtforms.StringField(
486 validators=[
487 wtforms.validators.Optional(),
488 wtforms.validators.Regexp(
489 r"^[A-F0-9]{64}$",
490 re.IGNORECASE,
491 message="Use a valid, hex-encoded, BLAKE2 message digest.",
492 ),
493 ]
494 )
495
496 # Legacy dependency information
497 requires = ListField(
498 validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]
499 )
500 provides = ListField(
501 validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]
502 )
503 obsoletes = ListField(
504 validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]
505 )
506
507 # Newer dependency information
508 requires_dist = ListField(
509 description="Requires-Dist",
510 validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],
511 )
512 provides_dist = ListField(
513 description="Provides-Dist",
514 validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],
515 )
516 obsoletes_dist = ListField(
517 description="Obsoletes-Dist",
518 validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],
519 )
520 requires_external = ListField(
521 description="Requires-External",
522 validators=[wtforms.validators.Optional(), _validate_requires_external_list],
523 )
524
525 # Newer metadata information
526 project_urls = ListField(
527 description="Project-URL",
528 validators=[wtforms.validators.Optional(), _validate_project_url_list],
529 )
530
531 def full_validate(self):
532 # All non source releases *must* have a pyversion
533 if (
534 self.filetype.data
535 and self.filetype.data != "sdist"
536 and not self.pyversion.data
537 ):
538 raise wtforms.validators.ValidationError(
539 "Python version is required for binary distribution uploads."
540 )
541
542 # All source releases *must* have a pyversion of "source"
543 if self.filetype.data == "sdist":
544 if not self.pyversion.data:
545 self.pyversion.data = "source"
546 elif self.pyversion.data != "source":
547 raise wtforms.validators.ValidationError(
548 "Use 'source' as Python version for an sdist."
549 )
550
551 # We *must* have at least one digest to verify against.
552 if not self.md5_digest.data and not self.sha256_digest.data:
553 raise wtforms.validators.ValidationError(
554 "Include at least one message digest."
555 )
556
557
558 _safe_zipnames = re.compile(r"(purelib|platlib|headers|scripts|data).+", re.I)
559 # .tar uncompressed, .tar.gz .tgz, .tar.bz2 .tbz2
560 _tar_filenames_re = re.compile(r"\.(?:tar$|t(?:ar\.)?(?P<z_type>gz|bz2)$)")
561
562
563 def _is_valid_dist_file(filename, filetype):
564 """
565 Perform some basic checks to see whether the indicated file could be
566 a valid distribution file.
567 """
568
569 # If our file is a zipfile, then ensure that it's members are only
570 # compressed with supported compression methods.
571 if zipfile.is_zipfile(filename):
572 with zipfile.ZipFile(filename) as zfp:
573 for zinfo in zfp.infolist():
574 if zinfo.compress_type not in {
575 zipfile.ZIP_STORED,
576 zipfile.ZIP_DEFLATED,
577 }:
578 return False
579
580 tar_fn_match = _tar_filenames_re.search(filename)
581 if tar_fn_match:
582 # Ensure that this is a valid tar file, and that it contains PKG-INFO.
583 z_type = tar_fn_match.group("z_type") or ""
584 try:
585 with tarfile.open(filename, f"r:{z_type}") as tar:
586 # This decompresses the entire stream to validate it and the
587 # tar within. Easy CPU DoS attack. :/
588 bad_tar = True
589 member = tar.next()
590 while member:
591 parts = os.path.split(member.name)
592 if len(parts) == 2 and parts[1] == "PKG-INFO":
593 bad_tar = False
594 member = tar.next()
595 if bad_tar:
596 return False
597 except tarfile.ReadError:
598 return False
599 elif filename.endswith(".exe"):
600 # The only valid filetype for a .exe file is "bdist_wininst".
601 if filetype != "bdist_wininst":
602 return False
603
604 # Ensure that the .exe is a valid zip file, and that all of the files
605 # contained within it have safe filenames.
606 try:
607 with zipfile.ZipFile(filename, "r") as zfp:
608 # We need the no branch below to work around a bug in
609 # coverage.py where it's detecting a missed branch where there
610 # isn't one.
611 for zipname in zfp.namelist(): # pragma: no branch
612 if not _safe_zipnames.match(zipname):
613 return False
614 except zipfile.BadZipFile:
615 return False
616 elif filename.endswith(".msi"):
617 # The only valid filetype for a .msi is "bdist_msi"
618 if filetype != "bdist_msi":
619 return False
620
621 # Check the first 8 bytes of the MSI file. This was taken from the
622 # legacy implementation of PyPI which itself took it from the
623 # implementation of `file` I believe.
624 with open(filename, "rb") as fp:
625 if fp.read(8) != b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1":
626 return False
627 elif filename.endswith(".zip") or filename.endswith(".egg"):
628 # Ensure that the .zip/.egg is a valid zip file, and that it has a
629 # PKG-INFO file.
630 try:
631 with zipfile.ZipFile(filename, "r") as zfp:
632 for zipname in zfp.namelist():
633 parts = os.path.split(zipname)
634 if len(parts) == 2 and parts[1] == "PKG-INFO":
635 # We need the no branch below to work around a bug in
636 # coverage.py where it's detecting a missed branch
637 # where there isn't one.
638 break # pragma: no branch
639 else:
640 return False
641 except zipfile.BadZipFile:
642 return False
643 elif filename.endswith(".whl"):
644 # Ensure that the .whl is a valid zip file, and that it has a WHEEL
645 # file.
646 try:
647 with zipfile.ZipFile(filename, "r") as zfp:
648 for zipname in zfp.namelist():
649 parts = os.path.split(zipname)
650 if len(parts) == 2 and parts[1] == "WHEEL":
651 # We need the no branch below to work around a bug in
652 # coverage.py where it's detecting a missed branch
653 # where there isn't one.
654 break # pragma: no branch
655 else:
656 return False
657 except zipfile.BadZipFile:
658 return False
659
660 # If we haven't yet decided it's not valid, then we'll assume it is and
661 # allow it.
662 return True
663
664
665 def _is_duplicate_file(db_session, filename, hashes):
666 """
667     Check to see if a file already exists, and if its content matches.
668     A file is considered to exist if its filename *or* blake2 digest is
669 present in a file row in the database.
670
671 Returns:
672 - True: This file is a duplicate and all further processing should halt.
673 - False: This file exists, but it is not a duplicate.
674 - None: This file does not exist.
675 """
676
677 file_ = (
678 db_session.query(File)
679 .filter(
680 (File.filename == filename)
681 | (File.blake2_256_digest == hashes["blake2_256"])
682 )
683 .first()
684 )
685
686 if file_ is not None:
687 return (
688 file_.filename == filename
689 and file_.sha256_digest == hashes["sha256"]
690 and file_.md5_digest == hashes["md5"]
691 and file_.blake2_256_digest == hashes["blake2_256"]
692 )
693
694 return None
695
696
697 def _no_deprecated_classifiers(request):
698 deprecated_classifiers = {
699 classifier.classifier
700 for classifier in (
701 request.db.query(Classifier.classifier)
702 .filter(Classifier.deprecated.is_(True))
703 .all()
704 )
705 }
706
707 def validate_no_deprecated_classifiers(form, field):
708 invalid_classifiers = set(field.data or []) & deprecated_classifiers
709 if invalid_classifiers:
710 first_invalid_classifier = sorted(invalid_classifiers)[0]
711 host = request.registry.settings.get("warehouse.domain")
712 classifiers_url = request.route_url("classifiers", _host=host)
713
714 raise wtforms.validators.ValidationError(
715 f"Classifier {first_invalid_classifier!r} has been "
716 f"deprecated, see {classifiers_url} for a list of valid "
717 "classifiers."
718 )
719
720 return validate_no_deprecated_classifiers
721
722
723 @view_config(
724 route_name="forklift.legacy.file_upload",
725 uses_session=True,
726 require_csrf=False,
727 require_methods=["POST"],
728 )
729 def file_upload(request):
730 # If we're in read-only mode, let upload clients know
731 if request.flags.enabled("read-only"):
732 raise _exc_with_message(
733 HTTPForbidden, "Read-only mode: Uploads are temporarily disabled"
734 )
735
736 # Log an attempt to upload
737 metrics = request.find_service(IMetricsService, context=None)
738 metrics.increment("warehouse.upload.attempt")
739
740 # Before we do anything, if there isn't an authenticated user with this
741 # request, then we'll go ahead and bomb out.
742 if request.authenticated_userid is None:
743 raise _exc_with_message(
744 HTTPForbidden, "Invalid or non-existent authentication information."
745 )
746
747 # Ensure that user has a verified, primary email address. This should both
748 # reduce the ease of spam account creation and activity, as well as act as
749 # a forcing function for https://github.com/pypa/warehouse/issues/3632.
750 # TODO: Once https://github.com/pypa/warehouse/issues/3632 has been solved,
751 # we might consider a different condition, possibly looking at
752 # User.is_active instead.
753 if not (request.user.primary_email and request.user.primary_email.verified):
754 raise _exc_with_message(
755 HTTPBadRequest,
756 (
757 "User {!r} does not have a verified primary email address. "
758 "Please add a verified primary email before attempting to "
759                 "upload to PyPI. See {project_help} "
760                 "for more information."
761 ).format(
762 request.user.username,
763 project_help=request.help_url(_anchor="verified-email"),
764 ),
765 ) from None
766
767 # Do some cleanup of the various form fields
768 for key in list(request.POST):
769 value = request.POST.get(key)
770 if isinstance(value, str):
771 # distutils "helpfully" substitutes unknown, but "required" values
772 # with the string "UNKNOWN". This is basically never what anyone
773 # actually wants so we'll just go ahead and delete anything whose
774 # value is UNKNOWN.
775 if value.strip() == "UNKNOWN":
776 del request.POST[key]
777
778 # Escape NUL characters, which psycopg doesn't like
779 if "\x00" in value:
780 request.POST[key] = value.replace("\x00", "\\x00")
781
782 # We require protocol_version 1, it's the only supported version however
783 # passing a different version should raise an error.
784 if request.POST.get("protocol_version", "1") != "1":
785 raise _exc_with_message(HTTPBadRequest, "Unknown protocol version.")
786
787 # Check if any fields were supplied as a tuple and have become a
788 # FieldStorage. The 'content' and 'gpg_signature' fields _should_ be a
789 # FieldStorage, however.
790 # ref: https://github.com/pypa/warehouse/issues/2185
791 # ref: https://github.com/pypa/warehouse/issues/2491
792 for field in set(request.POST) - {"content", "gpg_signature"}:
793 values = request.POST.getall(field)
794 if any(isinstance(value, FieldStorage) for value in values):
795 raise _exc_with_message(HTTPBadRequest, f"{field}: Should not be a tuple.")
796
797 # Look up all of the valid classifiers
798 all_classifiers = request.db.query(Classifier).all()
799
800 # Validate and process the incoming metadata.
801 form = MetadataForm(request.POST)
802
803 # Add a validator for deprecated classifiers
804 form.classifiers.validators.append(_no_deprecated_classifiers(request))
805
806 form.classifiers.choices = [(c.classifier, c.classifier) for c in all_classifiers]
807 if not form.validate():
808 for field_name in _error_message_order:
809 if field_name in form.errors:
810 break
811 else:
812 field_name = sorted(form.errors.keys())[0]
813
814 if field_name in form:
815 field = form[field_name]
816 if field.description and isinstance(field, wtforms.StringField):
817 error_message = (
818 "{value!r} is an invalid value for {field}. ".format(
819 value=field.data, field=field.description
820 )
821 + "Error: {} ".format(form.errors[field_name][0])
822 + "See "
823 "https://packaging.python.org/specifications/core-metadata"
824 )
825 else:
826 error_message = "Invalid value for {field}. Error: {msgs[0]}".format(
827 field=field_name, msgs=form.errors[field_name]
828 )
829 else:
830 error_message = "Error: {}".format(form.errors[field_name][0])
831
832 raise _exc_with_message(HTTPBadRequest, error_message)
833
834 # Ensure that we have file data in the request.
835 if "content" not in request.POST:
836 raise _exc_with_message(HTTPBadRequest, "Upload payload does not have a file.")
837
838 # Look up the project first before doing anything else, this is so we can
839 # automatically register it if we need to and can check permissions before
840 # going any further.
841 try:
842 project = (
843 request.db.query(Project)
844 .filter(
845 Project.normalized_name == func.normalize_pep426_name(form.name.data)
846 )
847 .one()
848 )
849 except NoResultFound:
850 # Check for AdminFlag set by a PyPI Administrator disabling new project
851 # registration, reasons for this include Spammers, security
852 # vulnerabilities, or just wanting to be lazy and not worry ;)
853 if request.flags.enabled("disallow-new-project-registration"):
854 raise _exc_with_message(
855 HTTPForbidden,
856 (
857 "New project registration temporarily disabled. "
858 "See {projecthelp} for details"
859 ).format(projecthelp=request.help_url(_anchor="admin-intervention")),
860 ) from None
861
862 # Before we create the project, we're going to check our blacklist to
863 # see if this project is even allowed to be registered. If it is not,
864 # then we're going to deny the request to create this project.
865 if request.db.query(
866 exists().where(
867 BlacklistedProject.name == func.normalize_pep426_name(form.name.data)
868 )
869 ).scalar():
870 raise _exc_with_message(
871 HTTPBadRequest,
872 (
873 "The name {name!r} isn't allowed. "
874 "See {projecthelp} "
875 "for more information."
876 ).format(
877 name=form.name.data,
878 projecthelp=request.help_url(_anchor="project-name"),
879 ),
880 ) from None
881
882 # Also check for collisions with Python Standard Library modules.
883 if packaging.utils.canonicalize_name(form.name.data) in STDLIB_PROHIBITTED:
884 raise _exc_with_message(
885 HTTPBadRequest,
886 (
887 "The name {name!r} isn't allowed (conflict with Python "
888 "Standard Library module name). See "
889 "{projecthelp} for more information."
890 ).format(
891 name=form.name.data,
892 projecthelp=request.help_url(_anchor="project-name"),
893 ),
894 ) from None
895
896 # The project doesn't exist in our database, so first we'll check for
897 # projects with a similar name
898 squattees = (
899 request.db.query(Project)
900 .filter(
901 func.levenshtein(
902 Project.normalized_name, func.normalize_pep426_name(form.name.data)
903 )
904 <= 2
905 )
906 .all()
907 )
908
909 # Next we'll create the project
910 project = Project(name=form.name.data)
911 request.db.add(project)
912
913 # Now that the project exists, add any squats which it is the squatter for
914 for squattee in squattees:
915 request.db.add(Squat(squatter=project, squattee=squattee))
916
917 # Then we'll add a role setting the current user as the "Owner" of the
918 # project.
919 request.db.add(Role(user=request.user, project=project, role_name="Owner"))
920 # TODO: This should be handled by some sort of database trigger or a
921 # SQLAlchemy hook or the like instead of doing it inline in this
922 # view.
923 request.db.add(
924 JournalEntry(
925 name=project.name,
926 action="create",
927 submitted_by=request.user,
928 submitted_from=request.remote_addr,
929 )
930 )
931 request.db.add(
932 JournalEntry(
933 name=project.name,
934 action="add Owner {}".format(request.user.username),
935 submitted_by=request.user,
936 submitted_from=request.remote_addr,
937 )
938 )
939
940 # Check that the user has permission to do things to this project, if this
941 # is a new project this will act as a sanity check for the role we just
942 # added above.
943 if not request.has_permission("upload", project):
944 raise _exc_with_message(
945 HTTPForbidden,
946 (
947 "The credential associated with user '{0}' "
948 "isn't allowed to upload to project '{1}'. "
949 "See {2} for more information."
950 ).format(
951 request.user.username,
952 project.name,
953 request.help_url(_anchor="project-name"),
954 ),
955 )
956
957 # Update name if it differs but is still equivalent. We don't need to check if
958 # they are equivalent when normalized because that's already been done when we
959 # queried for the project.
960 if project.name != form.name.data:
961 project.name = form.name.data
962
963 # Render our description so we can save from having to render this data every time
964 # we load a project description page.
965 rendered = None
966 if form.description.data:
967 description_content_type = form.description_content_type.data
968 if not description_content_type:
969 description_content_type = "text/x-rst"
970
971 rendered = readme.render(
972 form.description.data, description_content_type, use_fallback=False
973 )
974
975 # Uploading should prevent broken rendered descriptions.
976 if rendered is None:
977 if form.description_content_type.data:
978 message = (
979 "The description failed to render "
980 "for '{description_content_type}'."
981 ).format(description_content_type=description_content_type)
982 else:
983 message = (
984 "The description failed to render "
985 "in the default format of reStructuredText."
986 )
987 raise _exc_with_message(
988 HTTPBadRequest,
989 "{message} See {projecthelp} for more information.".format(
990 message=message,
991 projecthelp=request.help_url(_anchor="description-content-type"),
992 ),
993 ) from None
994
995 try:
996 canonical_version = packaging.utils.canonicalize_version(form.version.data)
997 release = (
998 request.db.query(Release)
999 .filter(
1000 (Release.project == project)
1001 & (Release.canonical_version == canonical_version)
1002 )
1003 .one()
1004 )
1005 except MultipleResultsFound:
1006 # There are multiple releases of this project which have the same
1007 # canonical version that were uploaded before we checked for
1008 # canonical version equivalence, so return the exact match instead
1009 release = (
1010 request.db.query(Release)
1011 .filter(
1012 (Release.project == project) & (Release.version == form.version.data)
1013 )
1014 .one()
1015 )
1016 except NoResultFound:
1017 release = Release(
1018 project=project,
1019 _classifiers=[
1020 c for c in all_classifiers if c.classifier in form.classifiers.data
1021 ],
1022 dependencies=list(
1023 _construct_dependencies(
1024 form,
1025 {
1026 "requires": DependencyKind.requires,
1027 "provides": DependencyKind.provides,
1028 "obsoletes": DependencyKind.obsoletes,
1029 "requires_dist": DependencyKind.requires_dist,
1030 "provides_dist": DependencyKind.provides_dist,
1031 "obsoletes_dist": DependencyKind.obsoletes_dist,
1032 "requires_external": DependencyKind.requires_external,
1033 "project_urls": DependencyKind.project_url,
1034 },
1035 )
1036 ),
1037 canonical_version=canonical_version,
1038 description=Description(
1039 content_type=form.description_content_type.data,
1040 raw=form.description.data or "",
1041 html=rendered or "",
1042 rendered_by=readme.renderer_version(),
1043 ),
1044 **{
1045 k: getattr(form, k).data
1046 for k in {
1047 # This is a list of all the fields in the form that we
1048 # should pull off and insert into our new release.
1049 "version",
1050 "summary",
1051 "license",
1052 "author",
1053 "author_email",
1054 "maintainer",
1055 "maintainer_email",
1056 "keywords",
1057 "platform",
1058 "home_page",
1059 "download_url",
1060 "requires_python",
1061 }
1062 },
1063 uploader=request.user,
1064 uploaded_via=request.user_agent,
1065 )
1066 request.db.add(release)
1067 # TODO: This should be handled by some sort of database trigger or
1068 # a SQLAlchemy hook or the like instead of doing it inline in
1069 # this view.
1070 request.db.add(
1071 JournalEntry(
1072 name=release.project.name,
1073 version=release.version,
1074 action="new release",
1075 submitted_by=request.user,
1076 submitted_from=request.remote_addr,
1077 )
1078 )
1079
1080 # TODO: We need a better solution to this than to just do it inline inside
1081 # this method. Ideally the version field would just be sortable, but
1082 # at least this should be some sort of hook or trigger.
1083 releases = (
1084 request.db.query(Release)
1085 .filter(Release.project == project)
1086 .options(orm.load_only(Release._pypi_ordering))
1087 .all()
1088 )
1089 for i, r in enumerate(
1090 sorted(releases, key=lambda x: packaging.version.parse(x.version))
1091 ):
1092 r._pypi_ordering = i
1093
1094 # Pull the filename out of our POST data.
1095 filename = request.POST["content"].filename
1096
1097 # Make sure that the filename does not contain any path separators.
1098 if "/" in filename or "\\" in filename:
1099 raise _exc_with_message(
1100 HTTPBadRequest, "Cannot upload a file with '/' or '\\' in the name."
1101 )
1102
1103 # Make sure the filename ends with an allowed extension.
1104 if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:
1105 raise _exc_with_message(
1106 HTTPBadRequest,
1107 "Invalid file extension: Use .egg, .tar.gz, .whl or .zip "
1108 "extension. (https://www.python.org/dev/peps/pep-0527)",
1109 )
1110
1111 # Make sure that our filename matches the project that it is being uploaded
1112 # to.
1113 prefix = pkg_resources.safe_name(project.name).lower()
1114 if not pkg_resources.safe_name(filename).lower().startswith(prefix):
1115 raise _exc_with_message(
1116 HTTPBadRequest,
1117 "Start filename for {!r} with {!r}.".format(project.name, prefix),
1118 )
1119
1120 # Check the content type of what is being uploaded
1121 if not request.POST["content"].type or request.POST["content"].type.startswith(
1122 "image/"
1123 ):
1124 raise _exc_with_message(HTTPBadRequest, "Invalid distribution file.")
1125
1126 # Ensure that the package filetype is allowed.
1127 # TODO: Once PEP 527 is completely implemented we should be able to delete
1128 # this and just move it into the form itself.
1129 if not project.allow_legacy_files and form.filetype.data not in {
1130 "sdist",
1131 "bdist_wheel",
1132 "bdist_egg",
1133 }:
1134 raise _exc_with_message(HTTPBadRequest, "Unknown type of file.")
1135
1136 # The project may or may not have a file size specified on the project, if
1137 # it does then it may or may not be smaller or larger than our global file
1138 # size limits.
1139 file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))
1140
1141 with tempfile.TemporaryDirectory() as tmpdir:
1142 temporary_filename = os.path.join(tmpdir, filename)
1143
1144 # Buffer the entire file onto disk, checking the hash of the file as we
1145 # go along.
1146 with open(temporary_filename, "wb") as fp:
1147 file_size = 0
1148 file_hashes = {
1149 "md5": hashlib.md5(),
1150 "sha256": hashlib.sha256(),
1151 "blake2_256": hashlib.blake2b(digest_size=256 // 8),
1152 }
1153 for chunk in iter(lambda: request.POST["content"].file.read(8096), b""):
1154 file_size += len(chunk)
1155 if file_size > file_size_limit:
1156 raise _exc_with_message(
1157 HTTPBadRequest,
1158 "File too large. "
1159 + "Limit for project {name!r} is {limit} MB. ".format(
1160 name=project.name, limit=file_size_limit // (1024 * 1024)
1161 )
1162 + "See "
1163 + request.help_url(_anchor="file-size-limit"),
1164 )
1165 fp.write(chunk)
1166 for hasher in file_hashes.values():
1167 hasher.update(chunk)
1168
1169 # Take our hash functions and compute the final hashes for them now.
1170 file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}
1171
1172 # Actually verify the digests that we've gotten. We're going to use
1173 # hmac.compare_digest even though we probably don't actually need to
1174 # because it's better safe than sorry. In the case of multiple digests
1175 # we expect them all to be given.
1176 if not all(
1177 [
1178 hmac.compare_digest(
1179 getattr(form, "{}_digest".format(digest_name)).data.lower(),
1180 digest_value,
1181 )
1182 for digest_name, digest_value in file_hashes.items()
1183 if getattr(form, "{}_digest".format(digest_name)).data
1184 ]
1185 ):
1186 raise _exc_with_message(
1187 HTTPBadRequest,
1188 "The digest supplied does not match a digest calculated "
1189 "from the uploaded file.",
1190 )
1191
1192 # Check to see if the file that was uploaded exists already or not.
1193 is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)
1194 if is_duplicate:
1195 return Response()
1196 elif is_duplicate is not None:
1197 raise _exc_with_message(
1198 HTTPBadRequest,
1199 # Note: Changing this error message to something that doesn't
1200 # start with "File already exists" will break the
1201 # --skip-existing functionality in twine
1202 # ref: https://github.com/pypa/warehouse/issues/3482
1203 # ref: https://github.com/pypa/twine/issues/332
1204 "File already exists. See "
1205 + request.help_url(_anchor="file-name-reuse"),
1206 )
1207
1208 # Check to see if the file that was uploaded exists in our filename log
1209 if request.db.query(
1210 request.db.query(Filename).filter(Filename.filename == filename).exists()
1211 ).scalar():
1212 raise _exc_with_message(
1213 HTTPBadRequest,
1214 "This filename has already been used, use a "
1215 "different version. "
1216 "See " + request.help_url(_anchor="file-name-reuse"),
1217 )
1218
1219 # Check to see if uploading this file would create a duplicate sdist
1220 # for the current release.
1221 if (
1222 form.filetype.data == "sdist"
1223 and request.db.query(
1224 request.db.query(File)
1225 .filter((File.release == release) & (File.packagetype == "sdist"))
1226 .exists()
1227 ).scalar()
1228 ):
1229 raise _exc_with_message(
1230 HTTPBadRequest, "Only one sdist may be uploaded per release."
1231 )
1232
1233 # Check the file to make sure it is a valid distribution file.
1234 if not _is_valid_dist_file(temporary_filename, form.filetype.data):
1235 raise _exc_with_message(HTTPBadRequest, "Invalid distribution file.")
1236
1237 # Check that if it's a binary wheel, it's on a supported platform
1238 if filename.endswith(".whl"):
1239 wheel_info = _wheel_file_re.match(filename)
1240 plats = wheel_info.group("plat").split(".")
1241 for plat in plats:
1242 if not _valid_platform_tag(plat):
1243 raise _exc_with_message(
1244 HTTPBadRequest,
1245 "Binary wheel '{filename}' has an unsupported "
1246 "platform tag '{plat}'.".format(filename=filename, plat=plat),
1247 )
1248
1249 # Also buffer the entire signature file to disk.
1250 if "gpg_signature" in request.POST:
1251 has_signature = True
1252 with open(os.path.join(tmpdir, filename + ".asc"), "wb") as fp:
1253 signature_size = 0
1254 for chunk in iter(
1255 lambda: request.POST["gpg_signature"].file.read(8096), b""
1256 ):
1257 signature_size += len(chunk)
1258 if signature_size > MAX_SIGSIZE:
1259 raise _exc_with_message(HTTPBadRequest, "Signature too large.")
1260 fp.write(chunk)
1261
1262 # Check whether signature is ASCII armored
1263 with open(os.path.join(tmpdir, filename + ".asc"), "rb") as fp:
1264 if not fp.read().startswith(b"-----BEGIN PGP SIGNATURE-----"):
1265 raise _exc_with_message(
1266 HTTPBadRequest, "PGP signature isn't ASCII armored."
1267 )
1268 else:
1269 has_signature = False
1270
1271 # TODO: This should be handled by some sort of database trigger or a
1272 # SQLAlchemy hook or the like instead of doing it inline in this
1273 # view.
1274 request.db.add(Filename(filename=filename))
1275
1276 # Store the information about the file in the database.
1277 file_ = File(
1278 release=release,
1279 filename=filename,
1280 python_version=form.pyversion.data,
1281 packagetype=form.filetype.data,
1282 comment_text=form.comment.data,
1283 size=file_size,
1284 has_signature=bool(has_signature),
1285 md5_digest=file_hashes["md5"],
1286 sha256_digest=file_hashes["sha256"],
1287 blake2_256_digest=file_hashes["blake2_256"],
1288 # Figure out what our filepath is going to be, we're going to use a
1289 # directory structure based on the hash of the file contents. This
1290 # will ensure that the contents of the file cannot change without
1291            # it also changing the path that the file is saved to.
1292 path="/".join(
1293 [
1294 file_hashes[PATH_HASHER][:2],
1295 file_hashes[PATH_HASHER][2:4],
1296 file_hashes[PATH_HASHER][4:],
1297 filename,
1298 ]
1299 ),
1300 uploaded_via=request.user_agent,
1301 )
1302 request.db.add(file_)
1303
1304 # TODO: This should be handled by some sort of database trigger or a
1305 # SQLAlchemy hook or the like instead of doing it inline in this
1306 # view.
1307 request.db.add(
1308 JournalEntry(
1309 name=release.project.name,
1310 version=release.version,
1311 action="add {python_version} file {filename}".format(
1312 python_version=file_.python_version, filename=file_.filename
1313 ),
1314 submitted_by=request.user,
1315 submitted_from=request.remote_addr,
1316 )
1317 )
1318
1319 # TODO: We need a better answer about how to make this transactional so
1320    #       this won't take effect until after a commit has happened, for
1321 # now we'll just ignore it and save it before the transaction is
1322 # committed.
1323 storage = request.find_service(IFileStorage)
1324 storage.store(
1325 file_.path,
1326 os.path.join(tmpdir, filename),
1327 meta={
1328 "project": file_.release.project.normalized_name,
1329 "version": file_.release.version,
1330 "package-type": file_.packagetype,
1331 "python-version": file_.python_version,
1332 },
1333 )
1334 if has_signature:
1335 storage.store(
1336 file_.pgp_path,
1337 os.path.join(tmpdir, filename + ".asc"),
1338 meta={
1339 "project": file_.release.project.normalized_name,
1340 "version": file_.release.version,
1341 "package-type": file_.packagetype,
1342 "python-version": file_.python_version,
1343 },
1344 )
1345
1346 # Log a successful upload
1347 metrics.increment("warehouse.upload.ok", tags=[f"filetype:{form.filetype.data}"])
1348
1349 return Response()
1350
1351
1352 def _legacy_purge(status, *args, **kwargs):
1353 if status:
1354 requests.post(*args, **kwargs)
1355
1356
1357 @view_config(
1358 route_name="forklift.legacy.submit", require_csrf=False, require_methods=["POST"]
1359 )
1360 @view_config(
1361 route_name="forklift.legacy.submit_pkg_info",
1362 require_csrf=False,
1363 require_methods=["POST"],
1364 )
1365 def submit(request):
1366 return _exc_with_message(
1367 HTTPGone,
1368 (
1369 "Project pre-registration is no longer required or supported, "
1370 "upload your files instead."
1371 ),
1372 )
1373
1374
1375 @view_config(
1376 route_name="forklift.legacy.doc_upload",
1377 require_csrf=False,
1378 require_methods=["POST"],
1379 )
1380 def doc_upload(request):
1381 return _exc_with_message(
1382 HTTPGone,
1383 "Uploading documentation is no longer supported, we recommend using "
1384 "https://readthedocs.org/.",
1385 )
1386
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py
--- a/warehouse/forklift/legacy.py
+++ b/warehouse/forklift/legacy.py
@@ -252,6 +252,11 @@
             "Can't have direct dependency: {!r}".format(requirement)
         )
 
+    if any(packaging.version.Version(spec.version).local for spec in req.specifier):
+        raise wtforms.validators.ValidationError(
+            "Can't have dependency with local version: {!r}".format(requirement)
+        )
+
 
 def _validate_legacy_dist_req_list(form, field):
     for datum in field.data:
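For context on the patch above: the added guard works because `packaging` parses the version portion of each specifier and exposes any PEP 440 local segment via `Version.local`. A minimal illustrative sketch (assuming only the `packaging` library, which warehouse already depends on; not part of the dataset record):

```python
# Sketch: why the patched validator's any(...) condition fires for a local-version pin.
# Assumes concrete version pins; wildcard specifiers such as "==2.*" would need
# separate handling because packaging.version.Version() rejects them.
import packaging.requirements
import packaging.version

req = packaging.requirements.Requirement("test-pypi-version-specifier-dep==0.0.1+cuda9")
for spec in req.specifier:
    version = packaging.version.Version(spec.version)
    # For "==0.0.1+cuda9" the local segment is "cuda9", which is truthy,
    # so the patched validator raises wtforms.validators.ValidationError.
    print(spec, version.local)
```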
| verification_info: {"golden_diff": the same patch reproduced above; "issue": "PyPI accepts packages with dependencies on local versions (e.g., 0.1.0+local). I'm not sure if this is intentional or not, since PyPI will reject packages whose version is a local version. I tested this with a minimal setuptools test package whose install_requires pins test-pypi-version-specifier-dep==0.0.1+cuda9."; "before_files": [warehouse/forklift/legacy.py, identical to the source reproduced in full between --- BEGIN FILES --- and --- END FILES --- above]}
just do it inline inside\n # this method. Ideally the version field would just be sortable, but\n # at least this should be some sort of hook or trigger.\n releases = (\n request.db.query(Release)\n .filter(Release.project == project)\n .options(orm.load_only(Release._pypi_ordering))\n .all()\n )\n for i, r in enumerate(\n sorted(releases, key=lambda x: packaging.version.parse(x.version))\n ):\n r._pypi_ordering = i\n\n # Pull the filename out of our POST data.\n filename = request.POST[\"content\"].filename\n\n # Make sure that the filename does not contain any path separators.\n if \"/\" in filename or \"\\\\\" in filename:\n raise _exc_with_message(\n HTTPBadRequest, \"Cannot upload a file with '/' or '\\\\' in the name.\"\n )\n\n # Make sure the filename ends with an allowed extension.\n if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:\n raise _exc_with_message(\n HTTPBadRequest,\n \"Invalid file extension: Use .egg, .tar.gz, .whl or .zip \"\n \"extension. (https://www.python.org/dev/peps/pep-0527)\",\n )\n\n # Make sure that our filename matches the project that it is being uploaded\n # to.\n prefix = pkg_resources.safe_name(project.name).lower()\n if not pkg_resources.safe_name(filename).lower().startswith(prefix):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Start filename for {!r} with {!r}.\".format(project.name, prefix),\n )\n\n # Check the content type of what is being uploaded\n if not request.POST[\"content\"].type or request.POST[\"content\"].type.startswith(\n \"image/\"\n ):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Ensure that the package filetype is allowed.\n # TODO: Once PEP 527 is completely implemented we should be able to delete\n # this and just move it into the form itself.\n if not project.allow_legacy_files and form.filetype.data not in {\n \"sdist\",\n \"bdist_wheel\",\n \"bdist_egg\",\n }:\n raise _exc_with_message(HTTPBadRequest, \"Unknown type of file.\")\n\n # The project may or may not have a file size specified on the project, if\n # it does then it may or may not be smaller or larger than our global file\n # size limits.\n file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))\n\n with tempfile.TemporaryDirectory() as tmpdir:\n temporary_filename = os.path.join(tmpdir, filename)\n\n # Buffer the entire file onto disk, checking the hash of the file as we\n # go along.\n with open(temporary_filename, \"wb\") as fp:\n file_size = 0\n file_hashes = {\n \"md5\": hashlib.md5(),\n \"sha256\": hashlib.sha256(),\n \"blake2_256\": hashlib.blake2b(digest_size=256 // 8),\n }\n for chunk in iter(lambda: request.POST[\"content\"].file.read(8096), b\"\"):\n file_size += len(chunk)\n if file_size > file_size_limit:\n raise _exc_with_message(\n HTTPBadRequest,\n \"File too large. \"\n + \"Limit for project {name!r} is {limit} MB. \".format(\n name=project.name, limit=file_size_limit // (1024 * 1024)\n )\n + \"See \"\n + request.help_url(_anchor=\"file-size-limit\"),\n )\n fp.write(chunk)\n for hasher in file_hashes.values():\n hasher.update(chunk)\n\n # Take our hash functions and compute the final hashes for them now.\n file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}\n\n # Actually verify the digests that we've gotten. We're going to use\n # hmac.compare_digest even though we probably don't actually need to\n # because it's better safe than sorry. 
In the case of multiple digests\n # we expect them all to be given.\n if not all(\n [\n hmac.compare_digest(\n getattr(form, \"{}_digest\".format(digest_name)).data.lower(),\n digest_value,\n )\n for digest_name, digest_value in file_hashes.items()\n if getattr(form, \"{}_digest\".format(digest_name)).data\n ]\n ):\n raise _exc_with_message(\n HTTPBadRequest,\n \"The digest supplied does not match a digest calculated \"\n \"from the uploaded file.\",\n )\n\n # Check to see if the file that was uploaded exists already or not.\n is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)\n if is_duplicate:\n return Response()\n elif is_duplicate is not None:\n raise _exc_with_message(\n HTTPBadRequest,\n # Note: Changing this error message to something that doesn't\n # start with \"File already exists\" will break the\n # --skip-existing functionality in twine\n # ref: https://github.com/pypa/warehouse/issues/3482\n # ref: https://github.com/pypa/twine/issues/332\n \"File already exists. See \"\n + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if the file that was uploaded exists in our filename log\n if request.db.query(\n request.db.query(Filename).filter(Filename.filename == filename).exists()\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n \"This filename has already been used, use a \"\n \"different version. \"\n \"See \" + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if uploading this file would create a duplicate sdist\n # for the current release.\n if (\n form.filetype.data == \"sdist\"\n and request.db.query(\n request.db.query(File)\n .filter((File.release == release) & (File.packagetype == \"sdist\"))\n .exists()\n ).scalar()\n ):\n raise _exc_with_message(\n HTTPBadRequest, \"Only one sdist may be uploaded per release.\"\n )\n\n # Check the file to make sure it is a valid distribution file.\n if not _is_valid_dist_file(temporary_filename, form.filetype.data):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Check that if it's a binary wheel, it's on a supported platform\n if filename.endswith(\".whl\"):\n wheel_info = _wheel_file_re.match(filename)\n plats = wheel_info.group(\"plat\").split(\".\")\n for plat in plats:\n if not _valid_platform_tag(plat):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Binary wheel '{filename}' has an unsupported \"\n \"platform tag '{plat}'.\".format(filename=filename, plat=plat),\n )\n\n # Also buffer the entire signature file to disk.\n if \"gpg_signature\" in request.POST:\n has_signature = True\n with open(os.path.join(tmpdir, filename + \".asc\"), \"wb\") as fp:\n signature_size = 0\n for chunk in iter(\n lambda: request.POST[\"gpg_signature\"].file.read(8096), b\"\"\n ):\n signature_size += len(chunk)\n if signature_size > MAX_SIGSIZE:\n raise _exc_with_message(HTTPBadRequest, \"Signature too large.\")\n fp.write(chunk)\n\n # Check whether signature is ASCII armored\n with open(os.path.join(tmpdir, filename + \".asc\"), \"rb\") as fp:\n if not fp.read().startswith(b\"-----BEGIN PGP SIGNATURE-----\"):\n raise _exc_with_message(\n HTTPBadRequest, \"PGP signature isn't ASCII armored.\"\n )\n else:\n has_signature = False\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(Filename(filename=filename))\n\n # Store the information about the file in the database.\n file_ = File(\n release=release,\n filename=filename,\n 
python_version=form.pyversion.data,\n packagetype=form.filetype.data,\n comment_text=form.comment.data,\n size=file_size,\n has_signature=bool(has_signature),\n md5_digest=file_hashes[\"md5\"],\n sha256_digest=file_hashes[\"sha256\"],\n blake2_256_digest=file_hashes[\"blake2_256\"],\n # Figure out what our filepath is going to be, we're going to use a\n # directory structure based on the hash of the file contents. This\n # will ensure that the contents of the file cannot change without\n # it also changing the path that the file is saved too.\n path=\"/\".join(\n [\n file_hashes[PATH_HASHER][:2],\n file_hashes[PATH_HASHER][2:4],\n file_hashes[PATH_HASHER][4:],\n filename,\n ]\n ),\n uploaded_via=request.user_agent,\n )\n request.db.add(file_)\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"add {python_version} file {filename}\".format(\n python_version=file_.python_version, filename=file_.filename\n ),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better answer about how to make this transactional so\n # this won't take affect until after a commit has happened, for\n # now we'll just ignore it and save it before the transaction is\n # committed.\n storage = request.find_service(IFileStorage)\n storage.store(\n file_.path,\n os.path.join(tmpdir, filename),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n if has_signature:\n storage.store(\n file_.pgp_path,\n os.path.join(tmpdir, filename + \".asc\"),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n\n # Log a successful upload\n metrics.increment(\"warehouse.upload.ok\", tags=[f\"filetype:{form.filetype.data}\"])\n\n return Response()\n\n\ndef _legacy_purge(status, *args, **kwargs):\n if status:\n requests.post(*args, **kwargs)\n\n\n@view_config(\n route_name=\"forklift.legacy.submit\", require_csrf=False, require_methods=[\"POST\"]\n)\n@view_config(\n route_name=\"forklift.legacy.submit_pkg_info\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef submit(request):\n return _exc_with_message(\n HTTPGone,\n (\n \"Project pre-registration is no longer required or supported, \"\n \"upload your files instead.\"\n ),\n )\n\n\n@view_config(\n route_name=\"forklift.legacy.doc_upload\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef doc_upload(request):\n return _exc_with_message(\n HTTPGone,\n \"Uploading documentation is no longer supported, we recommend using \"\n \"https://readthedocs.org/.\",\n )\n", "path": "warehouse/forklift/legacy.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# 
limitations under the License.\n\nimport email\nimport hashlib\nimport hmac\nimport os.path\nimport re\nimport tarfile\nimport tempfile\nimport zipfile\n\nfrom cgi import FieldStorage, parse_header\nfrom itertools import chain\n\nimport packaging.requirements\nimport packaging.specifiers\nimport packaging.utils\nimport packaging.version\nimport pkg_resources\nimport requests\nimport stdlib_list\nimport wtforms\nimport wtforms.validators\n\nfrom pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPGone\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom sqlalchemy import exists, func, orm\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom warehouse import forms\nfrom warehouse.admin.squats import Squat\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import (\n BlacklistedProject,\n Dependency,\n DependencyKind,\n Description,\n File,\n Filename,\n JournalEntry,\n Project,\n Release,\n Role,\n)\nfrom warehouse.utils import http, readme\n\nMAX_FILESIZE = 60 * 1024 * 1024 # 60M\nMAX_SIGSIZE = 8 * 1024 # 8K\n\nPATH_HASHER = \"blake2_256\"\n\n\ndef namespace_stdlib_list(module_list):\n for module_name in module_list:\n parts = module_name.split(\".\")\n for i, part in enumerate(parts):\n yield \".\".join(parts[: i + 1])\n\n\nSTDLIB_PROHIBITTED = {\n packaging.utils.canonicalize_name(s.rstrip(\"-_.\").lstrip(\"-_.\"))\n for s in chain.from_iterable(\n namespace_stdlib_list(stdlib_list.stdlib_list(version))\n for version in stdlib_list.short_versions\n )\n}\n\n# Wheel platform checking\n\n# Note: defining new platform ABI compatibility tags that don't\n# have a python.org binary release to anchor them is a\n# complex task that needs more than just OS+architecture info.\n# For Linux specifically, the platform ABI is defined by each\n# individual distro version, so wheels built on one version may\n# not even work on older versions of the same distro, let alone\n# a completely different distro.\n#\n# That means new entries should only be added given an\n# accompanying ABI spec that explains how to build a\n# compatible binary (see the manylinux specs as examples).\n\n# These platforms can be handled by a simple static list:\n_allowed_platforms = {\n \"any\",\n \"win32\",\n \"win_amd64\",\n \"win_ia64\",\n \"manylinux1_x86_64\",\n \"manylinux1_i686\",\n \"manylinux2010_x86_64\",\n \"manylinux2010_i686\",\n \"linux_armv6l\",\n \"linux_armv7l\",\n}\n# macosx is a little more complicated:\n_macosx_platform_re = re.compile(r\"macosx_10_(\\d+)+_(?P<arch>.*)\")\n_macosx_arches = {\n \"ppc\",\n \"ppc64\",\n \"i386\",\n \"x86_64\",\n \"intel\",\n \"fat\",\n \"fat32\",\n \"fat64\",\n \"universal\",\n}\n\n\n# Actual checking code;\ndef _valid_platform_tag(platform_tag):\n if platform_tag in _allowed_platforms:\n return True\n m = _macosx_platform_re.match(platform_tag)\n if m and m.group(\"arch\") in _macosx_arches:\n return True\n return False\n\n\n_error_message_order = [\"metadata_version\", \"name\", \"version\"]\n\n\n_dist_file_regexes = {\n # True/False is for legacy or not.\n True: re.compile(r\".+?\\.(exe|tar\\.gz|bz2|rpm|deb|zip|tgz|egg|dmg|msi|whl)$\", re.I),\n False: re.compile(r\".+?\\.(tar\\.gz|zip|whl|egg)$\", re.I),\n}\n\n\n_wheel_file_re = re.compile(\n r\"\"\"\n ^\n (?P<namever>(?P<name>.+?)(-(?P<ver>\\d.+?))?)\n (\n (-(?P<build>\\d.*?))?\n -(?P<pyver>.+?)\n -(?P<abi>.+?)\n 
-(?P<plat>.+?)\n (?:\\.whl|\\.dist-info)\n )\n $\n \"\"\",\n re.VERBOSE,\n)\n\n\n_project_name_re = re.compile(\n r\"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$\", re.IGNORECASE\n)\n\n\n_legacy_specifier_re = re.compile(r\"^(?P<name>\\S+)(?: \\((?P<specifier>\\S+)\\))?$\")\n\n\n_valid_description_content_types = {\"text/plain\", \"text/x-rst\", \"text/markdown\"}\n\n_valid_markdown_variants = {\"CommonMark\", \"GFM\"}\n\n\ndef _exc_with_message(exc, message):\n # The crappy old API that PyPI offered uses the status to pass down\n # messages to the client. So this function will make that easier to do.\n resp = exc(message)\n resp.status = \"{} {}\".format(resp.status_code, message)\n return resp\n\n\ndef _validate_pep440_version(form, field):\n parsed = packaging.version.parse(field.data)\n\n # Check that this version is a valid PEP 440 version at all.\n if not isinstance(parsed, packaging.version.Version):\n raise wtforms.validators.ValidationError(\n \"Start and end with a letter or numeral containing only \"\n \"ASCII numeric and '.', '_' and '-'.\"\n )\n\n # Check that this version does not have a PEP 440 local segment attached\n # to it.\n if parsed.local is not None:\n raise wtforms.validators.ValidationError(\"Can't use PEP 440 local versions.\")\n\n\ndef _parse_legacy_requirement(requirement):\n parsed = _legacy_specifier_re.search(requirement)\n if parsed is None:\n raise ValueError(\"Invalid requirement.\")\n return parsed.groupdict()[\"name\"], parsed.groupdict()[\"specifier\"]\n\n\ndef _validate_pep440_specifier(specifier):\n try:\n packaging.specifiers.SpecifierSet(specifier)\n except packaging.specifiers.InvalidSpecifier:\n raise wtforms.validators.ValidationError(\n \"Invalid specifier in requirement.\"\n ) from None\n\n\ndef _validate_pep440_specifier_field(form, field):\n return _validate_pep440_specifier(field.data)\n\n\ndef _validate_legacy_non_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement.replace(\"_\", \"\"))\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't direct dependency: {!r}\".format(requirement)\n )\n\n if any(\n not identifier.isalnum() or identifier[0].isdigit()\n for identifier in req.name.split(\".\")\n ):\n raise wtforms.validators.ValidationError(\"Use a valid Python identifier.\")\n\n\ndef _validate_legacy_non_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_non_dist_req(datum)\n\n\ndef _validate_legacy_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement)\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}.\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't have direct dependency: {!r}\".format(requirement)\n )\n\n if any(packaging.version.Version(spec.version).local for spec in req.specifier):\n raise wtforms.validators.ValidationError(\n \"Can't have dependency with local version: {!r}\".format(requirement)\n )\n\n\ndef _validate_legacy_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_dist_req(datum)\n\n\ndef _validate_requires_external(requirement):\n name, specifier = _parse_legacy_requirement(requirement)\n\n # TODO: Is it really reasonable to parse the specifier using PEP 440?\n if specifier is not 
None:\n _validate_pep440_specifier(specifier)\n\n\ndef _validate_requires_external_list(form, field):\n for datum in field.data:\n _validate_requires_external(datum)\n\n\ndef _validate_project_url(value):\n try:\n label, url = value.split(\", \", 1)\n except ValueError:\n raise wtforms.validators.ValidationError(\n \"Use both a label and an URL.\"\n ) from None\n\n if not label:\n raise wtforms.validators.ValidationError(\"Use a label.\")\n\n if len(label) > 32:\n raise wtforms.validators.ValidationError(\"Use 32 characters or less.\")\n\n if not url:\n raise wtforms.validators.ValidationError(\"Use an URL.\")\n\n if not http.is_valid_uri(url, require_authority=False):\n raise wtforms.validators.ValidationError(\"Use valid URL.\")\n\n\ndef _validate_project_url_list(form, field):\n for datum in field.data:\n _validate_project_url(datum)\n\n\ndef _validate_rfc822_email_field(form, field):\n email_validator = wtforms.validators.Email(message=\"Use a valid email address\")\n addresses = email.utils.getaddresses([field.data])\n\n for real_name, address in addresses:\n email_validator(form, type(\"field\", (), {\"data\": address}))\n\n\ndef _validate_description_content_type(form, field):\n def _raise(message):\n raise wtforms.validators.ValidationError(\n f\"Invalid description content type: {message}\"\n )\n\n content_type, parameters = parse_header(field.data)\n if content_type not in _valid_description_content_types:\n _raise(\"type/subtype is not valid\")\n\n charset = parameters.get(\"charset\")\n if charset and charset != \"UTF-8\":\n _raise(\"Use a valid charset\")\n\n variant = parameters.get(\"variant\")\n if (\n content_type == \"text/markdown\"\n and variant\n and variant not in _valid_markdown_variants\n ):\n _raise(\n \"Use a valid variant, expected one of {}\".format(\n \", \".join(_valid_markdown_variants)\n )\n )\n\n\ndef _construct_dependencies(form, types):\n for name, kind in types.items():\n for item in getattr(form, name).data:\n yield Dependency(kind=kind.value, specifier=item)\n\n\nclass ListField(wtforms.Field):\n def process_formdata(self, valuelist):\n self.data = [v.strip() for v in valuelist if v.strip()]\n\n\n# TODO: Eventually this whole validation thing should move to the packaging\n# library and we should just call that. 
However until PEP 426 is done\n# that library won't have an API for this.\nclass MetadataForm(forms.Form):\n\n # Metadata version\n metadata_version = wtforms.StringField(\n description=\"Metadata-Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n # Note: This isn't really Metadata 2.0, however bdist_wheel\n # claims it is producing a Metadata 2.0 metadata when in\n # reality it's more like 1.2 with some extensions.\n [\"1.0\", \"1.1\", \"1.2\", \"2.0\", \"2.1\"],\n message=\"Use a known metadata version.\",\n ),\n ],\n )\n\n # Identity Project and Release\n name = wtforms.StringField(\n description=\"Name\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n _project_name_re,\n re.IGNORECASE,\n message=(\n \"Start and end with a letter or numeral containing \"\n \"only ASCII numeric and '.', '_' and '-'.\"\n ),\n ),\n ],\n )\n version = wtforms.StringField(\n description=\"Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n r\"^(?!\\s).*(?<!\\s)$\",\n message=\"Can't have leading or trailing whitespace.\",\n ),\n _validate_pep440_version,\n ],\n )\n\n # Additional Release metadata\n summary = wtforms.StringField(\n description=\"Summary\",\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Length(max=512),\n wtforms.validators.Regexp(\n r\"^.+$\", # Rely on the fact that . doesn't match a newline.\n message=\"Use a single line only.\",\n ),\n ],\n )\n description = wtforms.StringField(\n description=\"Description\", validators=[wtforms.validators.Optional()]\n )\n author = wtforms.StringField(\n description=\"Author\", validators=[wtforms.validators.Optional()]\n )\n description_content_type = wtforms.StringField(\n description=\"Description-Content-Type\",\n validators=[wtforms.validators.Optional(), _validate_description_content_type],\n )\n author_email = wtforms.StringField(\n description=\"Author-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n maintainer = wtforms.StringField(\n description=\"Maintainer\", validators=[wtforms.validators.Optional()]\n )\n maintainer_email = wtforms.StringField(\n description=\"Maintainer-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n license = wtforms.StringField(\n description=\"License\", validators=[wtforms.validators.Optional()]\n )\n keywords = wtforms.StringField(\n description=\"Keywords\", validators=[wtforms.validators.Optional()]\n )\n classifiers = wtforms.fields.SelectMultipleField(description=\"Classifier\")\n platform = wtforms.StringField(\n description=\"Platform\", validators=[wtforms.validators.Optional()]\n )\n\n # URLs\n home_page = wtforms.StringField(\n description=\"Home-Page\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n download_url = wtforms.StringField(\n description=\"Download-URL\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n\n # Dependency Information\n requires_python = wtforms.StringField(\n description=\"Requires-Python\",\n validators=[wtforms.validators.Optional(), _validate_pep440_specifier_field],\n )\n\n # File information\n pyversion = wtforms.StringField(validators=[wtforms.validators.Optional()])\n filetype = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n [\n \"bdist_dmg\",\n \"bdist_dumb\",\n \"bdist_egg\",\n \"bdist_msi\",\n \"bdist_rpm\",\n \"bdist_wheel\",\n 
\"bdist_wininst\",\n \"sdist\",\n ],\n message=\"Use a known file type.\",\n ),\n ]\n )\n comment = wtforms.StringField(validators=[wtforms.validators.Optional()])\n md5_digest = wtforms.StringField(validators=[wtforms.validators.Optional()])\n sha256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, SHA256 message digest.\",\n ),\n ]\n )\n blake2_256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, BLAKE2 message digest.\",\n ),\n ]\n )\n\n # Legacy dependency information\n requires = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n provides = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n obsoletes = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n\n # Newer dependency information\n requires_dist = ListField(\n description=\"Requires-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n provides_dist = ListField(\n description=\"Provides-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n obsoletes_dist = ListField(\n description=\"Obsoletes-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n requires_external = ListField(\n description=\"Requires-External\",\n validators=[wtforms.validators.Optional(), _validate_requires_external_list],\n )\n\n # Newer metadata information\n project_urls = ListField(\n description=\"Project-URL\",\n validators=[wtforms.validators.Optional(), _validate_project_url_list],\n )\n\n def full_validate(self):\n # All non source releases *must* have a pyversion\n if (\n self.filetype.data\n and self.filetype.data != \"sdist\"\n and not self.pyversion.data\n ):\n raise wtforms.validators.ValidationError(\n \"Python version is required for binary distribution uploads.\"\n )\n\n # All source releases *must* have a pyversion of \"source\"\n if self.filetype.data == \"sdist\":\n if not self.pyversion.data:\n self.pyversion.data = \"source\"\n elif self.pyversion.data != \"source\":\n raise wtforms.validators.ValidationError(\n \"Use 'source' as Python version for an sdist.\"\n )\n\n # We *must* have at least one digest to verify against.\n if not self.md5_digest.data and not self.sha256_digest.data:\n raise wtforms.validators.ValidationError(\n \"Include at least one message digest.\"\n )\n\n\n_safe_zipnames = re.compile(r\"(purelib|platlib|headers|scripts|data).+\", re.I)\n# .tar uncompressed, .tar.gz .tgz, .tar.bz2 .tbz2\n_tar_filenames_re = re.compile(r\"\\.(?:tar$|t(?:ar\\.)?(?P<z_type>gz|bz2)$)\")\n\n\ndef _is_valid_dist_file(filename, filetype):\n \"\"\"\n Perform some basic checks to see whether the indicated file could be\n a valid distribution file.\n \"\"\"\n\n # If our file is a zipfile, then ensure that it's members are only\n # compressed with supported compression methods.\n if zipfile.is_zipfile(filename):\n with zipfile.ZipFile(filename) as zfp:\n for zinfo in zfp.infolist():\n if zinfo.compress_type not in {\n zipfile.ZIP_STORED,\n zipfile.ZIP_DEFLATED,\n }:\n return False\n\n tar_fn_match = _tar_filenames_re.search(filename)\n if tar_fn_match:\n # Ensure that this is a valid tar file, and that it contains PKG-INFO.\n 
z_type = tar_fn_match.group(\"z_type\") or \"\"\n try:\n with tarfile.open(filename, f\"r:{z_type}\") as tar:\n # This decompresses the entire stream to validate it and the\n # tar within. Easy CPU DoS attack. :/\n bad_tar = True\n member = tar.next()\n while member:\n parts = os.path.split(member.name)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n bad_tar = False\n member = tar.next()\n if bad_tar:\n return False\n except tarfile.ReadError:\n return False\n elif filename.endswith(\".exe\"):\n # The only valid filetype for a .exe file is \"bdist_wininst\".\n if filetype != \"bdist_wininst\":\n return False\n\n # Ensure that the .exe is a valid zip file, and that all of the files\n # contained within it have safe filenames.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch where there\n # isn't one.\n for zipname in zfp.namelist(): # pragma: no branch\n if not _safe_zipnames.match(zipname):\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".msi\"):\n # The only valid filetype for a .msi is \"bdist_msi\"\n if filetype != \"bdist_msi\":\n return False\n\n # Check the first 8 bytes of the MSI file. This was taken from the\n # legacy implementation of PyPI which itself took it from the\n # implementation of `file` I believe.\n with open(filename, \"rb\") as fp:\n if fp.read(8) != b\"\\xD0\\xCF\\x11\\xE0\\xA1\\xB1\\x1A\\xE1\":\n return False\n elif filename.endswith(\".zip\") or filename.endswith(\".egg\"):\n # Ensure that the .zip/.egg is a valid zip file, and that it has a\n # PKG-INFO file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".whl\"):\n # Ensure that the .whl is a valid zip file, and that it has a WHEEL\n # file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"WHEEL\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n\n # If we haven't yet decided it's not valid, then we'll assume it is and\n # allow it.\n return True\n\n\ndef _is_duplicate_file(db_session, filename, hashes):\n \"\"\"\n Check to see if file already exists, and if it's content matches.\n A file is considered to exist if its filename *or* blake2 digest are\n present in a file row in the database.\n\n Returns:\n - True: This file is a duplicate and all further processing should halt.\n - False: This file exists, but it is not a duplicate.\n - None: This file does not exist.\n \"\"\"\n\n file_ = (\n db_session.query(File)\n .filter(\n (File.filename == filename)\n | (File.blake2_256_digest == hashes[\"blake2_256\"])\n )\n .first()\n )\n\n if file_ is not None:\n return (\n file_.filename == filename\n and file_.sha256_digest == hashes[\"sha256\"]\n and file_.md5_digest == hashes[\"md5\"]\n and file_.blake2_256_digest == hashes[\"blake2_256\"]\n )\n\n return None\n\n\ndef 
_no_deprecated_classifiers(request):\n deprecated_classifiers = {\n classifier.classifier\n for classifier in (\n request.db.query(Classifier.classifier)\n .filter(Classifier.deprecated.is_(True))\n .all()\n )\n }\n\n def validate_no_deprecated_classifiers(form, field):\n invalid_classifiers = set(field.data or []) & deprecated_classifiers\n if invalid_classifiers:\n first_invalid_classifier = sorted(invalid_classifiers)[0]\n host = request.registry.settings.get(\"warehouse.domain\")\n classifiers_url = request.route_url(\"classifiers\", _host=host)\n\n raise wtforms.validators.ValidationError(\n f\"Classifier {first_invalid_classifier!r} has been \"\n f\"deprecated, see {classifiers_url} for a list of valid \"\n \"classifiers.\"\n )\n\n return validate_no_deprecated_classifiers\n\n\n@view_config(\n route_name=\"forklift.legacy.file_upload\",\n uses_session=True,\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef file_upload(request):\n # If we're in read-only mode, let upload clients know\n if request.flags.enabled(\"read-only\"):\n raise _exc_with_message(\n HTTPForbidden, \"Read-only mode: Uploads are temporarily disabled\"\n )\n\n # Log an attempt to upload\n metrics = request.find_service(IMetricsService, context=None)\n metrics.increment(\"warehouse.upload.attempt\")\n\n # Before we do anything, if there isn't an authenticated user with this\n # request, then we'll go ahead and bomb out.\n if request.authenticated_userid is None:\n raise _exc_with_message(\n HTTPForbidden, \"Invalid or non-existent authentication information.\"\n )\n\n # Ensure that user has a verified, primary email address. This should both\n # reduce the ease of spam account creation and activity, as well as act as\n # a forcing function for https://github.com/pypa/warehouse/issues/3632.\n # TODO: Once https://github.com/pypa/warehouse/issues/3632 has been solved,\n # we might consider a different condition, possibly looking at\n # User.is_active instead.\n if not (request.user.primary_email and request.user.primary_email.verified):\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"User {!r} does not have a verified primary email address. \"\n \"Please add a verified primary email before attempting to \"\n \"upload to PyPI. See {project_help} for more information.\"\n \"for more information.\"\n ).format(\n request.user.username,\n project_help=request.help_url(_anchor=\"verified-email\"),\n ),\n ) from None\n\n # Do some cleanup of the various form fields\n for key in list(request.POST):\n value = request.POST.get(key)\n if isinstance(value, str):\n # distutils \"helpfully\" substitutes unknown, but \"required\" values\n # with the string \"UNKNOWN\". This is basically never what anyone\n # actually wants so we'll just go ahead and delete anything whose\n # value is UNKNOWN.\n if value.strip() == \"UNKNOWN\":\n del request.POST[key]\n\n # Escape NUL characters, which psycopg doesn't like\n if \"\\x00\" in value:\n request.POST[key] = value.replace(\"\\x00\", \"\\\\x00\")\n\n # We require protocol_version 1, it's the only supported version however\n # passing a different version should raise an error.\n if request.POST.get(\"protocol_version\", \"1\") != \"1\":\n raise _exc_with_message(HTTPBadRequest, \"Unknown protocol version.\")\n\n # Check if any fields were supplied as a tuple and have become a\n # FieldStorage. 
The 'content' and 'gpg_signature' fields _should_ be a\n # FieldStorage, however.\n # ref: https://github.com/pypa/warehouse/issues/2185\n # ref: https://github.com/pypa/warehouse/issues/2491\n for field in set(request.POST) - {\"content\", \"gpg_signature\"}:\n values = request.POST.getall(field)\n if any(isinstance(value, FieldStorage) for value in values):\n raise _exc_with_message(HTTPBadRequest, f\"{field}: Should not be a tuple.\")\n\n # Look up all of the valid classifiers\n all_classifiers = request.db.query(Classifier).all()\n\n # Validate and process the incoming metadata.\n form = MetadataForm(request.POST)\n\n # Add a validator for deprecated classifiers\n form.classifiers.validators.append(_no_deprecated_classifiers(request))\n\n form.classifiers.choices = [(c.classifier, c.classifier) for c in all_classifiers]\n if not form.validate():\n for field_name in _error_message_order:\n if field_name in form.errors:\n break\n else:\n field_name = sorted(form.errors.keys())[0]\n\n if field_name in form:\n field = form[field_name]\n if field.description and isinstance(field, wtforms.StringField):\n error_message = (\n \"{value!r} is an invalid value for {field}. \".format(\n value=field.data, field=field.description\n )\n + \"Error: {} \".format(form.errors[field_name][0])\n + \"See \"\n \"https://packaging.python.org/specifications/core-metadata\"\n )\n else:\n error_message = \"Invalid value for {field}. Error: {msgs[0]}\".format(\n field=field_name, msgs=form.errors[field_name]\n )\n else:\n error_message = \"Error: {}\".format(form.errors[field_name][0])\n\n raise _exc_with_message(HTTPBadRequest, error_message)\n\n # Ensure that we have file data in the request.\n if \"content\" not in request.POST:\n raise _exc_with_message(HTTPBadRequest, \"Upload payload does not have a file.\")\n\n # Look up the project first before doing anything else, this is so we can\n # automatically register it if we need to and can check permissions before\n # going any further.\n try:\n project = (\n request.db.query(Project)\n .filter(\n Project.normalized_name == func.normalize_pep426_name(form.name.data)\n )\n .one()\n )\n except NoResultFound:\n # Check for AdminFlag set by a PyPI Administrator disabling new project\n # registration, reasons for this include Spammers, security\n # vulnerabilities, or just wanting to be lazy and not worry ;)\n if request.flags.enabled(\"disallow-new-project-registration\"):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"New project registration temporarily disabled. \"\n \"See {projecthelp} for details\"\n ).format(projecthelp=request.help_url(_anchor=\"admin-intervention\")),\n ) from None\n\n # Before we create the project, we're going to check our blacklist to\n # see if this project is even allowed to be registered. If it is not,\n # then we're going to deny the request to create this project.\n if request.db.query(\n exists().where(\n BlacklistedProject.name == func.normalize_pep426_name(form.name.data)\n )\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed. 
\"\n \"See {projecthelp} \"\n \"for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # Also check for collisions with Python Standard Library modules.\n if packaging.utils.canonicalize_name(form.name.data) in STDLIB_PROHIBITTED:\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed (conflict with Python \"\n \"Standard Library module name). See \"\n \"{projecthelp} for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # The project doesn't exist in our database, so first we'll check for\n # projects with a similar name\n squattees = (\n request.db.query(Project)\n .filter(\n func.levenshtein(\n Project.normalized_name, func.normalize_pep426_name(form.name.data)\n )\n <= 2\n )\n .all()\n )\n\n # Next we'll create the project\n project = Project(name=form.name.data)\n request.db.add(project)\n\n # Now that the project exists, add any squats which it is the squatter for\n for squattee in squattees:\n request.db.add(Squat(squatter=project, squattee=squattee))\n\n # Then we'll add a role setting the current user as the \"Owner\" of the\n # project.\n request.db.add(Role(user=request.user, project=project, role_name=\"Owner\"))\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"create\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"add Owner {}\".format(request.user.username),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # Check that the user has permission to do things to this project, if this\n # is a new project this will act as a sanity check for the role we just\n # added above.\n if not request.has_permission(\"upload\", project):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"The credential associated with user '{0}' \"\n \"isn't allowed to upload to project '{1}'. \"\n \"See {2} for more information.\"\n ).format(\n request.user.username,\n project.name,\n request.help_url(_anchor=\"project-name\"),\n ),\n )\n\n # Update name if it differs but is still equivalent. 
We don't need to check if\n # they are equivalent when normalized because that's already been done when we\n # queried for the project.\n if project.name != form.name.data:\n project.name = form.name.data\n\n # Render our description so we can save from having to render this data every time\n # we load a project description page.\n rendered = None\n if form.description.data:\n description_content_type = form.description_content_type.data\n if not description_content_type:\n description_content_type = \"text/x-rst\"\n\n rendered = readme.render(\n form.description.data, description_content_type, use_fallback=False\n )\n\n # Uploading should prevent broken rendered descriptions.\n if rendered is None:\n if form.description_content_type.data:\n message = (\n \"The description failed to render \"\n \"for '{description_content_type}'.\"\n ).format(description_content_type=description_content_type)\n else:\n message = (\n \"The description failed to render \"\n \"in the default format of reStructuredText.\"\n )\n raise _exc_with_message(\n HTTPBadRequest,\n \"{message} See {projecthelp} for more information.\".format(\n message=message,\n projecthelp=request.help_url(_anchor=\"description-content-type\"),\n ),\n ) from None\n\n try:\n canonical_version = packaging.utils.canonicalize_version(form.version.data)\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project)\n & (Release.canonical_version == canonical_version)\n )\n .one()\n )\n except MultipleResultsFound:\n # There are multiple releases of this project which have the same\n # canonical version that were uploaded before we checked for\n # canonical version equivalence, so return the exact match instead\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project) & (Release.version == form.version.data)\n )\n .one()\n )\n except NoResultFound:\n release = Release(\n project=project,\n _classifiers=[\n c for c in all_classifiers if c.classifier in form.classifiers.data\n ],\n dependencies=list(\n _construct_dependencies(\n form,\n {\n \"requires\": DependencyKind.requires,\n \"provides\": DependencyKind.provides,\n \"obsoletes\": DependencyKind.obsoletes,\n \"requires_dist\": DependencyKind.requires_dist,\n \"provides_dist\": DependencyKind.provides_dist,\n \"obsoletes_dist\": DependencyKind.obsoletes_dist,\n \"requires_external\": DependencyKind.requires_external,\n \"project_urls\": DependencyKind.project_url,\n },\n )\n ),\n canonical_version=canonical_version,\n description=Description(\n content_type=form.description_content_type.data,\n raw=form.description.data or \"\",\n html=rendered or \"\",\n rendered_by=readme.renderer_version(),\n ),\n **{\n k: getattr(form, k).data\n for k in {\n # This is a list of all the fields in the form that we\n # should pull off and insert into our new release.\n \"version\",\n \"summary\",\n \"license\",\n \"author\",\n \"author_email\",\n \"maintainer\",\n \"maintainer_email\",\n \"keywords\",\n \"platform\",\n \"home_page\",\n \"download_url\",\n \"requires_python\",\n }\n },\n uploader=request.user,\n uploaded_via=request.user_agent,\n )\n request.db.add(release)\n # TODO: This should be handled by some sort of database trigger or\n # a SQLAlchemy hook or the like instead of doing it inline in\n # this view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"new release\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better solution to this than to 
just do it inline inside\n # this method. Ideally the version field would just be sortable, but\n # at least this should be some sort of hook or trigger.\n releases = (\n request.db.query(Release)\n .filter(Release.project == project)\n .options(orm.load_only(Release._pypi_ordering))\n .all()\n )\n for i, r in enumerate(\n sorted(releases, key=lambda x: packaging.version.parse(x.version))\n ):\n r._pypi_ordering = i\n\n # Pull the filename out of our POST data.\n filename = request.POST[\"content\"].filename\n\n # Make sure that the filename does not contain any path separators.\n if \"/\" in filename or \"\\\\\" in filename:\n raise _exc_with_message(\n HTTPBadRequest, \"Cannot upload a file with '/' or '\\\\' in the name.\"\n )\n\n # Make sure the filename ends with an allowed extension.\n if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:\n raise _exc_with_message(\n HTTPBadRequest,\n \"Invalid file extension: Use .egg, .tar.gz, .whl or .zip \"\n \"extension. (https://www.python.org/dev/peps/pep-0527)\",\n )\n\n # Make sure that our filename matches the project that it is being uploaded\n # to.\n prefix = pkg_resources.safe_name(project.name).lower()\n if not pkg_resources.safe_name(filename).lower().startswith(prefix):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Start filename for {!r} with {!r}.\".format(project.name, prefix),\n )\n\n # Check the content type of what is being uploaded\n if not request.POST[\"content\"].type or request.POST[\"content\"].type.startswith(\n \"image/\"\n ):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Ensure that the package filetype is allowed.\n # TODO: Once PEP 527 is completely implemented we should be able to delete\n # this and just move it into the form itself.\n if not project.allow_legacy_files and form.filetype.data not in {\n \"sdist\",\n \"bdist_wheel\",\n \"bdist_egg\",\n }:\n raise _exc_with_message(HTTPBadRequest, \"Unknown type of file.\")\n\n # The project may or may not have a file size specified on the project, if\n # it does then it may or may not be smaller or larger than our global file\n # size limits.\n file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))\n\n with tempfile.TemporaryDirectory() as tmpdir:\n temporary_filename = os.path.join(tmpdir, filename)\n\n # Buffer the entire file onto disk, checking the hash of the file as we\n # go along.\n with open(temporary_filename, \"wb\") as fp:\n file_size = 0\n file_hashes = {\n \"md5\": hashlib.md5(),\n \"sha256\": hashlib.sha256(),\n \"blake2_256\": hashlib.blake2b(digest_size=256 // 8),\n }\n for chunk in iter(lambda: request.POST[\"content\"].file.read(8096), b\"\"):\n file_size += len(chunk)\n if file_size > file_size_limit:\n raise _exc_with_message(\n HTTPBadRequest,\n \"File too large. \"\n + \"Limit for project {name!r} is {limit} MB. \".format(\n name=project.name, limit=file_size_limit // (1024 * 1024)\n )\n + \"See \"\n + request.help_url(_anchor=\"file-size-limit\"),\n )\n fp.write(chunk)\n for hasher in file_hashes.values():\n hasher.update(chunk)\n\n # Take our hash functions and compute the final hashes for them now.\n file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}\n\n # Actually verify the digests that we've gotten. We're going to use\n # hmac.compare_digest even though we probably don't actually need to\n # because it's better safe than sorry. 
In the case of multiple digests\n # we expect them all to be given.\n if not all(\n [\n hmac.compare_digest(\n getattr(form, \"{}_digest\".format(digest_name)).data.lower(),\n digest_value,\n )\n for digest_name, digest_value in file_hashes.items()\n if getattr(form, \"{}_digest\".format(digest_name)).data\n ]\n ):\n raise _exc_with_message(\n HTTPBadRequest,\n \"The digest supplied does not match a digest calculated \"\n \"from the uploaded file.\",\n )\n\n # Check to see if the file that was uploaded exists already or not.\n is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)\n if is_duplicate:\n return Response()\n elif is_duplicate is not None:\n raise _exc_with_message(\n HTTPBadRequest,\n # Note: Changing this error message to something that doesn't\n # start with \"File already exists\" will break the\n # --skip-existing functionality in twine\n # ref: https://github.com/pypa/warehouse/issues/3482\n # ref: https://github.com/pypa/twine/issues/332\n \"File already exists. See \"\n + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if the file that was uploaded exists in our filename log\n if request.db.query(\n request.db.query(Filename).filter(Filename.filename == filename).exists()\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n \"This filename has already been used, use a \"\n \"different version. \"\n \"See \" + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if uploading this file would create a duplicate sdist\n # for the current release.\n if (\n form.filetype.data == \"sdist\"\n and request.db.query(\n request.db.query(File)\n .filter((File.release == release) & (File.packagetype == \"sdist\"))\n .exists()\n ).scalar()\n ):\n raise _exc_with_message(\n HTTPBadRequest, \"Only one sdist may be uploaded per release.\"\n )\n\n # Check the file to make sure it is a valid distribution file.\n if not _is_valid_dist_file(temporary_filename, form.filetype.data):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Check that if it's a binary wheel, it's on a supported platform\n if filename.endswith(\".whl\"):\n wheel_info = _wheel_file_re.match(filename)\n plats = wheel_info.group(\"plat\").split(\".\")\n for plat in plats:\n if not _valid_platform_tag(plat):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Binary wheel '{filename}' has an unsupported \"\n \"platform tag '{plat}'.\".format(filename=filename, plat=plat),\n )\n\n # Also buffer the entire signature file to disk.\n if \"gpg_signature\" in request.POST:\n has_signature = True\n with open(os.path.join(tmpdir, filename + \".asc\"), \"wb\") as fp:\n signature_size = 0\n for chunk in iter(\n lambda: request.POST[\"gpg_signature\"].file.read(8096), b\"\"\n ):\n signature_size += len(chunk)\n if signature_size > MAX_SIGSIZE:\n raise _exc_with_message(HTTPBadRequest, \"Signature too large.\")\n fp.write(chunk)\n\n # Check whether signature is ASCII armored\n with open(os.path.join(tmpdir, filename + \".asc\"), \"rb\") as fp:\n if not fp.read().startswith(b\"-----BEGIN PGP SIGNATURE-----\"):\n raise _exc_with_message(\n HTTPBadRequest, \"PGP signature isn't ASCII armored.\"\n )\n else:\n has_signature = False\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(Filename(filename=filename))\n\n # Store the information about the file in the database.\n file_ = File(\n release=release,\n filename=filename,\n 
python_version=form.pyversion.data,\n packagetype=form.filetype.data,\n comment_text=form.comment.data,\n size=file_size,\n has_signature=bool(has_signature),\n md5_digest=file_hashes[\"md5\"],\n sha256_digest=file_hashes[\"sha256\"],\n blake2_256_digest=file_hashes[\"blake2_256\"],\n # Figure out what our filepath is going to be, we're going to use a\n # directory structure based on the hash of the file contents. This\n # will ensure that the contents of the file cannot change without\n # it also changing the path that the file is saved too.\n path=\"/\".join(\n [\n file_hashes[PATH_HASHER][:2],\n file_hashes[PATH_HASHER][2:4],\n file_hashes[PATH_HASHER][4:],\n filename,\n ]\n ),\n uploaded_via=request.user_agent,\n )\n request.db.add(file_)\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"add {python_version} file {filename}\".format(\n python_version=file_.python_version, filename=file_.filename\n ),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better answer about how to make this transactional so\n # this won't take affect until after a commit has happened, for\n # now we'll just ignore it and save it before the transaction is\n # committed.\n storage = request.find_service(IFileStorage)\n storage.store(\n file_.path,\n os.path.join(tmpdir, filename),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n if has_signature:\n storage.store(\n file_.pgp_path,\n os.path.join(tmpdir, filename + \".asc\"),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n\n # Log a successful upload\n metrics.increment(\"warehouse.upload.ok\", tags=[f\"filetype:{form.filetype.data}\"])\n\n return Response()\n\n\ndef _legacy_purge(status, *args, **kwargs):\n if status:\n requests.post(*args, **kwargs)\n\n\n@view_config(\n route_name=\"forklift.legacy.submit\", require_csrf=False, require_methods=[\"POST\"]\n)\n@view_config(\n route_name=\"forklift.legacy.submit_pkg_info\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef submit(request):\n return _exc_with_message(\n HTTPGone,\n (\n \"Project pre-registration is no longer required or supported, \"\n \"upload your files instead.\"\n ),\n )\n\n\n@view_config(\n route_name=\"forklift.legacy.doc_upload\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef doc_upload(request):\n return _exc_with_message(\n HTTPGone,\n \"Uploading documentation is no longer supported, we recommend using \"\n \"https://readthedocs.org/.\",\n )\n", "path": "warehouse/forklift/legacy.py"}]} |
gh_patches_debug_1437 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-2888 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dock restoreState fails silently if a dock object is the only item in a V- or HContainer
### Short description
Calling restoreState() on a DockArea where a dock is the only object inside a container leads to the state not being correctly restored.
I don't know how it happened that a dock could appear as the only item in a container but one user managed to do that.
I couldn't find out how to trigger that.
Anyway, in that case way too many containers are apoptose'd and the state is not correctly restored
### Code to reproduce
```python
import json
import sys
import pyqtgraph as pg
from PySide6.QtWidgets import QMainWindow
from pyqtgraph.dockarea import Dock, DockArea
class DockApp(QMainWindow):
def __init__(self, title):
super().__init__()
self.dock_area = DockArea()
self.setCentralWidget(self.dock_area)
self.dock_area.addDock(Dock(name="Plot 1", closable=False), 'left')
self.dock_area.addDock(Dock(name="Plot 2", closable=False), 'left')
self.dock_area.addDock(Dock(name="Plot 4", closable=False), 'left')
self.dock_area.addDock(Dock(name="Table 1", closable=False), 'left')
self.dock_area.addDock(Dock(name="Table 2", closable=False), 'left')
self.dock_area.addDock(Dock(name="Table 3", closable=False), 'left')
state = json.loads("""{
"main": [
"vertical",
[
[
"horizontal",
[
[
"vertical",
[
[
"vertical",
[
[
"dock",
"Plot 1",
{}
]
],
{
"sizes": [
314
]
}
],
[
"dock",
"Plot 2",
{}
]
],
{
"sizes": [
314,
313
]
}
],
[
"vertical",
[
[
"dock",
"Table 3",
{}
],
[
"dock",
"Table 2",
{}
],
[
"dock",
"Table 1",
{}
]
],
{
"sizes": [
208,
207,
208
]
}
]
],
{
"sizes": [
784,
783
]
}
],
[
"dock",
"Plot 4",
{}
]
],
{
"sizes": [
631,
210
]
}
],
"float": []
}""")
self.dock_area.restoreState(state)
if __name__ == '__main__':
app = pg.Qt.mkQApp("LiveTrace")
window = DockApp(title='Test')
window.show()
sys.exit(app.exec())
```
### Expected behavior
All 6 docks should be restored like this:

### Real behavior
Only 2 docks are visible. All other docks are missing.

Note: No exception is thrown
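
For reference, here is one way to surface the silent loss from the script above. This is a sketch added for illustration, not part of the original report: it assumes the reproduction is adapted so that the `state` dict and the `DockApp` instance (`window`) are reachable at module scope, and it uses `DockArea.saveState()`, the counterpart of `restoreState()`, to list which docks actually survived the restore.

```python
# Collect dock names from a saveState()-style entry such as state['main'],
# i.e. ('dock', name, opts) leaves inside ('horizontal'|'vertical'|'tab', children, opts) nodes.
def dock_names(entry):
    if entry[0] == 'dock':
        return {entry[1]}
    names = set()
    for child in entry[1]:
        names |= dock_names(child)
    return names

saved = dock_names(state['main'])                            # docks named in the saved layout
restored = dock_names(window.dock_area.saveState()['main'])  # docks present after restoreState()
print(saved - restored)  # non-empty with the bug, empty once the restore works correctly
```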
### Tested environment(s)
* PyQtGraph version: 0.13.3
* Qt Python binding: PySide6 6.6.0 Qt 6.6.0
* Python version: 3.10.11 AMD64
* NumPy version: 1.26.2
* Operating system: Windows 10 22H2
* Installation method: pip
### Additional context
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/dockarea/Dock.py`
Content:
```
1 import warnings
2
3 from ..Qt import QtCore, QtGui, QtWidgets
4 from ..widgets.VerticalLabel import VerticalLabel
5 from .DockDrop import DockDrop
6
7
8 class Dock(QtWidgets.QWidget):
9
10 sigStretchChanged = QtCore.Signal()
11 sigClosed = QtCore.Signal(object)
12
13 def __init__(self, name, area=None, size=(10, 10), widget=None, hideTitle=False, autoOrientation=True, label=None, **kargs):
14 QtWidgets.QWidget.__init__(self)
15 self.dockdrop = DockDrop(self)
16 self._container = None
17 self._name = name
18 self.area = area
19 self.label = label
20 if self.label is None:
21 self.label = DockLabel(name, **kargs)
22 self.label.dock = self
23 if self.label.isClosable():
24 self.label.sigCloseClicked.connect(self.close)
25 self.labelHidden = False
26 self.moveLabel = True ## If false, the dock is no longer allowed to move the label.
27 self.autoOrient = autoOrientation
28 self.orientation = 'horizontal'
29 #self.label.setAlignment(QtCore.Qt.AlignmentFlag.AlignHCenter)
30 self.topLayout = QtWidgets.QGridLayout()
31 self.topLayout.setContentsMargins(0, 0, 0, 0)
32 self.topLayout.setSpacing(0)
33 self.setLayout(self.topLayout)
34 self.topLayout.addWidget(self.label, 0, 1)
35 self.widgetArea = QtWidgets.QWidget()
36 self.topLayout.addWidget(self.widgetArea, 1, 1)
37 self.layout = QtWidgets.QGridLayout()
38 self.layout.setContentsMargins(0, 0, 0, 0)
39 self.layout.setSpacing(0)
40 self.widgetArea.setLayout(self.layout)
41 self.widgetArea.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)
42 self.widgets = []
43 self.currentRow = 0
44 #self.titlePos = 'top'
45 self.dockdrop.raiseOverlay()
46 self.hStyle = """
47 Dock > QWidget {
48 border: 1px solid #000;
49 border-radius: 5px;
50 border-top-left-radius: 0px;
51 border-top-right-radius: 0px;
52 border-top-width: 0px;
53 }"""
54 self.vStyle = """
55 Dock > QWidget {
56 border: 1px solid #000;
57 border-radius: 5px;
58 border-top-left-radius: 0px;
59 border-bottom-left-radius: 0px;
60 border-left-width: 0px;
61 }"""
62 self.nStyle = """
63 Dock > QWidget {
64 border: 1px solid #000;
65 border-radius: 5px;
66 }"""
67 self.dragStyle = """
68 Dock > QWidget {
69 border: 4px solid #00F;
70 border-radius: 5px;
71 }"""
72 self.setAutoFillBackground(False)
73 self.widgetArea.setStyleSheet(self.hStyle)
74
75 self.setStretch(*size)
76
77 if widget is not None:
78 self.addWidget(widget)
79
80 if hideTitle:
81 self.hideTitleBar()
82
83 def implements(self, name=None):
84 if name is None:
85 return ['dock']
86 else:
87 return name == 'dock'
88
89 def setStretch(self, x=None, y=None):
90 """
91 Set the 'target' size for this Dock.
92 The actual size will be determined by comparing this Dock's
93 stretch value to the rest of the docks it shares space with.
94 """
95 if x is None:
96 x = 0
97 if y is None:
98 y = 0
99 self._stretch = (x, y)
100 self.sigStretchChanged.emit()
101
102 def stretch(self):
103 return self._stretch
104
105 def hideTitleBar(self):
106 """
107 Hide the title bar for this Dock.
108 This will prevent the Dock being moved by the user.
109 """
110 self.label.hide()
111 self.labelHidden = True
112 self.dockdrop.removeAllowedArea('center')
113 self.updateStyle()
114
115 def showTitleBar(self):
116 """
117 Show the title bar for this Dock.
118 """
119 self.label.show()
120 self.labelHidden = False
121 self.dockdrop.addAllowedArea('center')
122 self.updateStyle()
123
124 def title(self):
125 """
126 Gets the text displayed in the title bar for this dock.
127 """
128 return self.label.text()
129
130 def setTitle(self, text):
131 """
132 Sets the text displayed in title bar for this Dock.
133 """
134 self.label.setText(text)
135
136 def setOrientation(self, o='auto', force=False):
137 """
138 Sets the orientation of the title bar for this Dock.
139 Must be one of 'auto', 'horizontal', or 'vertical'.
140 By default ('auto'), the orientation is determined
141 based on the aspect ratio of the Dock.
142 """
143 # setOrientation may be called before the container is set in some cases
144 # (via resizeEvent), so there's no need to do anything here until called
145 # again by containerChanged
146 if self.container() is None:
147 return
148
149 if o == 'auto' and self.autoOrient:
150 if self.container().type() == 'tab':
151 o = 'horizontal'
152 elif self.width() > self.height()*1.5:
153 o = 'vertical'
154 else:
155 o = 'horizontal'
156 if force or self.orientation != o:
157 self.orientation = o
158 self.label.setOrientation(o)
159 self.updateStyle()
160
161 def updateStyle(self):
162 ## updates orientation and appearance of title bar
163 if self.labelHidden:
164 self.widgetArea.setStyleSheet(self.nStyle)
165 elif self.orientation == 'vertical':
166 self.label.setOrientation('vertical')
167 if self.moveLabel:
168 self.topLayout.addWidget(self.label, 1, 0)
169 self.widgetArea.setStyleSheet(self.vStyle)
170 else:
171 self.label.setOrientation('horizontal')
172 if self.moveLabel:
173 self.topLayout.addWidget(self.label, 0, 1)
174 self.widgetArea.setStyleSheet(self.hStyle)
175
176 def resizeEvent(self, ev):
177 self.setOrientation()
178 self.dockdrop.resizeOverlay(self.size())
179
180 def name(self):
181 return self._name
182
183 def addWidget(self, widget, row=None, col=0, rowspan=1, colspan=1):
184 """
185 Add a new widget to the interior of this Dock.
186 Each Dock uses a QGridLayout to arrange widgets within.
187 """
188 if row is None:
189 row = self.currentRow
190 self.currentRow = max(row+1, self.currentRow)
191 self.widgets.append(widget)
192 self.layout.addWidget(widget, row, col, rowspan, colspan)
193 self.dockdrop.raiseOverlay()
194
195 def startDrag(self):
196 self.drag = QtGui.QDrag(self)
197 mime = QtCore.QMimeData()
198 self.drag.setMimeData(mime)
199 self.widgetArea.setStyleSheet(self.dragStyle)
200 self.update()
201 action = self.drag.exec() if hasattr(self.drag, 'exec') else self.drag.exec_()
202 self.updateStyle()
203
204 def float(self):
205 self.area.floatDock(self)
206
207 def container(self):
208 return self._container
209
210 def containerChanged(self, c):
211 if self._container is not None:
212 # ask old container to close itself if it is no longer needed
213 self._container.apoptose()
214 self._container = c
215 if c is None:
216 self.area = None
217 else:
218 self.area = c.area
219 if c.type() != 'tab':
220 self.moveLabel = True
221 self.label.setDim(False)
222 else:
223 self.moveLabel = False
224
225 self.setOrientation(force=True)
226
227 def raiseDock(self):
228 """If this Dock is stacked underneath others, raise it to the top."""
229 self.container().raiseDock(self)
230
231 def close(self):
232 """Remove this dock from the DockArea it lives inside."""
233 if self._container is None:
234 warnings.warn(f"Cannot close dock {self} because it is not open.", RuntimeWarning, stacklevel=2)
235 return
236
237 self.setParent(None)
238 QtWidgets.QLabel.close(self.label)
239 self.label.setParent(None)
240 self._container.apoptose()
241 self._container = None
242 self.sigClosed.emit(self)
243
244 def __repr__(self):
245 return "<Dock %s %s>" % (self.name(), self.stretch())
246
247 def dragEnterEvent(self, *args):
248 self.dockdrop.dragEnterEvent(*args)
249
250 def dragMoveEvent(self, *args):
251 self.dockdrop.dragMoveEvent(*args)
252
253 def dragLeaveEvent(self, *args):
254 self.dockdrop.dragLeaveEvent(*args)
255
256 def dropEvent(self, *args):
257 self.dockdrop.dropEvent(*args)
258
259
260 class DockLabel(VerticalLabel):
261
262 sigClicked = QtCore.Signal(object, object)
263 sigCloseClicked = QtCore.Signal()
264
265 def __init__(self, text, closable=False, fontSize="12px"):
266 self.dim = False
267 self.fixedWidth = False
268 self.fontSize = fontSize
269 VerticalLabel.__init__(self, text, orientation='horizontal', forceWidth=False)
270 self.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop|QtCore.Qt.AlignmentFlag.AlignHCenter)
271 self.dock = None
272 self.updateStyle()
273 self.setAutoFillBackground(False)
274 self.mouseMoved = False
275
276 self.closeButton = None
277 if closable:
278 self.closeButton = QtWidgets.QToolButton(self)
279 self.closeButton.clicked.connect(self.sigCloseClicked)
280 self.closeButton.setIcon(QtWidgets.QApplication.style().standardIcon(QtWidgets.QStyle.StandardPixmap.SP_TitleBarCloseButton))
281
282 def updateStyle(self):
283 r = '3px'
284 if self.dim:
285 fg = '#aaa'
286 bg = '#44a'
287 border = '#339'
288 else:
289 fg = '#fff'
290 bg = '#66c'
291 border = '#55B'
292
293 if self.orientation == 'vertical':
294 self.vStyle = """DockLabel {
295 background-color : %s;
296 color : %s;
297 border-top-right-radius: 0px;
298 border-top-left-radius: %s;
299 border-bottom-right-radius: 0px;
300 border-bottom-left-radius: %s;
301 border-width: 0px;
302 border-right: 2px solid %s;
303 padding-top: 3px;
304 padding-bottom: 3px;
305 font-size: %s;
306 }""" % (bg, fg, r, r, border, self.fontSize)
307 self.setStyleSheet(self.vStyle)
308 else:
309 self.hStyle = """DockLabel {
310 background-color : %s;
311 color : %s;
312 border-top-right-radius: %s;
313 border-top-left-radius: %s;
314 border-bottom-right-radius: 0px;
315 border-bottom-left-radius: 0px;
316 border-width: 0px;
317 border-bottom: 2px solid %s;
318 padding-left: 3px;
319 padding-right: 3px;
320 font-size: %s;
321 }""" % (bg, fg, r, r, border, self.fontSize)
322 self.setStyleSheet(self.hStyle)
323
324 def setDim(self, d):
325 if self.dim != d:
326 self.dim = d
327 self.updateStyle()
328
329 def setOrientation(self, o):
330 VerticalLabel.setOrientation(self, o)
331 self.updateStyle()
332
333 def isClosable(self):
334 return self.closeButton is not None
335
336 def mousePressEvent(self, ev):
337 lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()
338 self.pressPos = lpos
339 self.mouseMoved = False
340 ev.accept()
341
342 def mouseMoveEvent(self, ev):
343 if not self.mouseMoved:
344 lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()
345 self.mouseMoved = (lpos - self.pressPos).manhattanLength() > QtWidgets.QApplication.startDragDistance()
346
347 if self.mouseMoved and ev.buttons() == QtCore.Qt.MouseButton.LeftButton:
348 self.dock.startDrag()
349 ev.accept()
350
351 def mouseReleaseEvent(self, ev):
352 ev.accept()
353 if not self.mouseMoved:
354 self.sigClicked.emit(self, ev)
355
356 def mouseDoubleClickEvent(self, ev):
357 if ev.button() == QtCore.Qt.MouseButton.LeftButton:
358 self.dock.float()
359
360 def resizeEvent (self, ev):
361 if self.closeButton:
362 if self.orientation == 'vertical':
363 size = ev.size().width()
364 pos = QtCore.QPoint(0, 0)
365 else:
366 size = ev.size().height()
367 pos = QtCore.QPoint(ev.size().width() - size, 0)
368 self.closeButton.setFixedSize(QtCore.QSize(size, size))
369 self.closeButton.move(pos)
370 super(DockLabel,self).resizeEvent(ev)
371
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyqtgraph/dockarea/Dock.py b/pyqtgraph/dockarea/Dock.py
--- a/pyqtgraph/dockarea/Dock.py
+++ b/pyqtgraph/dockarea/Dock.py
@@ -210,7 +210,7 @@
def containerChanged(self, c):
if self._container is not None:
# ask old container to close itself if it is no longer needed
- self._container.apoptose()
+ self._container.apoptose(propagate=False)
self._container = c
if c is None:
self.area = None
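
For readability, this is the patched `containerChanged` from the diff above with comments added; the comments are one reading of why the change fixes the restore and are not part of the upstream patch. The surrounding lines are taken verbatim from `pyqtgraph/dockarea/Dock.py` as listed earlier.

```python
    def containerChanged(self, c):
        if self._container is not None:
            # Ask the old container to close itself if it is no longer needed,
            # but do NOT let apoptose() propagate up the container tree: during
            # restoreState() every dock is re-parented in turn, and a propagating
            # apoptose can also prune ancestor containers that the restored
            # layout still needs, which is how docks silently disappear.
            self._container.apoptose(propagate=False)
        self._container = c
        if c is None:
            self.area = None
        else:
            self.area = c.area
            if c.type() != 'tab':
                self.moveLabel = True
                self.label.setDim(False)
            else:
                self.moveLabel = False

        self.setOrientation(force=True)
```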
| {"golden_diff": "...", "issue": "...", "before_files": [{"content": "...", "path": "pyqtgraph/dockarea/Dock.py"}], "after_files": [{"content": "...", "path": "pyqtgraph/dockarea/Dock.py"}]}  (verification_info: the golden diff, the issue text, and the pyqtgraph/dockarea/Dock.py listing shown above, plus the same file with the one-line apoptose(propagate=False) change applied)
gh_patches_debug_1438 | rasdani/github-patches | git_diff | interactions-py__interactions.py-89 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Subcommands are registered to guilds where they are not allowed
I've noticed that when I use `guild_ids` on a subcommand of a base command that is shared between multiple guilds, the subcommand is registered to every guild where any of the base command's subcommands is allowed.
## Steps
1. Register a subcommand `s1` for command `c` for **Guild A**:
```python
@slash.subcommand(
base="c",
name="s1",
    guild_ids=[GUILD_A_ID],
)
def _handle(ctx):
# ....
```
2. Register a subcommand `s2` for command `c` for **Guild B**:
```python
@slash.subcommand(
base="c",
name="s2",
    guild_ids=[GUILD_B_ID],
)
def _handle(ctx):
# ....
```
## Expected behavior
**Guild A** has `/c s1` command only and **Guild B** has `/c s2` command only.
## Actual behavior
**Guild A** has `/c s1` and `/c s2` but can only use `/c s1`, and **Guild B** has `/c s1` and `/c s2` but can only use `/c s2`.
--- END ISSUE ---
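One reading of where this behavior comes from in `discord_slash/client.py` (shown below): `to_dict()` builds a single command dict per base name whose `options` list contains every registered subcommand, and it appends that same dict once for every guild id attached to the base command, while `add_subcommand()` and `get_cog_commands()` are the places that attach guild ids from individual subcommands to the shared base. A minimal sketch of the resulting per-guild payload, using made-up guild ids:

```python
# Sketch only, not library code: the shared base command "c" carries every
# subcommand in its options, so each guild attached to the base receives the
# full /c tree -- matching the "Actual behavior" described above.
GUILD_A_ID = 111111111111111111  # placeholder ids for illustration
GUILD_B_ID = 222222222222222222

command_dict = {
    "name": "c",
    "description": "No Description.",
    "options": [
        {"name": "s1", "description": "No Description.", "type": 1, "options": []},  # 1 == SUB_COMMAND
        {"name": "s2", "description": "No Description.", "type": 1, "options": []},
    ],
}

per_guild = {gid: [command_dict] for gid in (GUILD_A_ID, GUILD_B_ID)}
for gid, cmds in per_guild.items():
    print(gid, [(c["name"], [o["name"] for o in c["options"]]) for c in cmds])
```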
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord_slash/client.py`
Content:
```
1 import logging
2 import typing
3 import discord
4 from inspect import iscoroutinefunction, getdoc
5 from discord.ext import commands
6 from . import http
7 from . import model
8 from . import error
9 from . import context
10 from .utils import manage_commands
11
12
13 class SlashCommand:
14 """
15 Slash command extension class.
16
17 :param client: discord.py Client or Bot instance.
18 :type client: Union[discord.Client, discord.ext.commands.Bot]
19 :param auto_register: Whether to register commands automatically. Default `False`.
20 :type auto_register: bool
21 :param override_type: Whether to override checking type of the client and try register event.
22 :type override_type: bool
23
24 :ivar _discord: Discord client of this client.
25 :ivar commands: Dictionary of the registered commands via :func:`.slash` decorator.
26 :ivar req: :class:`.http.SlashCommandRequest` of this client.
27 :ivar logger: Logger of this client.
28 :ivar auto_register: Whether to register commands automatically.
29 :ivar auto_delete: Whether to delete commands not found in the project automatically.
30 :ivar has_listener: Whether discord client has listener add function.
31 """
32
33 def __init__(self,
34 client: typing.Union[discord.Client, commands.Bot],
35 auto_register: bool = False,
36 auto_delete: bool = False,
37 override_type: bool = False):
38 self._discord = client
39 self.commands = {}
40 self.subcommands = {}
41 self.logger = logging.getLogger("discord_slash")
42 self.req = http.SlashCommandRequest(self.logger, self._discord)
43 self.auto_register = auto_register
44 self.auto_delete = auto_delete
45
46 if self.auto_register and self.auto_delete:
47 self._discord.loop.create_task(self.sync_all_commands())
48 elif self.auto_register:
49 self._discord.loop.create_task(self.register_all_commands())
50 elif self.auto_delete:
51 self._discord.loop.create_task(self.delete_unused_commands())
52
53 if not isinstance(client, commands.Bot) and not isinstance(client,
54 commands.AutoShardedBot) and not override_type:
55 self.logger.info("Detected discord.Client! Overriding on_socket_response.")
56 self._discord.on_socket_response = self.on_socket_response
57 self.has_listener = False
58 else:
59 if not hasattr(self._discord, 'slash'):
60 self._discord.slash = self
61 else:
62 raise error.DuplicateSlashClient("You can't have duplicate SlashCommand instances!")
63
64 self._discord.add_listener(self.on_socket_response)
65 self.has_listener = True
66 default_add_function = self._discord.add_cog
67 def override_add_cog(cog: commands.Cog):
68 default_add_function(cog)
69 self.get_cog_commands(cog)
70 self._discord.add_cog = override_add_cog
71 default_remove_function = self._discord.remove_cog
72 def override_remove_cog(name: str):
73 cog = self._discord.get_cog(name)
74 if cog is None:
75 return
76 self.remove_cog_commands(cog)
77 default_remove_function(name)
78 self._discord.remove_cog = override_remove_cog
79
80
81
82 def get_cog_commands(self, cog: commands.Cog):
83 """
84 Gets slash command from :class:`discord.ext.commands.Cog`.
85
86 .. note::
87 Since version ``1.0.9``, this gets called automatically during cog initialization.
88
89 :param cog: Cog that has slash commands.
90 :type cog: discord.ext.commands.Cog
91 """
92 if hasattr(cog, '_slash_registered'): # Temporary warning
93 return self.logger.warning("Calling get_cog_commands is no longer required "
94 "to add cog slash commands. Make sure to remove all calls to this function.")
95 cog._slash_registered = True # Assuming all went well
96 func_list = [getattr(cog, x) for x in dir(cog)]
97 res = [x for x in func_list if isinstance(x, (model.CogCommandObject, model.CogSubcommandObject))]
98 for x in res:
99 x.cog = cog
100 if isinstance(x, model.CogCommandObject):
101 if x.name in self.commands:
102 raise error.DuplicateCommand(x.name)
103 self.commands[x.name] = x
104 else:
105 if x.base in self.commands:
106 for i in self.commands[x.base].allowed_guild_ids:
107 if i not in x.allowed_guild_ids:
108 x.allowed_guild_ids.append(i)
109 self.commands[x.base].has_subcommands = True
110 else:
111 _cmd = {
112 "func": None,
113 "description": x.base_description,
114 "auto_convert": {},
115 "guild_ids": x.allowed_guild_ids,
116 "api_options": [],
117 "has_subcommands": True
118 }
119 self.commands[x.base] = model.CommandObject(x.base, _cmd)
120 if x.base not in self.subcommands:
121 self.subcommands[x.base] = {}
122 if x.subcommand_group:
123 if x.subcommand_group not in self.subcommands[x.base]:
124 self.subcommands[x.base][x.subcommand_group] = {}
125 if x.name in self.subcommands[x.base][x.subcommand_group]:
126 raise error.DuplicateCommand(f"{x.base} {x.subcommand_group} {x.name}")
127 self.subcommands[x.base][x.subcommand_group][x.name] = x
128 else:
129 if x.name in self.subcommands[x.base]:
130 raise error.DuplicateCommand(f"{x.base} {x.name}")
131 self.subcommands[x.base][x.name] = x
132
133 def remove_cog_commands(self, cog):
134 """
135 Removes slash command from :class:`discord.ext.commands.Cog`.
136
137 .. note::
138 Since version ``1.0.9``, this gets called automatically during cog de-initialization.
139
140 :param cog: Cog that has slash commands.
141 :type cog: discord.ext.commands.Cog
142 """
143 if hasattr(cog, '_slash_registered'):
144 del cog._slash_registered
145 func_list = [getattr(cog, x) for x in dir(cog)]
146 res = [x for x in func_list if
147 isinstance(x, (model.CogCommandObject, model.CogSubcommandObject))]
148 for x in res:
149 if isinstance(x, model.CogCommandObject):
150 if x.name not in self.commands:
151 continue # Just in case it is removed due to subcommand.
152 if x.name in self.subcommands:
153 self.commands[x.name].func = None
154 continue # Let's remove completely when every subcommand is removed.
155 del self.commands[x.name]
156 else:
157 if x.base not in self.subcommands:
158 continue # Just in case...
159 if x.subcommand_group:
160 del self.subcommands[x.base][x.subcommand_group][x.name]
161 if not self.subcommands[x.base][x.subcommand_group]:
162 del self.subcommands[x.base][x.subcommand_group]
163 else:
164 del self.subcommands[x.base][x.name]
165 if not self.subcommands[x.base]:
166 del self.subcommands[x.base]
167 if x.base in self.commands:
168 if self.commands[x.base].func:
169 self.commands[x.base].has_subcommands = False
170 else:
171 del self.commands[x.base]
172
173 async def to_dict(self):
174 """
175 Converts all commands currently registered to :class:`SlashCommand` to a dictionary.
176 Returns a dictionary in the format:
177
178 .. code-block:: python
179
180 {
181 "global" : [], # list of global commands
182 "guild" : {
183 0000: [] # list of commands in the guild 0000
184 }
185 }
186
187 Commands are in the format specified by discord `here <https://discord.com/developers/docs/interactions/slash-commands#applicationcommand>`_
188 """
189 await self._discord.wait_until_ready() # In case commands are still not registered to SlashCommand.
190 commands = {
191 "global": [],
192 "guild": {}
193 }
194 for x in self.commands:
195 selected = self.commands[x]
196 if selected.has_subcommands and selected.func:
197 # Registering both subcommand and command with same base name / name
198 # will result in only subcommand being registered,
199 # so we will warn this at registering subcommands.
200 self.logger.warning(f"Detected command name with same subcommand base name! "
201 f"This command will only have subcommand: {x}")
202
203 options = []
204 if selected.has_subcommands:
205 tgt = self.subcommands[x]
206 for y in tgt:
207 sub = tgt[y]
208 if isinstance(sub, model.SubcommandObject):
209 _dict = {
210 "name": sub.name,
211 "description": sub.description or "No Description.",
212 "type": model.SlashCommandOptionType.SUB_COMMAND,
213 "options": sub.options or []
214 }
215 options.append(_dict)
216 else:
217 base_dict = {
218 "name": y,
219 "description": "No Description.",
220 "type": model.SlashCommandOptionType.SUB_COMMAND_GROUP,
221 "options": []
222 }
223 for z in sub:
224 sub_sub = sub[z]
225 _dict = {
226 "name": sub_sub.name,
227 "description": sub_sub.description or "No Description.",
228 "type": model.SlashCommandOptionType.SUB_COMMAND,
229 "options": sub_sub.options or []
230 }
231 base_dict["options"].append(_dict)
232 if sub_sub.subcommand_group_description:
233 base_dict["description"] = sub_sub.subcommand_group_description
234 options.append(base_dict)
235
236 command_dict = {
237 "name": x,
238 "description": selected.description or "No Description.",
239 "options": selected.options if not options else options
240 }
241 if selected.allowed_guild_ids:
242 for y in selected.allowed_guild_ids:
243 try:
244 commands["guild"][y].append(command_dict)
245 except KeyError:
246 commands["guild"][y] = [command_dict]
247 else:
248 commands["global"].append(command_dict)
249
250 return commands
251
252 async def sync_all_commands(self, delete_from_unused_guilds = True):
253 """
254 Matches commands registered on Discord to commands registered here.
255 Deletes any commands on Discord but not here, and registers any not on Discord.
256 This is done with a `put` request.
257 If ``auto_register`` and ``auto_delete`` are ``True`` then this will be automatically called.
258
259         :param delete_from_unused_guilds: If the bot should make a request to set no commands for guilds that haven't got any commands registered in :class:``SlashCommand``
260 """
261 commands = await self.to_dict()
262 self.logger.info("Syncing commands...")
263 all_bot_guilds = [guild.id for guild in self._discord.guilds]
264         # This is an extremely bad way to do this, because slash cmds can be in guilds the bot isn't in
265 # But it's the only way until discord makes an endpoint to request all the guild with cmds registered.
266
267 await self.req.put_slash_commands(slash_commands = commands["global"], guild_id = None)
268
269 for guild in commands["guild"]:
270 await self.req.put_slash_commands(slash_commands = commands["guild"][guild], guild_id = guild)
271 all_bot_guilds.remove(guild)
272 if delete_from_unused_guilds:
273 for guild in all_bot_guilds:
274 await self.req.put_slash_commands(slash_commands=[], guild_id = guild)
275
276 self.logger.info("Completed syncing all commands!")
277
278 async def register_all_commands(self):
279 """
280 Registers all slash commands to Discord API.\n
281 If ``auto_register`` is ``True`` and ``auto_delete`` is ``False``, then this will be automatically called.
282 """
283 self.logger.info("Registering commands...")
284 commands = await self.to_dict()
285 for command in commands["global"]:
286 name = command.pop('name')
287 self.logger.debug(f"Registering global command {name}")
288 await self.req.add_slash_command(guild_id = None, cmd_name = name, **command)
289
290 for guild in commands["guild"]:
291 guild_cmds = commands["guild"][guild]
292 for command in guild_cmds:
293 name = command.pop('name')
294 self.logger.debug(f"Registering guild command {name} in guild: {guild}")
295 await self.req.add_slash_command(guild_id = guild, cmd_name = name, **command)
296 self.logger.info("Completed registering all commands!")
297
298 async def delete_unused_commands(self):
299 """
300 Unregisters all slash commands which are not used by the project to Discord API.\n
301 This might take some time because for every guild the bot is on an API call is made.\n
302 If ``auto_delete`` is ``True`` and ``auto_register`` is ``False``, then this will be automatically called.
303 """
304 await self._discord.wait_until_ready()
305 self.logger.info("Deleting unused commands...")
306 registered_commands = {}
307 global_commands = await self.req.get_all_commands(None)
308
309 for cmd in global_commands:
310 registered_commands[cmd["name"]] = {"id": cmd["id"], "guild_id": None}
311
312 for guild in self._discord.guilds:
313 # Since we can only get commands per guild we need to loop through every one
314 try:
315 guild_commands = await self.req.get_all_commands(guild.id)
316 except discord.Forbidden:
317 # In case a guild has not granted permissions to access commands
318 continue
319
320 for cmd in guild_commands:
321 registered_commands[cmd["name"]] = {"id": cmd["id"], "guild_id": guild.id}
322
323 for x in registered_commands:
324 if x not in self.commands:
325 # Delete command if not found locally
326 selected = registered_commands[x]
327 await self.req.remove_slash_command(selected["guild_id"], selected["id"])
328
329 self.logger.info("Completed deleting unused commands!")
330
331 def add_slash_command(self,
332 cmd,
333 name: str = None,
334 description: str = None,
335 auto_convert: dict = None,
336 guild_ids: typing.List[int] = None,
337 options: list = None,
338 has_subcommands: bool = False):
339 """
340 Registers slash command to SlashCommand.
341
342 :param cmd: Command Coroutine.
343 :type cmd: Coroutine
344 :param name: Name of the slash command. Default name of the coroutine.
345 :type name: str
346 :param description: Description of the slash command. Defaults to command docstring or ``None``.
347 :type description: str
348 :param auto_convert: Dictionary of how to convert option values. Default ``None``.
349 :type auto_convert: dict
350 :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.
351 :type guild_ids: List[int]
352 :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.
353 :type options: list
354 :param has_subcommands: Whether it has subcommand. Default ``False``.
355 :type has_subcommands: bool
356 """
357 name = name or cmd.__name__
358 name = name.lower()
359 if name in self.commands:
360 tgt = self.commands[name]
361 if not tgt.has_subcommands:
362 raise error.DuplicateCommand(name)
363 has_subcommands = tgt.has_subcommands
364 for x in tgt.allowed_guild_ids:
365 if x not in guild_ids:
366 guild_ids.append(x)
367
368 description = description or getdoc(cmd)
369
370 if options is None:
371 options = manage_commands.generate_options(cmd, description)
372
373 if options:
374 auto_convert = manage_commands.generate_auto_convert(options)
375
376 _cmd = {
377 "func": cmd,
378 "description": description,
379 "auto_convert": auto_convert,
380 "guild_ids": guild_ids,
381 "api_options": options,
382 "has_subcommands": has_subcommands
383 }
384 self.commands[name] = model.CommandObject(name, _cmd)
385 self.logger.debug(f"Added command `{name}`")
386
387 def add_subcommand(self,
388 cmd,
389 base,
390 subcommand_group=None,
391 name=None,
392 description: str = None,
393 base_description: str = None,
394 subcommand_group_description: str = None,
395 auto_convert: dict = None,
396 guild_ids: typing.List[int] = None,
397 options: list = None):
398 """
399 Registers subcommand to SlashCommand.
400
401 :param cmd: Subcommand Coroutine.
402 :type cmd: Coroutine
403 :param base: Name of the base command.
404 :type base: str
405 :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.
406 :type subcommand_group: str
407 :param name: Name of the subcommand. Default name of the coroutine.
408 :type name: str
409 :param description: Description of the subcommand. Defaults to command docstring or ``None``.
410 :type description: str
411 :param base_description: Description of the base command. Default ``None``.
412 :type base_description: str
413 :param subcommand_group_description: Description of the subcommand_group. Default ``None``.
414 :type subcommand_group_description: str
415 :param auto_convert: Dictionary of how to convert option values. Default ``None``.
416 :type auto_convert: dict
417 :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.
418 :type guild_ids: List[int]
419 :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.
420 :type options: list
421 """
422 base = base.lower()
423 subcommand_group = subcommand_group.lower() if subcommand_group else subcommand_group
424 name = name or cmd.__name__
425 name = name.lower()
426 description = description or getdoc(cmd)
427
428 if name in self.commands:
429 tgt = self.commands[name]
430 for x in tgt.allowed_guild_ids:
431 if x not in guild_ids:
432 guild_ids.append(x)
433
434 if options is None:
435 options = manage_commands.generate_options(cmd, description)
436
437 if options:
438 auto_convert = manage_commands.generate_auto_convert(options)
439
440 _cmd = {
441 "func": None,
442 "description": base_description,
443 "auto_convert": {},
444 "guild_ids": guild_ids,
445 "api_options": [],
446 "has_subcommands": True
447 }
448 _sub = {
449 "func": cmd,
450 "name": name,
451 "description": description,
452 "base_desc": base_description,
453 "sub_group_desc": subcommand_group_description,
454 "auto_convert": auto_convert,
455 "guild_ids": guild_ids,
456 "api_options": options
457 }
458 if base not in self.commands:
459 self.commands[base] = model.CommandObject(base, _cmd)
460 else:
461 self.commands[base].has_subcommands = True
462 self.commands[base].allowed_guild_ids = guild_ids
463 if self.commands[base].description:
464 _cmd["description"] = self.commands[base].description
465 if base not in self.subcommands:
466 self.subcommands[base] = {}
467 if subcommand_group:
468 if subcommand_group not in self.subcommands[base]:
469 self.subcommands[base][subcommand_group] = {}
470 if name in self.subcommands[base][subcommand_group]:
471 raise error.DuplicateCommand(f"{base} {subcommand_group} {name}")
472 self.subcommands[base][subcommand_group][name] = model.SubcommandObject(_sub, base, name, subcommand_group)
473 else:
474 if name in self.subcommands[base]:
475 raise error.DuplicateCommand(f"{base} {name}")
476 self.subcommands[base][name] = model.SubcommandObject(_sub, base, name)
477 self.logger.debug(f"Added subcommand `{base} {subcommand_group or ''} {name or cmd.__name__}`")
478
479 def slash(self,
480 *,
481 name: str = None,
482 description: str = None,
483 auto_convert: dict = None,
484 guild_id: int = None,
485 guild_ids: typing.List[int] = None,
486 options: typing.List[dict] = None):
487 """
488 Decorator that registers coroutine as a slash command.\n
489 All decorator args must be passed as keyword-only args.\n
490 1 arg for command coroutine is required for ctx(:class:`.model.SlashContext`),
491 and if your slash command has some args, then those args are also required.\n
492 All args must be passed as keyword-args.
493
494 .. note::
495 Role, User, and Channel types are passed as id if you don't set ``auto_convert``, since API doesn't give type of the option for now.\n
496 Also, if ``options`` is passed, then ``auto_convert`` will be automatically created or overrided.
497
498 .. warning::
499 Unlike discord.py's command, ``*args``, keyword-only args, converters, etc. are NOT supported.
500
501 Example:
502
503 .. code-block:: python
504
505 @slash.slash(name="ping")
506 async def _slash(ctx): # Normal usage.
507 await ctx.send(content=f"Pong! (`{round(bot.latency*1000)}`ms)")
508
509
510 @slash.slash(name="pick")
511 async def _pick(ctx, choice1, choice2): # Command with 1 or more args.
512 await ctx.send(content=str(random.choice([choice1, choice2])))
513
514 Example of formatting ``auto_convert``:
515
516 .. code-block:: python
517
518 {"option_role": "role", # For key put name of the option and for value put type of the option.
519 "option_user": SlashCommandOptionType.USER, # Also can use an enumeration member for the type
520 "option_user_two": 6, # or number
521 "option_channel": "CHANNEL"} # or upper case string.
522
523 :param name: Name of the slash command. Default name of the coroutine.
524 :type name: str
525 :param description: Description of the slash command. Default ``None``.
526 :type description: str
527 :param auto_convert: Dictionary of how to convert option values. Default ``None``.
528 :type auto_convert: dict
529 :param guild_id: Deprecated. Use ``guild_ids`` instead.
530 :type guild_id: int
531 :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.
532 :type guild_ids: List[int]
533 :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.
534 :type options: List[dict]
535 """
536 if guild_id:
537 self.logger.warning("`guild_id` is deprecated! `Use guild_ids` instead.")
538 guild_ids = [guild_id]
539
540 def wrapper(cmd):
541 self.add_slash_command(cmd, name, description, auto_convert, guild_ids, options)
542 return cmd
543
544 return wrapper
545
546 def subcommand(self,
547 *,
548 base,
549 subcommand_group=None,
550 name=None,
551 description: str = None,
552 base_description: str = None,
553 base_desc: str = None,
554 subcommand_group_description: str = None,
555 sub_group_desc: str = None,
556 auto_convert: dict = None,
557 guild_ids: typing.List[int] = None,
558 options: typing.List[dict] = None):
559 """
560 Decorator that registers subcommand.\n
561 Unlike discord.py, you don't need base command.\n
562 All args must be passed as keyword-args.
563
564 Example:
565
566 .. code-block:: python
567
568 # /group say <str>
569 @slash.subcommand(base="group", name="say")
570 async def _group_say(ctx, _str):
571 await ctx.send(content=_str)
572
573 # /group kick user <user>
574 @slash.subcommand(base="group",
575 subcommand_group="kick",
576 name="user",
577 auto_convert={"user": "user"})
578 async def _group_kick_user(ctx, user):
579 ...
580
581 :param base: Name of the base command.
582 :type base: str
583 :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.
584 :type subcommand_group: str
585 :param name: Name of the subcommand. Default name of the coroutine.
586 :type name: str
587 :param description: Description of the subcommand. Default ``None``.
588 :type description: str
589 :param base_description: Description of the base command. Default ``None``.
590 :type base_description: str
591 :param base_desc: Alias of ``base_description``.
592 :param subcommand_group_description: Description of the subcommand_group. Default ``None``.
593 :type subcommand_group_description: str
594 :param sub_group_desc: Alias of ``subcommand_group_description``.
595 :param auto_convert: Dictionary of how to convert option values. Default ``None``.
596 :type auto_convert: dict
597 :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.
598 :type guild_ids: List[int]
599 :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.
600 :type options: List[dict]
601 """
602 base_description = base_description or base_desc
603 subcommand_group_description = subcommand_group_description or sub_group_desc
604
605 def wrapper(cmd):
606 self.add_subcommand(cmd, base, subcommand_group, name, description, base_description, subcommand_group_description, auto_convert, guild_ids, options)
607 return cmd
608
609 return wrapper
610
611 async def process_options(self, guild: discord.Guild, options: list, auto_convert: dict) -> list:
612 """
613 Processes Role, User, and Channel option types to discord.py's models.
614
615 :param guild: Guild of the command message.
616 :type guild: discord.Guild
617 :param options: Dict of options.
618 :type options: list
619 :param auto_convert: Dictionary of how to convert option values.
620 :type auto_convert: dict
621 :return: list
622 """
623 if not guild:
624 self.logger.info("This command invoke is missing guild. Skipping option process.")
625 return [x["value"] for x in options]
626
627 if not isinstance(guild, discord.Guild):
628 return [x["value"] for x in options]
629
630 if not auto_convert:
631 return [x["value"] for x in options]
632
633 converters = [
634 [guild.get_member, guild.fetch_member],
635 guild.get_channel,
636 guild.get_role
637 ]
638
639 types = {
640 "user": 0,
641 "USER": 0,
642 model.SlashCommandOptionType.USER: 0,
643 "6": 0,
644 6: 0,
645 "channel": 1,
646 "CHANNEL": 1,
647 model.SlashCommandOptionType.CHANNEL: 1,
648 "7": 1,
649 7: 1,
650 "role": 2,
651 "ROLE": 2,
652 model.SlashCommandOptionType.ROLE: 2,
653 8: 2,
654 "8": 2
655 }
656
657 to_return = []
658
659 for x in options:
660 selected = x
661 if selected["name"] in auto_convert:
662 if auto_convert[selected["name"]] not in types:
663 to_return.append(selected["value"])
664 continue
665 loaded_converter = converters[types[auto_convert[selected["name"]]]]
666 if isinstance(loaded_converter, list):
667 cache_first = loaded_converter[0](int(selected["value"]))
668 if cache_first:
669 to_return.append(cache_first)
670 continue
671 loaded_converter = loaded_converter[1]
672 try:
673 to_return.append(await loaded_converter(int(selected["value"]))) \
674 if iscoroutinefunction(loaded_converter) else \
675 to_return.append(loaded_converter(int(selected["value"])))
676 except (discord.Forbidden, discord.HTTPException):
677 self.logger.warning("Failed fetching user! Passing ID instead.")
678 to_return.append(int(selected["value"]))
679 return to_return
680
681 async def on_socket_response(self, msg):
682 """
683 This event listener is automatically registered at initialization of this class.
684
685 .. warning::
686 DO NOT MANUALLY REGISTER, OVERRIDE, OR WHATEVER ACTION TO THIS COROUTINE UNLESS YOU KNOW WHAT YOU ARE DOING.
687
688 :param msg: Gateway message.
689 """
690 if msg["t"] != "INTERACTION_CREATE":
691 return
692
693 to_use = msg["d"]
694
695 if to_use["data"]["name"] in self.commands:
696
697 ctx = context.SlashContext(self.req, to_use, self._discord, self.logger)
698 cmd_name = to_use["data"]["name"]
699
700 if cmd_name not in self.commands and cmd_name in self.subcommands:
701 return await self.handle_subcommand(ctx, to_use)
702
703 selected_cmd = self.commands[to_use["data"]["name"]]
704
705 if selected_cmd.allowed_guild_ids:
706 guild_id = ctx.guild.id if isinstance(ctx.guild, discord.Guild) else ctx.guild
707
708 if guild_id not in selected_cmd.allowed_guild_ids:
709 return
710
711 if selected_cmd.has_subcommands and not selected_cmd.func:
712 return await self.handle_subcommand(ctx, to_use)
713
714 if "options" in to_use["data"]:
715 for x in to_use["data"]["options"]:
716 if "value" not in x:
717 return await self.handle_subcommand(ctx, to_use)
718
719 args = await self.process_options(ctx.guild, to_use["data"]["options"], selected_cmd.auto_convert) \
720 if "options" in to_use["data"] else []
721
722 self._discord.dispatch("slash_command", ctx)
723
724 try:
725 await selected_cmd.invoke(ctx, *args)
726 except Exception as ex:
727 await self.on_slash_command_error(ctx, ex)
728
729 async def handle_subcommand(self, ctx: context.SlashContext, data: dict):
730 """
731 Coroutine for handling subcommand.
732
733 .. warning::
734 Do not manually call this.
735
736 :param ctx: :class:`.model.SlashContext` instance.
737 :param data: Gateway message.
738 """
739 if data["data"]["name"] not in self.subcommands:
740 return
741 base = self.subcommands[data["data"]["name"]]
742 sub = data["data"]["options"][0]
743 sub_name = sub["name"]
744 if sub_name not in base:
745 return
746 ctx.subcommand = sub_name
747 sub_opts = sub["options"] if "options" in sub else []
748 for x in sub_opts:
749 if "options" in x or "value" not in x:
750 sub_group = x["name"]
751 if sub_group not in base[sub_name]:
752 return
753 ctx.subcommand_group = sub_group
754 selected = base[sub_name][sub_group]
755 args = await self.process_options(ctx.guild, x["options"], selected.auto_convert) \
756 if "options" in x else []
757 self._discord.dispatch("slash_command", ctx)
758 try:
759 await selected.invoke(ctx, *args)
760 except Exception as ex:
761 await self.on_slash_command_error(ctx, ex)
762 return
763 selected = base[sub_name]
764 args = await self.process_options(ctx.guild, sub_opts, selected.auto_convert) \
765 if "options" in sub else []
766 self._discord.dispatch("slash_command", ctx)
767 try:
768 await selected.invoke(ctx, *args)
769 except Exception as ex:
770 await self.on_slash_command_error(ctx, ex)
771
772 async def on_slash_command_error(self, ctx, ex):
773 """
774 Handles Exception occurred from invoking command.
775
776 Example of adding event:
777
778 .. code-block:: python
779
780 @client.event
781 async def on_slash_command_error(ctx, ex):
782 ...
783
784 Example of adding listener:
785
786 .. code-block:: python
787
788 @bot.listen()
789 async def on_slash_command_error(ctx, ex):
790 ...
791
792 :param ctx: Context of the command.
793 :type ctx: :class:`.model.SlashContext`
794 :param ex: Exception from the command invoke.
795 :type ex: Exception
796 :return:
797 """
798 if self.has_listener:
799 if self._discord.extra_events.get('on_slash_command_error'):
800 self._discord.dispatch("slash_command_error", ctx, ex)
801 return
802 if hasattr(self._discord, "on_slash_command_error"):
803 self._discord.dispatch("slash_command_error", ctx, ex)
804 return
805 # Prints exception if not overrided or has no listener for error.
806 self.logger.exception(f"An exception has occurred while executing command `{ctx.name}`:")
807
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
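Before the reference patch below, here is a minimal, runnable sketch of the root cause, using a plain dictionary as a hypothetical stand-in for the library's command registry (it is not the real `CommandObject`/`SubcommandObject` machinery): all subcommands of a base share one registry entry keyed by the base name, so merging previously-allowed guild IDs only works when the lookup uses `base`; looking up the subcommand's own `name` never matches, and earlier guild IDs are dropped.

```python
# Minimal sketch of the lookup-key problem described in the issue.
# A plain dict stands in for the library's registry (hypothetical, for illustration only).

registry = {}  # maps base command name -> {"guild_ids": [...]}


def add_subcommand(base, name, guild_ids, buggy=False):
    """Register subcommand `name` under `base`, merging earlier guild IDs."""
    lookup_key = name if buggy else base  # the one-line difference in the patch
    if lookup_key in registry:
        for gid in registry[lookup_key]["guild_ids"]:
            if gid not in guild_ids:
                guild_ids.append(gid)
    registry[base] = {"guild_ids": guild_ids}


# Correct behaviour: guilds accumulate on the shared base entry.
add_subcommand("c", "s1", [111])
add_subcommand("c", "s2", [222])
print(registry["c"]["guild_ids"])  # [222, 111]

# Buggy behaviour: "s2" is never a key in the registry, so the merge is skipped
# and guild 111 is silently dropped from the base entry.
registry.clear()
add_subcommand("c", "s1", [111], buggy=True)
add_subcommand("c", "s2", [222], buggy=True)
print(registry["c"]["guild_ids"])  # [222]
```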
| diff --git a/discord_slash/client.py b/discord_slash/client.py
--- a/discord_slash/client.py
+++ b/discord_slash/client.py
@@ -425,8 +425,8 @@
name = name.lower()
description = description or getdoc(cmd)
- if name in self.commands:
- tgt = self.commands[name]
+ if base in self.commands:
+ tgt = self.commands[base]
for x in tgt.allowed_guild_ids:
if x not in guild_ids:
guild_ids.append(x)
| {"golden_diff": "diff --git a/discord_slash/client.py b/discord_slash/client.py\n--- a/discord_slash/client.py\n+++ b/discord_slash/client.py\n@@ -425,8 +425,8 @@\n name = name.lower()\n description = description or getdoc(cmd)\n \n- if name in self.commands:\n- tgt = self.commands[name]\n+ if base in self.commands:\n+ tgt = self.commands[base]\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n", "issue": "Subcommands are registered to guilds where there are not allowed\nI've noticed that when I use `guild_ids` on a subcommand for a command which is shared between multiple guilds, that subcommand is registered to all guilds where one of its subcommands is allowed.\r\n\r\n## Steps\r\n1. Register a subcommand `s1` for command `c` for **Guild A**: \r\n ```python\r\n @slash.subcommand(\r\n base=\"c\",\r\n name=\"s1\",\r\n guilds_ids=[GUILD_A_ID],\r\n )\r\n def _handle(ctx):\r\n # ....\r\n ```\r\n1. Register a subcommand `s2` for command `c` for **Guild B**: \r\n ```python\r\n @slash.subcommand(\r\n base=\"c\",\r\n name=\"s2\",\r\n guilds_ids=[GUILD_A_ID],\r\n )\r\n def _handle(ctx):\r\n # ....\r\n ```\r\n\r\n## Expected behavior\r\n**Guild A** has `/c s1` command only and **Guild B** has `/c s2` command only.\r\n\r\n## Actual behavior\r\n**Guild A** has `/c s1` and `/c s2` but only can use `/c s1`, and **Guild B** has `/c s1` and `/c s2` but only can use `/c s2`.\n", "before_files": [{"content": "import logging\nimport typing\nimport discord\nfrom inspect import iscoroutinefunction, getdoc\nfrom discord.ext import commands\nfrom . import http\nfrom . import model\nfrom . import error\nfrom . import context\nfrom .utils import manage_commands\n\n\nclass SlashCommand:\n \"\"\"\n Slash command extension class.\n\n :param client: discord.py Client or Bot instance.\n :type client: Union[discord.Client, discord.ext.commands.Bot]\n :param auto_register: Whether to register commands automatically. Default `False`.\n :type auto_register: bool\n :param override_type: Whether to override checking type of the client and try register event.\n :type override_type: bool\n\n :ivar _discord: Discord client of this client.\n :ivar commands: Dictionary of the registered commands via :func:`.slash` decorator.\n :ivar req: :class:`.http.SlashCommandRequest` of this client.\n :ivar logger: Logger of this client.\n :ivar auto_register: Whether to register commands automatically.\n :ivar auto_delete: Whether to delete commands not found in the project automatically.\n :ivar has_listener: Whether discord client has listener add function.\n \"\"\"\n\n def __init__(self,\n client: typing.Union[discord.Client, commands.Bot],\n auto_register: bool = False,\n auto_delete: bool = False,\n override_type: bool = False):\n self._discord = client\n self.commands = {}\n self.subcommands = {}\n self.logger = logging.getLogger(\"discord_slash\")\n self.req = http.SlashCommandRequest(self.logger, self._discord)\n self.auto_register = auto_register\n self.auto_delete = auto_delete\n\n if self.auto_register and self.auto_delete:\n self._discord.loop.create_task(self.sync_all_commands())\n elif self.auto_register:\n self._discord.loop.create_task(self.register_all_commands())\n elif self.auto_delete:\n self._discord.loop.create_task(self.delete_unused_commands())\n \n if not isinstance(client, commands.Bot) and not isinstance(client,\n commands.AutoShardedBot) and not override_type:\n self.logger.info(\"Detected discord.Client! 
Overriding on_socket_response.\")\n self._discord.on_socket_response = self.on_socket_response\n self.has_listener = False\n else:\n if not hasattr(self._discord, 'slash'):\n self._discord.slash = self\n else:\n raise error.DuplicateSlashClient(\"You can't have duplicate SlashCommand instances!\")\n \n self._discord.add_listener(self.on_socket_response)\n self.has_listener = True\n default_add_function = self._discord.add_cog\n def override_add_cog(cog: commands.Cog):\n default_add_function(cog)\n self.get_cog_commands(cog)\n self._discord.add_cog = override_add_cog\n default_remove_function = self._discord.remove_cog\n def override_remove_cog(name: str):\n cog = self._discord.get_cog(name)\n if cog is None:\n return\n self.remove_cog_commands(cog)\n default_remove_function(name)\n self._discord.remove_cog = override_remove_cog\n \n \n\n def get_cog_commands(self, cog: commands.Cog):\n \"\"\"\n Gets slash command from :class:`discord.ext.commands.Cog`.\n\n .. note::\n Since version ``1.0.9``, this gets called automatically during cog initialization.\n\n :param cog: Cog that has slash commands.\n :type cog: discord.ext.commands.Cog\n \"\"\"\n if hasattr(cog, '_slash_registered'): # Temporary warning\n return self.logger.warning(\"Calling get_cog_commands is no longer required \"\n \"to add cog slash commands. Make sure to remove all calls to this function.\")\n cog._slash_registered = True # Assuming all went well\n func_list = [getattr(cog, x) for x in dir(cog)]\n res = [x for x in func_list if isinstance(x, (model.CogCommandObject, model.CogSubcommandObject))]\n for x in res:\n x.cog = cog\n if isinstance(x, model.CogCommandObject):\n if x.name in self.commands:\n raise error.DuplicateCommand(x.name)\n self.commands[x.name] = x\n else:\n if x.base in self.commands:\n for i in self.commands[x.base].allowed_guild_ids:\n if i not in x.allowed_guild_ids:\n x.allowed_guild_ids.append(i)\n self.commands[x.base].has_subcommands = True\n else:\n _cmd = {\n \"func\": None,\n \"description\": x.base_description,\n \"auto_convert\": {},\n \"guild_ids\": x.allowed_guild_ids,\n \"api_options\": [],\n \"has_subcommands\": True\n }\n self.commands[x.base] = model.CommandObject(x.base, _cmd)\n if x.base not in self.subcommands:\n self.subcommands[x.base] = {}\n if x.subcommand_group:\n if x.subcommand_group not in self.subcommands[x.base]:\n self.subcommands[x.base][x.subcommand_group] = {}\n if x.name in self.subcommands[x.base][x.subcommand_group]:\n raise error.DuplicateCommand(f\"{x.base} {x.subcommand_group} {x.name}\")\n self.subcommands[x.base][x.subcommand_group][x.name] = x\n else:\n if x.name in self.subcommands[x.base]:\n raise error.DuplicateCommand(f\"{x.base} {x.name}\")\n self.subcommands[x.base][x.name] = x\n\n def remove_cog_commands(self, cog):\n \"\"\"\n Removes slash command from :class:`discord.ext.commands.Cog`.\n\n .. 
note::\n Since version ``1.0.9``, this gets called automatically during cog de-initialization.\n\n :param cog: Cog that has slash commands.\n :type cog: discord.ext.commands.Cog\n \"\"\"\n if hasattr(cog, '_slash_registered'):\n del cog._slash_registered\n func_list = [getattr(cog, x) for x in dir(cog)]\n res = [x for x in func_list if\n isinstance(x, (model.CogCommandObject, model.CogSubcommandObject))]\n for x in res:\n if isinstance(x, model.CogCommandObject):\n if x.name not in self.commands:\n continue # Just in case it is removed due to subcommand.\n if x.name in self.subcommands:\n self.commands[x.name].func = None\n continue # Let's remove completely when every subcommand is removed.\n del self.commands[x.name]\n else:\n if x.base not in self.subcommands:\n continue # Just in case...\n if x.subcommand_group:\n del self.subcommands[x.base][x.subcommand_group][x.name]\n if not self.subcommands[x.base][x.subcommand_group]:\n del self.subcommands[x.base][x.subcommand_group]\n else:\n del self.subcommands[x.base][x.name]\n if not self.subcommands[x.base]:\n del self.subcommands[x.base]\n if x.base in self.commands:\n if self.commands[x.base].func:\n self.commands[x.base].has_subcommands = False\n else:\n del self.commands[x.base]\n\n async def to_dict(self):\n \"\"\"\n Converts all commands currently registered to :class:`SlashCommand` to a dictionary.\n Returns a dictionary in the format:\n\n .. code-block:: python\n\n {\n \"global\" : [], # list of global commands\n \"guild\" : {\n 0000: [] # list of commands in the guild 0000\n }\n }\n\n Commands are in the format specified by discord `here <https://discord.com/developers/docs/interactions/slash-commands#applicationcommand>`_\n \"\"\"\n await self._discord.wait_until_ready() # In case commands are still not registered to SlashCommand.\n commands = {\n \"global\": [],\n \"guild\": {}\n }\n for x in self.commands:\n selected = self.commands[x]\n if selected.has_subcommands and selected.func:\n # Registering both subcommand and command with same base name / name\n # will result in only subcommand being registered,\n # so we will warn this at registering subcommands.\n self.logger.warning(f\"Detected command name with same subcommand base name! 
\"\n f\"This command will only have subcommand: {x}\")\n \n options = []\n if selected.has_subcommands:\n tgt = self.subcommands[x]\n for y in tgt:\n sub = tgt[y]\n if isinstance(sub, model.SubcommandObject):\n _dict = {\n \"name\": sub.name,\n \"description\": sub.description or \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND,\n \"options\": sub.options or []\n }\n options.append(_dict)\n else:\n base_dict = {\n \"name\": y,\n \"description\": \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND_GROUP,\n \"options\": []\n }\n for z in sub:\n sub_sub = sub[z]\n _dict = {\n \"name\": sub_sub.name,\n \"description\": sub_sub.description or \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND,\n \"options\": sub_sub.options or []\n }\n base_dict[\"options\"].append(_dict)\n if sub_sub.subcommand_group_description:\n base_dict[\"description\"] = sub_sub.subcommand_group_description\n options.append(base_dict)\n\n command_dict = {\n \"name\": x,\n \"description\": selected.description or \"No Description.\",\n \"options\": selected.options if not options else options\n }\n if selected.allowed_guild_ids:\n for y in selected.allowed_guild_ids:\n try:\n commands[\"guild\"][y].append(command_dict)\n except KeyError:\n commands[\"guild\"][y] = [command_dict]\n else:\n commands[\"global\"].append(command_dict)\n\n return commands\n\n async def sync_all_commands(self, delete_from_unused_guilds = True):\n \"\"\"\n Matches commands registered on Discord to commands registered here.\n Deletes any commands on Discord but not here, and registers any not on Discord.\n This is done with a `put` request.\n If ``auto_register`` and ``auto_delete`` are ``True`` then this will be automatically called.\n\n :param delete_from_unused_guilds: If the bot should make a request to set no commands for guilds that haven't got any commands regestered in :class:``SlashCommand``\n \"\"\"\n commands = await self.to_dict()\n self.logger.info(\"Syncing commands...\")\n all_bot_guilds = [guild.id for guild in self._discord.guilds]\n # This is an extremly bad way to do this, because slash cmds can be in guilds the bot isn't in\n # But it's the only way until discord makes an endpoint to request all the guild with cmds registered.\n\n await self.req.put_slash_commands(slash_commands = commands[\"global\"], guild_id = None)\n \n for guild in commands[\"guild\"]:\n await self.req.put_slash_commands(slash_commands = commands[\"guild\"][guild], guild_id = guild)\n all_bot_guilds.remove(guild)\n if delete_from_unused_guilds:\n for guild in all_bot_guilds:\n await self.req.put_slash_commands(slash_commands=[], guild_id = guild)\n \n self.logger.info(\"Completed syncing all commands!\")\n\n async def register_all_commands(self):\n \"\"\"\n Registers all slash commands to Discord API.\\n\n If ``auto_register`` is ``True`` and ``auto_delete`` is ``False``, then this will be automatically called.\n \"\"\"\n self.logger.info(\"Registering commands...\")\n commands = await self.to_dict()\n for command in commands[\"global\"]:\n name = command.pop('name')\n self.logger.debug(f\"Registering global command {name}\")\n await self.req.add_slash_command(guild_id = None, cmd_name = name, **command)\n \n for guild in commands[\"guild\"]:\n guild_cmds = commands[\"guild\"][guild]\n for command in guild_cmds:\n name = command.pop('name')\n self.logger.debug(f\"Registering guild command {name} in guild: {guild}\")\n await self.req.add_slash_command(guild_id = guild, cmd_name = name, 
**command)\n self.logger.info(\"Completed registering all commands!\")\n\n async def delete_unused_commands(self):\n \"\"\"\n Unregisters all slash commands which are not used by the project to Discord API.\\n\n This might take some time because for every guild the bot is on an API call is made.\\n\n If ``auto_delete`` is ``True`` and ``auto_register`` is ``False``, then this will be automatically called.\n \"\"\"\n await self._discord.wait_until_ready()\n self.logger.info(\"Deleting unused commands...\")\n registered_commands = {}\n global_commands = await self.req.get_all_commands(None)\n\n for cmd in global_commands:\n registered_commands[cmd[\"name\"]] = {\"id\": cmd[\"id\"], \"guild_id\": None}\n\n for guild in self._discord.guilds:\n # Since we can only get commands per guild we need to loop through every one\n try:\n guild_commands = await self.req.get_all_commands(guild.id)\n except discord.Forbidden:\n # In case a guild has not granted permissions to access commands\n continue\n\n for cmd in guild_commands:\n registered_commands[cmd[\"name\"]] = {\"id\": cmd[\"id\"], \"guild_id\": guild.id}\n\n for x in registered_commands:\n if x not in self.commands:\n # Delete command if not found locally\n selected = registered_commands[x]\n await self.req.remove_slash_command(selected[\"guild_id\"], selected[\"id\"])\n\n self.logger.info(\"Completed deleting unused commands!\")\n\n def add_slash_command(self,\n cmd,\n name: str = None,\n description: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: list = None,\n has_subcommands: bool = False):\n \"\"\"\n Registers slash command to SlashCommand.\n\n :param cmd: Command Coroutine.\n :type cmd: Coroutine\n :param name: Name of the slash command. Default name of the coroutine.\n :type name: str\n :param description: Description of the slash command. Defaults to command docstring or ``None``.\n :type description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.\n :type options: list\n :param has_subcommands: Whether it has subcommand. 
Default ``False``.\n :type has_subcommands: bool\n \"\"\"\n name = name or cmd.__name__\n name = name.lower()\n if name in self.commands:\n tgt = self.commands[name]\n if not tgt.has_subcommands:\n raise error.DuplicateCommand(name)\n has_subcommands = tgt.has_subcommands\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n\n description = description or getdoc(cmd)\n\n if options is None:\n options = manage_commands.generate_options(cmd, description)\n\n if options:\n auto_convert = manage_commands.generate_auto_convert(options)\n\n _cmd = {\n \"func\": cmd,\n \"description\": description,\n \"auto_convert\": auto_convert,\n \"guild_ids\": guild_ids,\n \"api_options\": options,\n \"has_subcommands\": has_subcommands\n }\n self.commands[name] = model.CommandObject(name, _cmd)\n self.logger.debug(f\"Added command `{name}`\")\n\n def add_subcommand(self,\n cmd,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n subcommand_group_description: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: list = None):\n \"\"\"\n Registers subcommand to SlashCommand.\n\n :param cmd: Subcommand Coroutine.\n :type cmd: Coroutine\n :param base: Name of the base command.\n :type base: str\n :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.\n :type subcommand_group: str\n :param name: Name of the subcommand. Default name of the coroutine.\n :type name: str\n :param description: Description of the subcommand. Defaults to command docstring or ``None``.\n :type description: str\n :param base_description: Description of the base command. Default ``None``.\n :type base_description: str\n :param subcommand_group_description: Description of the subcommand_group. Default ``None``.\n :type subcommand_group_description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. 
Default ``None``.\n :type options: list\n \"\"\"\n base = base.lower()\n subcommand_group = subcommand_group.lower() if subcommand_group else subcommand_group\n name = name or cmd.__name__\n name = name.lower()\n description = description or getdoc(cmd)\n\n if name in self.commands:\n tgt = self.commands[name]\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n\n if options is None:\n options = manage_commands.generate_options(cmd, description)\n\n if options:\n auto_convert = manage_commands.generate_auto_convert(options)\n\n _cmd = {\n \"func\": None,\n \"description\": base_description,\n \"auto_convert\": {},\n \"guild_ids\": guild_ids,\n \"api_options\": [],\n \"has_subcommands\": True\n }\n _sub = {\n \"func\": cmd,\n \"name\": name,\n \"description\": description,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"auto_convert\": auto_convert,\n \"guild_ids\": guild_ids,\n \"api_options\": options\n }\n if base not in self.commands:\n self.commands[base] = model.CommandObject(base, _cmd)\n else:\n self.commands[base].has_subcommands = True\n self.commands[base].allowed_guild_ids = guild_ids\n if self.commands[base].description:\n _cmd[\"description\"] = self.commands[base].description\n if base not in self.subcommands:\n self.subcommands[base] = {}\n if subcommand_group:\n if subcommand_group not in self.subcommands[base]:\n self.subcommands[base][subcommand_group] = {}\n if name in self.subcommands[base][subcommand_group]:\n raise error.DuplicateCommand(f\"{base} {subcommand_group} {name}\")\n self.subcommands[base][subcommand_group][name] = model.SubcommandObject(_sub, base, name, subcommand_group)\n else:\n if name in self.subcommands[base]:\n raise error.DuplicateCommand(f\"{base} {name}\")\n self.subcommands[base][name] = model.SubcommandObject(_sub, base, name)\n self.logger.debug(f\"Added subcommand `{base} {subcommand_group or ''} {name or cmd.__name__}`\")\n\n def slash(self,\n *,\n name: str = None,\n description: str = None,\n auto_convert: dict = None,\n guild_id: int = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None):\n \"\"\"\n Decorator that registers coroutine as a slash command.\\n\n All decorator args must be passed as keyword-only args.\\n\n 1 arg for command coroutine is required for ctx(:class:`.model.SlashContext`),\n and if your slash command has some args, then those args are also required.\\n\n All args must be passed as keyword-args.\n\n .. note::\n Role, User, and Channel types are passed as id if you don't set ``auto_convert``, since API doesn't give type of the option for now.\\n\n Also, if ``options`` is passed, then ``auto_convert`` will be automatically created or overrided.\n\n .. warning::\n Unlike discord.py's command, ``*args``, keyword-only args, converters, etc. are NOT supported.\n\n Example:\n\n .. code-block:: python\n\n @slash.slash(name=\"ping\")\n async def _slash(ctx): # Normal usage.\n await ctx.send(content=f\"Pong! (`{round(bot.latency*1000)}`ms)\")\n\n\n @slash.slash(name=\"pick\")\n async def _pick(ctx, choice1, choice2): # Command with 1 or more args.\n await ctx.send(content=str(random.choice([choice1, choice2])))\n\n Example of formatting ``auto_convert``:\n\n .. 
code-block:: python\n\n {\"option_role\": \"role\", # For key put name of the option and for value put type of the option.\n \"option_user\": SlashCommandOptionType.USER, # Also can use an enumeration member for the type\n \"option_user_two\": 6, # or number\n \"option_channel\": \"CHANNEL\"} # or upper case string.\n\n :param name: Name of the slash command. Default name of the coroutine.\n :type name: str\n :param description: Description of the slash command. Default ``None``.\n :type description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_id: Deprecated. Use ``guild_ids`` instead.\n :type guild_id: int\n :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.\n :type options: List[dict]\n \"\"\"\n if guild_id:\n self.logger.warning(\"`guild_id` is deprecated! `Use guild_ids` instead.\")\n guild_ids = [guild_id]\n\n def wrapper(cmd):\n self.add_slash_command(cmd, name, description, auto_convert, guild_ids, options)\n return cmd\n\n return wrapper\n\n def subcommand(self,\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None):\n \"\"\"\n Decorator that registers subcommand.\\n\n Unlike discord.py, you don't need base command.\\n\n All args must be passed as keyword-args.\n\n Example:\n\n .. code-block:: python\n\n # /group say <str>\n @slash.subcommand(base=\"group\", name=\"say\")\n async def _group_say(ctx, _str):\n await ctx.send(content=_str)\n\n # /group kick user <user>\n @slash.subcommand(base=\"group\",\n subcommand_group=\"kick\",\n name=\"user\",\n auto_convert={\"user\": \"user\"})\n async def _group_kick_user(ctx, user):\n ...\n\n :param base: Name of the base command.\n :type base: str\n :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.\n :type subcommand_group: str\n :param name: Name of the subcommand. Default name of the coroutine.\n :type name: str\n :param description: Description of the subcommand. Default ``None``.\n :type description: str\n :param base_description: Description of the base command. Default ``None``.\n :type base_description: str\n :param base_desc: Alias of ``base_description``.\n :param subcommand_group_description: Description of the subcommand_group. Default ``None``.\n :type subcommand_group_description: str\n :param sub_group_desc: Alias of ``subcommand_group_description``.\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. 
Default ``None``.\n :type options: List[dict]\n \"\"\"\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n\n def wrapper(cmd):\n self.add_subcommand(cmd, base, subcommand_group, name, description, base_description, subcommand_group_description, auto_convert, guild_ids, options)\n return cmd\n\n return wrapper\n\n async def process_options(self, guild: discord.Guild, options: list, auto_convert: dict) -> list:\n \"\"\"\n Processes Role, User, and Channel option types to discord.py's models.\n\n :param guild: Guild of the command message.\n :type guild: discord.Guild\n :param options: Dict of options.\n :type options: list\n :param auto_convert: Dictionary of how to convert option values.\n :type auto_convert: dict\n :return: list\n \"\"\"\n if not guild:\n self.logger.info(\"This command invoke is missing guild. Skipping option process.\")\n return [x[\"value\"] for x in options]\n\n if not isinstance(guild, discord.Guild):\n return [x[\"value\"] for x in options]\n\n if not auto_convert:\n return [x[\"value\"] for x in options]\n\n converters = [\n [guild.get_member, guild.fetch_member],\n guild.get_channel,\n guild.get_role\n ]\n\n types = {\n \"user\": 0,\n \"USER\": 0,\n model.SlashCommandOptionType.USER: 0,\n \"6\": 0,\n 6: 0,\n \"channel\": 1,\n \"CHANNEL\": 1,\n model.SlashCommandOptionType.CHANNEL: 1,\n \"7\": 1,\n 7: 1,\n \"role\": 2,\n \"ROLE\": 2,\n model.SlashCommandOptionType.ROLE: 2,\n 8: 2,\n \"8\": 2\n }\n\n to_return = []\n\n for x in options:\n selected = x\n if selected[\"name\"] in auto_convert:\n if auto_convert[selected[\"name\"]] not in types:\n to_return.append(selected[\"value\"])\n continue\n loaded_converter = converters[types[auto_convert[selected[\"name\"]]]]\n if isinstance(loaded_converter, list):\n cache_first = loaded_converter[0](int(selected[\"value\"]))\n if cache_first:\n to_return.append(cache_first)\n continue\n loaded_converter = loaded_converter[1]\n try:\n to_return.append(await loaded_converter(int(selected[\"value\"]))) \\\n if iscoroutinefunction(loaded_converter) else \\\n to_return.append(loaded_converter(int(selected[\"value\"])))\n except (discord.Forbidden, discord.HTTPException):\n self.logger.warning(\"Failed fetching user! Passing ID instead.\")\n to_return.append(int(selected[\"value\"]))\n return to_return\n\n async def on_socket_response(self, msg):\n \"\"\"\n This event listener is automatically registered at initialization of this class.\n\n .. 
warning::\n DO NOT MANUALLY REGISTER, OVERRIDE, OR WHATEVER ACTION TO THIS COROUTINE UNLESS YOU KNOW WHAT YOU ARE DOING.\n\n :param msg: Gateway message.\n \"\"\"\n if msg[\"t\"] != \"INTERACTION_CREATE\":\n return\n\n to_use = msg[\"d\"]\n\n if to_use[\"data\"][\"name\"] in self.commands:\n\n ctx = context.SlashContext(self.req, to_use, self._discord, self.logger)\n cmd_name = to_use[\"data\"][\"name\"]\n\n if cmd_name not in self.commands and cmd_name in self.subcommands:\n return await self.handle_subcommand(ctx, to_use)\n\n selected_cmd = self.commands[to_use[\"data\"][\"name\"]]\n\n if selected_cmd.allowed_guild_ids:\n guild_id = ctx.guild.id if isinstance(ctx.guild, discord.Guild) else ctx.guild\n\n if guild_id not in selected_cmd.allowed_guild_ids:\n return\n\n if selected_cmd.has_subcommands and not selected_cmd.func:\n return await self.handle_subcommand(ctx, to_use)\n\n if \"options\" in to_use[\"data\"]:\n for x in to_use[\"data\"][\"options\"]:\n if \"value\" not in x:\n return await self.handle_subcommand(ctx, to_use)\n\n args = await self.process_options(ctx.guild, to_use[\"data\"][\"options\"], selected_cmd.auto_convert) \\\n if \"options\" in to_use[\"data\"] else []\n\n self._discord.dispatch(\"slash_command\", ctx)\n\n try:\n await selected_cmd.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n\n async def handle_subcommand(self, ctx: context.SlashContext, data: dict):\n \"\"\"\n Coroutine for handling subcommand.\n\n .. warning::\n Do not manually call this.\n\n :param ctx: :class:`.model.SlashContext` instance.\n :param data: Gateway message.\n \"\"\"\n if data[\"data\"][\"name\"] not in self.subcommands:\n return\n base = self.subcommands[data[\"data\"][\"name\"]]\n sub = data[\"data\"][\"options\"][0]\n sub_name = sub[\"name\"]\n if sub_name not in base:\n return\n ctx.subcommand = sub_name\n sub_opts = sub[\"options\"] if \"options\" in sub else []\n for x in sub_opts:\n if \"options\" in x or \"value\" not in x:\n sub_group = x[\"name\"]\n if sub_group not in base[sub_name]:\n return\n ctx.subcommand_group = sub_group\n selected = base[sub_name][sub_group]\n args = await self.process_options(ctx.guild, x[\"options\"], selected.auto_convert) \\\n if \"options\" in x else []\n self._discord.dispatch(\"slash_command\", ctx)\n try:\n await selected.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n return\n selected = base[sub_name]\n args = await self.process_options(ctx.guild, sub_opts, selected.auto_convert) \\\n if \"options\" in sub else []\n self._discord.dispatch(\"slash_command\", ctx)\n try:\n await selected.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n\n async def on_slash_command_error(self, ctx, ex):\n \"\"\"\n Handles Exception occurred from invoking command.\n\n Example of adding event:\n\n .. code-block:: python\n\n @client.event\n async def on_slash_command_error(ctx, ex):\n ...\n\n Example of adding listener:\n\n .. 
code-block:: python\n\n @bot.listen()\n async def on_slash_command_error(ctx, ex):\n ...\n\n :param ctx: Context of the command.\n :type ctx: :class:`.model.SlashContext`\n :param ex: Exception from the command invoke.\n :type ex: Exception\n :return:\n \"\"\"\n if self.has_listener:\n if self._discord.extra_events.get('on_slash_command_error'):\n self._discord.dispatch(\"slash_command_error\", ctx, ex)\n return\n if hasattr(self._discord, \"on_slash_command_error\"):\n self._discord.dispatch(\"slash_command_error\", ctx, ex)\n return\n # Prints exception if not overrided or has no listener for error.\n self.logger.exception(f\"An exception has occurred while executing command `{ctx.name}`:\")\n", "path": "discord_slash/client.py"}], "after_files": [{"content": "import logging\nimport typing\nimport discord\nfrom inspect import iscoroutinefunction, getdoc\nfrom discord.ext import commands\nfrom . import http\nfrom . import model\nfrom . import error\nfrom . import context\nfrom .utils import manage_commands\n\n\nclass SlashCommand:\n \"\"\"\n Slash command extension class.\n\n :param client: discord.py Client or Bot instance.\n :type client: Union[discord.Client, discord.ext.commands.Bot]\n :param auto_register: Whether to register commands automatically. Default `False`.\n :type auto_register: bool\n :param override_type: Whether to override checking type of the client and try register event.\n :type override_type: bool\n\n :ivar _discord: Discord client of this client.\n :ivar commands: Dictionary of the registered commands via :func:`.slash` decorator.\n :ivar req: :class:`.http.SlashCommandRequest` of this client.\n :ivar logger: Logger of this client.\n :ivar auto_register: Whether to register commands automatically.\n :ivar auto_delete: Whether to delete commands not found in the project automatically.\n :ivar has_listener: Whether discord client has listener add function.\n \"\"\"\n\n def __init__(self,\n client: typing.Union[discord.Client, commands.Bot],\n auto_register: bool = False,\n auto_delete: bool = False,\n override_type: bool = False):\n self._discord = client\n self.commands = {}\n self.subcommands = {}\n self.logger = logging.getLogger(\"discord_slash\")\n self.req = http.SlashCommandRequest(self.logger, self._discord)\n self.auto_register = auto_register\n self.auto_delete = auto_delete\n\n if self.auto_register and self.auto_delete:\n self._discord.loop.create_task(self.sync_all_commands())\n elif self.auto_register:\n self._discord.loop.create_task(self.register_all_commands())\n elif self.auto_delete:\n self._discord.loop.create_task(self.delete_unused_commands())\n \n if not isinstance(client, commands.Bot) and not isinstance(client,\n commands.AutoShardedBot) and not override_type:\n self.logger.info(\"Detected discord.Client! 
Overriding on_socket_response.\")\n self._discord.on_socket_response = self.on_socket_response\n self.has_listener = False\n else:\n if not hasattr(self._discord, 'slash'):\n self._discord.slash = self\n else:\n raise error.DuplicateSlashClient(\"You can't have duplicate SlashCommand instances!\")\n \n self._discord.add_listener(self.on_socket_response)\n self.has_listener = True\n default_add_function = self._discord.add_cog\n def override_add_cog(cog: commands.Cog):\n default_add_function(cog)\n self.get_cog_commands(cog)\n self._discord.add_cog = override_add_cog\n default_remove_function = self._discord.remove_cog\n def override_remove_cog(name: str):\n cog = self._discord.get_cog(name)\n if cog is None:\n return\n self.remove_cog_commands(cog)\n default_remove_function(name)\n self._discord.remove_cog = override_remove_cog\n \n \n\n def get_cog_commands(self, cog: commands.Cog):\n \"\"\"\n Gets slash command from :class:`discord.ext.commands.Cog`.\n\n .. note::\n Since version ``1.0.9``, this gets called automatically during cog initialization.\n\n :param cog: Cog that has slash commands.\n :type cog: discord.ext.commands.Cog\n \"\"\"\n if hasattr(cog, '_slash_registered'): # Temporary warning\n return self.logger.warning(\"Calling get_cog_commands is no longer required \"\n \"to add cog slash commands. Make sure to remove all calls to this function.\")\n cog._slash_registered = True # Assuming all went well\n func_list = [getattr(cog, x) for x in dir(cog)]\n res = [x for x in func_list if isinstance(x, (model.CogCommandObject, model.CogSubcommandObject))]\n for x in res:\n x.cog = cog\n if isinstance(x, model.CogCommandObject):\n if x.name in self.commands:\n raise error.DuplicateCommand(x.name)\n self.commands[x.name] = x\n else:\n if x.base in self.commands:\n for i in self.commands[x.base].allowed_guild_ids:\n if i not in x.allowed_guild_ids:\n x.allowed_guild_ids.append(i)\n self.commands[x.base].has_subcommands = True\n else:\n _cmd = {\n \"func\": None,\n \"description\": x.base_description,\n \"auto_convert\": {},\n \"guild_ids\": x.allowed_guild_ids,\n \"api_options\": [],\n \"has_subcommands\": True\n }\n self.commands[x.base] = model.CommandObject(x.base, _cmd)\n if x.base not in self.subcommands:\n self.subcommands[x.base] = {}\n if x.subcommand_group:\n if x.subcommand_group not in self.subcommands[x.base]:\n self.subcommands[x.base][x.subcommand_group] = {}\n if x.name in self.subcommands[x.base][x.subcommand_group]:\n raise error.DuplicateCommand(f\"{x.base} {x.subcommand_group} {x.name}\")\n self.subcommands[x.base][x.subcommand_group][x.name] = x\n else:\n if x.name in self.subcommands[x.base]:\n raise error.DuplicateCommand(f\"{x.base} {x.name}\")\n self.subcommands[x.base][x.name] = x\n\n def remove_cog_commands(self, cog):\n \"\"\"\n Removes slash command from :class:`discord.ext.commands.Cog`.\n\n .. 
note::\n Since version ``1.0.9``, this gets called automatically during cog de-initialization.\n\n :param cog: Cog that has slash commands.\n :type cog: discord.ext.commands.Cog\n \"\"\"\n if hasattr(cog, '_slash_registered'):\n del cog._slash_registered\n func_list = [getattr(cog, x) for x in dir(cog)]\n res = [x for x in func_list if\n isinstance(x, (model.CogCommandObject, model.CogSubcommandObject))]\n for x in res:\n if isinstance(x, model.CogCommandObject):\n if x.name not in self.commands:\n continue # Just in case it is removed due to subcommand.\n if x.name in self.subcommands:\n self.commands[x.name].func = None\n continue # Let's remove completely when every subcommand is removed.\n del self.commands[x.name]\n else:\n if x.base not in self.subcommands:\n continue # Just in case...\n if x.subcommand_group:\n del self.subcommands[x.base][x.subcommand_group][x.name]\n if not self.subcommands[x.base][x.subcommand_group]:\n del self.subcommands[x.base][x.subcommand_group]\n else:\n del self.subcommands[x.base][x.name]\n if not self.subcommands[x.base]:\n del self.subcommands[x.base]\n if x.base in self.commands:\n if self.commands[x.base].func:\n self.commands[x.base].has_subcommands = False\n else:\n del self.commands[x.base]\n\n async def to_dict(self):\n \"\"\"\n Converts all commands currently registered to :class:`SlashCommand` to a dictionary.\n Returns a dictionary in the format:\n\n .. code-block:: python\n\n {\n \"global\" : [], # list of global commands\n \"guild\" : {\n 0000: [] # list of commands in the guild 0000\n }\n }\n\n Commands are in the format specified by discord `here <https://discord.com/developers/docs/interactions/slash-commands#applicationcommand>`_\n \"\"\"\n await self._discord.wait_until_ready() # In case commands are still not registered to SlashCommand.\n commands = {\n \"global\": [],\n \"guild\": {}\n }\n for x in self.commands:\n selected = self.commands[x]\n if selected.has_subcommands and selected.func:\n # Registering both subcommand and command with same base name / name\n # will result in only subcommand being registered,\n # so we will warn this at registering subcommands.\n self.logger.warning(f\"Detected command name with same subcommand base name! 
\"\n f\"This command will only have subcommand: {x}\")\n \n options = []\n if selected.has_subcommands:\n tgt = self.subcommands[x]\n for y in tgt:\n sub = tgt[y]\n if isinstance(sub, model.SubcommandObject):\n _dict = {\n \"name\": sub.name,\n \"description\": sub.description or \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND,\n \"options\": sub.options or []\n }\n options.append(_dict)\n else:\n base_dict = {\n \"name\": y,\n \"description\": \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND_GROUP,\n \"options\": []\n }\n for z in sub:\n sub_sub = sub[z]\n _dict = {\n \"name\": sub_sub.name,\n \"description\": sub_sub.description or \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND,\n \"options\": sub_sub.options or []\n }\n base_dict[\"options\"].append(_dict)\n if sub_sub.subcommand_group_description:\n base_dict[\"description\"] = sub_sub.subcommand_group_description\n options.append(base_dict)\n\n command_dict = {\n \"name\": x,\n \"description\": selected.description or \"No Description.\",\n \"options\": selected.options if not options else options\n }\n if selected.allowed_guild_ids:\n for y in selected.allowed_guild_ids:\n try:\n commands[\"guild\"][y].append(command_dict)\n except KeyError:\n commands[\"guild\"][y] = [command_dict]\n else:\n commands[\"global\"].append(command_dict)\n\n return commands\n\n async def sync_all_commands(self, delete_from_unused_guilds = True):\n \"\"\"\n Matches commands registered on Discord to commands registered here.\n Deletes any commands on Discord but not here, and registers any not on Discord.\n This is done with a `put` request.\n If ``auto_register`` and ``auto_delete`` are ``True`` then this will be automatically called.\n\n :param delete_from_unused_guilds: If the bot should make a request to set no commands for guilds that haven't got any commands regestered in :class:``SlashCommand``\n \"\"\"\n commands = await self.to_dict()\n self.logger.info(\"Syncing commands...\")\n all_bot_guilds = [guild.id for guild in self._discord.guilds]\n # This is an extremly bad way to do this, because slash cmds can be in guilds the bot isn't in\n # But it's the only way until discord makes an endpoint to request all the guild with cmds registered.\n\n await self.req.put_slash_commands(slash_commands = commands[\"global\"], guild_id = None)\n \n for guild in commands[\"guild\"]:\n await self.req.put_slash_commands(slash_commands = commands[\"guild\"][guild], guild_id = guild)\n all_bot_guilds.remove(guild)\n if delete_from_unused_guilds:\n for guild in all_bot_guilds:\n await self.req.put_slash_commands(slash_commands=[], guild_id = guild)\n \n self.logger.info(\"Completed syncing all commands!\")\n\n async def register_all_commands(self):\n \"\"\"\n Registers all slash commands to Discord API.\\n\n If ``auto_register`` is ``True`` and ``auto_delete`` is ``False``, then this will be automatically called.\n \"\"\"\n self.logger.info(\"Registering commands...\")\n commands = await self.to_dict()\n for command in commands[\"global\"]:\n name = command.pop('name')\n self.logger.debug(f\"Registering global command {name}\")\n await self.req.add_slash_command(guild_id = None, cmd_name = name, **command)\n \n for guild in commands[\"guild\"]:\n guild_cmds = commands[\"guild\"][guild]\n for command in guild_cmds:\n name = command.pop('name')\n self.logger.debug(f\"Registering guild command {name} in guild: {guild}\")\n await self.req.add_slash_command(guild_id = guild, cmd_name = name, 
**command)\n self.logger.info(\"Completed registering all commands!\")\n\n async def delete_unused_commands(self):\n \"\"\"\n Unregisters all slash commands which are not used by the project to Discord API.\\n\n This might take some time because for every guild the bot is on an API call is made.\\n\n If ``auto_delete`` is ``True`` and ``auto_register`` is ``False``, then this will be automatically called.\n \"\"\"\n await self._discord.wait_until_ready()\n self.logger.info(\"Deleting unused commands...\")\n registered_commands = {}\n global_commands = await self.req.get_all_commands(None)\n\n for cmd in global_commands:\n registered_commands[cmd[\"name\"]] = {\"id\": cmd[\"id\"], \"guild_id\": None}\n\n for guild in self._discord.guilds:\n # Since we can only get commands per guild we need to loop through every one\n try:\n guild_commands = await self.req.get_all_commands(guild.id)\n except discord.Forbidden:\n # In case a guild has not granted permissions to access commands\n continue\n\n for cmd in guild_commands:\n registered_commands[cmd[\"name\"]] = {\"id\": cmd[\"id\"], \"guild_id\": guild.id}\n\n for x in registered_commands:\n if x not in self.commands:\n # Delete command if not found locally\n selected = registered_commands[x]\n await self.req.remove_slash_command(selected[\"guild_id\"], selected[\"id\"])\n\n self.logger.info(\"Completed deleting unused commands!\")\n\n def add_slash_command(self,\n cmd,\n name: str = None,\n description: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: list = None,\n has_subcommands: bool = False):\n \"\"\"\n Registers slash command to SlashCommand.\n\n :param cmd: Command Coroutine.\n :type cmd: Coroutine\n :param name: Name of the slash command. Default name of the coroutine.\n :type name: str\n :param description: Description of the slash command. Defaults to command docstring or ``None``.\n :type description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.\n :type options: list\n :param has_subcommands: Whether it has subcommand. 
Default ``False``.\n :type has_subcommands: bool\n \"\"\"\n name = name or cmd.__name__\n name = name.lower()\n if name in self.commands:\n tgt = self.commands[name]\n if not tgt.has_subcommands:\n raise error.DuplicateCommand(name)\n has_subcommands = tgt.has_subcommands\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n\n description = description or getdoc(cmd)\n\n if options is None:\n options = manage_commands.generate_options(cmd, description)\n\n if options:\n auto_convert = manage_commands.generate_auto_convert(options)\n\n _cmd = {\n \"func\": cmd,\n \"description\": description,\n \"auto_convert\": auto_convert,\n \"guild_ids\": guild_ids,\n \"api_options\": options,\n \"has_subcommands\": has_subcommands\n }\n self.commands[name] = model.CommandObject(name, _cmd)\n self.logger.debug(f\"Added command `{name}`\")\n\n def add_subcommand(self,\n cmd,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n subcommand_group_description: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: list = None):\n \"\"\"\n Registers subcommand to SlashCommand.\n\n :param cmd: Subcommand Coroutine.\n :type cmd: Coroutine\n :param base: Name of the base command.\n :type base: str\n :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.\n :type subcommand_group: str\n :param name: Name of the subcommand. Default name of the coroutine.\n :type name: str\n :param description: Description of the subcommand. Defaults to command docstring or ``None``.\n :type description: str\n :param base_description: Description of the base command. Default ``None``.\n :type base_description: str\n :param subcommand_group_description: Description of the subcommand_group. Default ``None``.\n :type subcommand_group_description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. 
Default ``None``.\n :type options: list\n \"\"\"\n base = base.lower()\n subcommand_group = subcommand_group.lower() if subcommand_group else subcommand_group\n name = name or cmd.__name__\n name = name.lower()\n description = description or getdoc(cmd)\n\n if base in self.commands:\n tgt = self.commands[base]\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n\n if options is None:\n options = manage_commands.generate_options(cmd, description)\n\n if options:\n auto_convert = manage_commands.generate_auto_convert(options)\n\n _cmd = {\n \"func\": None,\n \"description\": base_description,\n \"auto_convert\": {},\n \"guild_ids\": guild_ids,\n \"api_options\": [],\n \"has_subcommands\": True\n }\n _sub = {\n \"func\": cmd,\n \"name\": name,\n \"description\": description,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"auto_convert\": auto_convert,\n \"guild_ids\": guild_ids,\n \"api_options\": options\n }\n if base not in self.commands:\n self.commands[base] = model.CommandObject(base, _cmd)\n else:\n self.commands[base].has_subcommands = True\n self.commands[base].allowed_guild_ids = guild_ids\n if self.commands[base].description:\n _cmd[\"description\"] = self.commands[base].description\n if base not in self.subcommands:\n self.subcommands[base] = {}\n if subcommand_group:\n if subcommand_group not in self.subcommands[base]:\n self.subcommands[base][subcommand_group] = {}\n if name in self.subcommands[base][subcommand_group]:\n raise error.DuplicateCommand(f\"{base} {subcommand_group} {name}\")\n self.subcommands[base][subcommand_group][name] = model.SubcommandObject(_sub, base, name, subcommand_group)\n else:\n if name in self.subcommands[base]:\n raise error.DuplicateCommand(f\"{base} {name}\")\n self.subcommands[base][name] = model.SubcommandObject(_sub, base, name)\n self.logger.debug(f\"Added subcommand `{base} {subcommand_group or ''} {name or cmd.__name__}`\")\n\n def slash(self,\n *,\n name: str = None,\n description: str = None,\n auto_convert: dict = None,\n guild_id: int = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None):\n \"\"\"\n Decorator that registers coroutine as a slash command.\\n\n All decorator args must be passed as keyword-only args.\\n\n 1 arg for command coroutine is required for ctx(:class:`.model.SlashContext`),\n and if your slash command has some args, then those args are also required.\\n\n All args must be passed as keyword-args.\n\n .. note::\n Role, User, and Channel types are passed as id if you don't set ``auto_convert``, since API doesn't give type of the option for now.\\n\n Also, if ``options`` is passed, then ``auto_convert`` will be automatically created or overrided.\n\n .. warning::\n Unlike discord.py's command, ``*args``, keyword-only args, converters, etc. are NOT supported.\n\n Example:\n\n .. code-block:: python\n\n @slash.slash(name=\"ping\")\n async def _slash(ctx): # Normal usage.\n await ctx.send(content=f\"Pong! (`{round(bot.latency*1000)}`ms)\")\n\n\n @slash.slash(name=\"pick\")\n async def _pick(ctx, choice1, choice2): # Command with 1 or more args.\n await ctx.send(content=str(random.choice([choice1, choice2])))\n\n Example of formatting ``auto_convert``:\n\n .. 
code-block:: python\n\n {\"option_role\": \"role\", # For key put name of the option and for value put type of the option.\n \"option_user\": SlashCommandOptionType.USER, # Also can use an enumeration member for the type\n \"option_user_two\": 6, # or number\n \"option_channel\": \"CHANNEL\"} # or upper case string.\n\n :param name: Name of the slash command. Default name of the coroutine.\n :type name: str\n :param description: Description of the slash command. Default ``None``.\n :type description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_id: Deprecated. Use ``guild_ids`` instead.\n :type guild_id: int\n :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.\n :type options: List[dict]\n \"\"\"\n if guild_id:\n self.logger.warning(\"`guild_id` is deprecated! `Use guild_ids` instead.\")\n guild_ids = [guild_id]\n\n def wrapper(cmd):\n self.add_slash_command(cmd, name, description, auto_convert, guild_ids, options)\n return cmd\n\n return wrapper\n\n def subcommand(self,\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None):\n \"\"\"\n Decorator that registers subcommand.\\n\n Unlike discord.py, you don't need base command.\\n\n All args must be passed as keyword-args.\n\n Example:\n\n .. code-block:: python\n\n # /group say <str>\n @slash.subcommand(base=\"group\", name=\"say\")\n async def _group_say(ctx, _str):\n await ctx.send(content=_str)\n\n # /group kick user <user>\n @slash.subcommand(base=\"group\",\n subcommand_group=\"kick\",\n name=\"user\",\n auto_convert={\"user\": \"user\"})\n async def _group_kick_user(ctx, user):\n ...\n\n :param base: Name of the base command.\n :type base: str\n :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.\n :type subcommand_group: str\n :param name: Name of the subcommand. Default name of the coroutine.\n :type name: str\n :param description: Description of the subcommand. Default ``None``.\n :type description: str\n :param base_description: Description of the base command. Default ``None``.\n :type base_description: str\n :param base_desc: Alias of ``base_description``.\n :param subcommand_group_description: Description of the subcommand_group. Default ``None``.\n :type subcommand_group_description: str\n :param sub_group_desc: Alias of ``subcommand_group_description``.\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. 
Default ``None``.\n :type options: List[dict]\n \"\"\"\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n\n def wrapper(cmd):\n self.add_subcommand(cmd, base, subcommand_group, name, description, base_description, subcommand_group_description, auto_convert, guild_ids, options)\n return cmd\n\n return wrapper\n\n async def process_options(self, guild: discord.Guild, options: list, auto_convert: dict) -> list:\n \"\"\"\n Processes Role, User, and Channel option types to discord.py's models.\n\n :param guild: Guild of the command message.\n :type guild: discord.Guild\n :param options: Dict of options.\n :type options: list\n :param auto_convert: Dictionary of how to convert option values.\n :type auto_convert: dict\n :return: list\n \"\"\"\n if not guild:\n self.logger.info(\"This command invoke is missing guild. Skipping option process.\")\n return [x[\"value\"] for x in options]\n\n if not isinstance(guild, discord.Guild):\n return [x[\"value\"] for x in options]\n\n if not auto_convert:\n return [x[\"value\"] for x in options]\n\n converters = [\n [guild.get_member, guild.fetch_member],\n guild.get_channel,\n guild.get_role\n ]\n\n types = {\n \"user\": 0,\n \"USER\": 0,\n model.SlashCommandOptionType.USER: 0,\n \"6\": 0,\n 6: 0,\n \"channel\": 1,\n \"CHANNEL\": 1,\n model.SlashCommandOptionType.CHANNEL: 1,\n \"7\": 1,\n 7: 1,\n \"role\": 2,\n \"ROLE\": 2,\n model.SlashCommandOptionType.ROLE: 2,\n 8: 2,\n \"8\": 2\n }\n\n to_return = []\n\n for x in options:\n selected = x\n if selected[\"name\"] in auto_convert:\n if auto_convert[selected[\"name\"]] not in types:\n to_return.append(selected[\"value\"])\n continue\n loaded_converter = converters[types[auto_convert[selected[\"name\"]]]]\n if isinstance(loaded_converter, list):\n cache_first = loaded_converter[0](int(selected[\"value\"]))\n if cache_first:\n to_return.append(cache_first)\n continue\n loaded_converter = loaded_converter[1]\n try:\n to_return.append(await loaded_converter(int(selected[\"value\"]))) \\\n if iscoroutinefunction(loaded_converter) else \\\n to_return.append(loaded_converter(int(selected[\"value\"])))\n except (discord.Forbidden, discord.HTTPException):\n self.logger.warning(\"Failed fetching user! Passing ID instead.\")\n to_return.append(int(selected[\"value\"]))\n return to_return\n\n async def on_socket_response(self, msg):\n \"\"\"\n This event listener is automatically registered at initialization of this class.\n\n .. 
warning::\n DO NOT MANUALLY REGISTER, OVERRIDE, OR WHATEVER ACTION TO THIS COROUTINE UNLESS YOU KNOW WHAT YOU ARE DOING.\n\n :param msg: Gateway message.\n \"\"\"\n if msg[\"t\"] != \"INTERACTION_CREATE\":\n return\n\n to_use = msg[\"d\"]\n\n if to_use[\"data\"][\"name\"] in self.commands:\n\n ctx = context.SlashContext(self.req, to_use, self._discord, self.logger)\n cmd_name = to_use[\"data\"][\"name\"]\n\n if cmd_name not in self.commands and cmd_name in self.subcommands:\n return await self.handle_subcommand(ctx, to_use)\n\n selected_cmd = self.commands[to_use[\"data\"][\"name\"]]\n\n if selected_cmd.allowed_guild_ids:\n guild_id = ctx.guild.id if isinstance(ctx.guild, discord.Guild) else ctx.guild\n\n if guild_id not in selected_cmd.allowed_guild_ids:\n return\n\n if selected_cmd.has_subcommands and not selected_cmd.func:\n return await self.handle_subcommand(ctx, to_use)\n\n if \"options\" in to_use[\"data\"]:\n for x in to_use[\"data\"][\"options\"]:\n if \"value\" not in x:\n return await self.handle_subcommand(ctx, to_use)\n\n args = await self.process_options(ctx.guild, to_use[\"data\"][\"options\"], selected_cmd.auto_convert) \\\n if \"options\" in to_use[\"data\"] else []\n\n self._discord.dispatch(\"slash_command\", ctx)\n\n try:\n await selected_cmd.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n\n async def handle_subcommand(self, ctx: context.SlashContext, data: dict):\n \"\"\"\n Coroutine for handling subcommand.\n\n .. warning::\n Do not manually call this.\n\n :param ctx: :class:`.model.SlashContext` instance.\n :param data: Gateway message.\n \"\"\"\n if data[\"data\"][\"name\"] not in self.subcommands:\n return\n base = self.subcommands[data[\"data\"][\"name\"]]\n sub = data[\"data\"][\"options\"][0]\n sub_name = sub[\"name\"]\n if sub_name not in base:\n return\n ctx.subcommand = sub_name\n sub_opts = sub[\"options\"] if \"options\" in sub else []\n for x in sub_opts:\n if \"options\" in x or \"value\" not in x:\n sub_group = x[\"name\"]\n if sub_group not in base[sub_name]:\n return\n ctx.subcommand_group = sub_group\n selected = base[sub_name][sub_group]\n args = await self.process_options(ctx.guild, x[\"options\"], selected.auto_convert) \\\n if \"options\" in x else []\n self._discord.dispatch(\"slash_command\", ctx)\n try:\n await selected.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n return\n selected = base[sub_name]\n args = await self.process_options(ctx.guild, sub_opts, selected.auto_convert) \\\n if \"options\" in sub else []\n self._discord.dispatch(\"slash_command\", ctx)\n try:\n await selected.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n\n async def on_slash_command_error(self, ctx, ex):\n \"\"\"\n Handles Exception occurred from invoking command.\n\n Example of adding event:\n\n .. code-block:: python\n\n @client.event\n async def on_slash_command_error(ctx, ex):\n ...\n\n Example of adding listener:\n\n .. 
code-block:: python\n\n @bot.listen()\n async def on_slash_command_error(ctx, ex):\n ...\n\n :param ctx: Context of the command.\n :type ctx: :class:`.model.SlashContext`\n :param ex: Exception from the command invoke.\n :type ex: Exception\n :return:\n \"\"\"\n if self.has_listener:\n if self._discord.extra_events.get('on_slash_command_error'):\n self._discord.dispatch(\"slash_command_error\", ctx, ex)\n return\n if hasattr(self._discord, \"on_slash_command_error\"):\n self._discord.dispatch(\"slash_command_error\", ctx, ex)\n return\n # Prints exception if not overrided or has no listener for error.\n self.logger.exception(f\"An exception has occurred while executing command `{ctx.name}`:\")\n", "path": "discord_slash/client.py"}]} |
gh_patches_debug_1439 | rasdani/github-patches | git_diff | translate__pootle-5899 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add language on pootle_fs/xliff support
Ubuntu 16.04
Pootle 2.8.0b3
I have successfully created a pootle_fs project.
It is based on xliff, and I added a file following my GNU-style naming "messages.<language_code>.<ext>".
I tried French and templates as languages; both are picked up and uploaded, and I can translate.
But when I add any language, my RQ worker gives me this error for both languages:
```
[2016-09-13T10:13:35] default: pootle_project.forms.update_translation_project(<TranslationProject: /sq/messages/>, True, 'http://localhost:8000/') (cfeaa7eb-99ae-4e4f-bbab-3cfa0e96d436)
2016-09-13 10:13:55,524 INFO Scanning for new files in /sq/messages/
[2016-09-13T10:14:05] AttributeError: 'NoneType' object has no attribute 'startswith'
Traceback (most recent call last):
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/worker.py", line 568, in perform_job
rv = job.perform()
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/job.py", line 495, in perform
self._result = self.func(*self.args, **self.kwargs)
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/pootle/apps/pootle_project/forms.py", line 45, in update_translation_project
raise e
AttributeError: 'NoneType' object has no attribute 'startswith'
Traceback (most recent call last):
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/worker.py", line 568, in perform_job
rv = job.perform()
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/job.py", line 495, in perform
self._result = self.func(*self.args, **self.kwargs)
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/pootle/apps/pootle_project/forms.py", line 45, in update_translation_project
raise e
AttributeError: 'NoneType' object has no attribute 'startswith'
```
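
A quick way to see where the `None` comes from is to inspect the affected translation project in a Django shell. The sketch below is only illustrative: it assumes the app is importable as `pootle_translationproject` and reuses the `/sq/messages/` path from the traceback above:

```python
# Hypothetical shell check: pootle_fs projects store no on-disk path on the
# translation project, so both values below come back as None.
from pootle_translationproject.models import TranslationProject

tp = TranslationProject.objects.get(pootle_path="/sq/messages/")
print(tp.real_path)      # None for a pootle_fs project
print(tp.abs_real_path)  # None as well, which is what the failing .startswith() call receives
```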
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_translationproject/models.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import logging
10
11 from django.conf import settings
12 from django.contrib.contenttypes.fields import GenericRelation
13 from django.db import models
14 from django.db.models.signals import post_save
15 from django.dispatch import receiver
16 from django.urls import reverse
17 from django.utils.functional import cached_property
18
19 from pootle.core.delegate import data_tool
20 from pootle.core.mixins import CachedTreeItem
21 from pootle.core.url_helpers import get_editor_filter, split_pootle_path
22 from pootle_app.models.directory import Directory
23 from pootle_app.project_tree import (does_not_exist, init_store_from_template,
24 translation_project_dir_exists)
25 from pootle_format.models import Format
26 from pootle_language.models import Language
27 from pootle_misc.checks import excluded_filters
28 from pootle_project.models import Project
29 from pootle_revision.models import Revision
30 from pootle_store.constants import PARSED
31 from pootle_store.util import absolute_real_path, relative_real_path
32 from staticpages.models import StaticPage
33
34
35 def create_or_resurrect_translation_project(language, project):
36 tp = create_translation_project(language, project)
37 if tp is not None:
38 if tp.directory.obsolete:
39 tp.directory.obsolete = False
40 tp.directory.save()
41 logging.info(u"Resurrected %s", tp)
42 else:
43 logging.info(u"Created %s", tp)
44
45
46 def create_translation_project(language, project):
47 if translation_project_dir_exists(language, project):
48 try:
49 translation_project, __ = TranslationProject.objects.all() \
50 .get_or_create(language=language, project=project)
51 return translation_project
52 except OSError:
53 return None
54 except IndexError:
55 return None
56
57
58 def scan_translation_projects(languages=None, projects=None):
59 project_query = Project.objects.all()
60
61 if projects is not None:
62 project_query = project_query.filter(code__in=projects)
63
64 for project in project_query.iterator():
65 if does_not_exist(project.get_real_path()):
66 logging.info(u"Disabling %s", project)
67 project.disabled = True
68 project.save()
69 else:
70 lang_query = Language.objects.exclude(
71 id__in=project.translationproject_set.live().values_list('language',
72 flat=True))
73 if languages is not None:
74 lang_query = lang_query.filter(code__in=languages)
75
76 for language in lang_query.iterator():
77 create_or_resurrect_translation_project(language, project)
78
79
80 class TranslationProjectManager(models.Manager):
81
82 def get_terminology_project(self, language_id):
83 # FIXME: the code below currently uses the same approach to determine
84 # the 'terminology' kind of a project as 'Project.is_terminology()',
85 # which means it checks the value of 'checkstyle' field
86 # (see pootle_project/models.py:240).
87 #
88 # This should probably be replaced in the future with a dedicated
89 # project property.
90 return self.get(language=language_id,
91 project__checkstyle='terminology')
92
93 def live(self):
94 """Filters translation projects that have non-obsolete directories."""
95 return self.filter(directory__obsolete=False)
96
97 def for_user(self, user, select_related=None):
98 """Filters translation projects for a specific user.
99
100 - Admins always get all translation projects.
101 - Regular users only get enabled translation projects
102 accessible to them.
103
104 :param user: The user for whom the translation projects need to be
105 retrieved for.
106 :return: A filtered queryset with `TranslationProject`s for `user`.
107 """
108 qs = self.live()
109 if select_related is not None:
110 qs = qs.select_related(*select_related)
111
112 if user.is_superuser:
113 return qs
114
115 return qs.filter(
116 project__disabled=False,
117 project__code__in=Project.accessible_by_user(user))
118
119 def get_for_user(self, user, project_code, language_code,
120 select_related=None):
121 """Gets a `language_code`/`project_code` translation project
122 for a specific `user`.
123
124 - Admins can get the translation project even
125 if its project is disabled.
126 - Regular users only get a translation project
127 if its project isn't disabled and it is accessible to them.
128
129 :param user: The user for whom the translation project needs
130 to be retrieved.
131 :param project_code: The code of a project for the TP to retrieve.
132 :param language_code: The code of the language fro the TP to retrieve.
133 :return: The `TranslationProject` matching the params, raises
134 otherwise.
135 """
136 return self.for_user(
137 user, select_related).get(
138 project__code=project_code,
139 language__code=language_code)
140
141
142 class TranslationProject(models.Model, CachedTreeItem):
143
144 language = models.ForeignKey(
145 Language, db_index=True, on_delete=models.CASCADE)
146 project = models.ForeignKey(
147 Project, db_index=True, on_delete=models.CASCADE)
148 real_path = models.FilePathField(editable=False, null=True, blank=True)
149 directory = models.OneToOneField(
150 Directory, db_index=True, editable=False, on_delete=models.CASCADE)
151 pootle_path = models.CharField(max_length=255, null=False, unique=True,
152 db_index=True, editable=False)
153 creation_time = models.DateTimeField(auto_now_add=True, db_index=True,
154 editable=False, null=True)
155 revisions = GenericRelation(Revision)
156
157 objects = TranslationProjectManager()
158
159 class Meta(object):
160 unique_together = (
161 ('language', 'project'),
162 ('project', 'language'))
163 db_table = 'pootle_app_translationproject'
164 # disabled objects are hidden for related objects too
165 base_manager_name = 'objects'
166
167 @cached_property
168 def code(self):
169 return u'-'.join([self.language.code, self.project.code])
170
171 @cached_property
172 def data_tool(self):
173 return data_tool.get(self.__class__)(self)
174
175 # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #
176
177 @property
178 def name(self):
179 # TODO: See if `self.fullname` can be removed
180 return self.fullname
181
182 @property
183 def fullname(self):
184 return "%s [%s]" % (self.project.fullname, self.language.name)
185
186 @property
187 def abs_real_path(self):
188 if self.real_path is not None:
189 return absolute_real_path(self.real_path)
190
191 @abs_real_path.setter
192 def abs_real_path(self, value):
193 if value is not None:
194 self.real_path = relative_real_path(value)
195 else:
196 self.real_path = None
197
198 @property
199 def file_style(self):
200 return self.project.get_treestyle()
201
202 @property
203 def checker(self):
204 from translate.filters import checks
205 # We do not use default Translate Toolkit checkers; instead use
206 # our own one
207 if settings.POOTLE_QUALITY_CHECKER:
208 from pootle_misc.util import import_func
209 checkerclasses = [import_func(settings.POOTLE_QUALITY_CHECKER)]
210 else:
211 checkerclasses = [
212 checks.projectcheckers.get(self.project.checkstyle,
213 checks.StandardChecker)
214 ]
215
216 return checks.TeeChecker(checkerclasses=checkerclasses,
217 excludefilters=excluded_filters,
218 errorhandler=self.filtererrorhandler,
219 languagecode=self.language.code)
220
221 @property
222 def disabled(self):
223 return self.project.disabled
224
225 @property
226 def is_template_project(self):
227 return self == self.project.get_template_translationproject()
228
229 # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #
230
231 def __unicode__(self):
232 return self.pootle_path
233
234 def __init__(self, *args, **kwargs):
235 super(TranslationProject, self).__init__(*args, **kwargs)
236
237 def save(self, *args, **kwargs):
238 self.directory = self.language.directory \
239 .get_or_make_subdir(self.project.code)
240 self.pootle_path = self.directory.pootle_path
241
242 if self.project.treestyle != 'pootle_fs':
243 from pootle_app.project_tree import get_translation_project_dir
244 self.abs_real_path = get_translation_project_dir(
245 self.language, self.project, self.file_style, make_dirs=not
246 self.directory.obsolete)
247 else:
248 self.abs_real_path = None
249 super(TranslationProject, self).save(*args, **kwargs)
250 if self.directory.tp_id != self.pk:
251 self.directory.tp = self
252 self.directory.save()
253
254 def delete(self, *args, **kwargs):
255 directory = self.directory
256
257 super(TranslationProject, self).delete(*args, **kwargs)
258 directory.delete()
259
260 def get_absolute_url(self):
261 return reverse(
262 'pootle-tp-browse',
263 args=split_pootle_path(self.pootle_path)[:-1])
264
265 def get_translate_url(self, **kwargs):
266 return u''.join(
267 [reverse("pootle-tp-translate",
268 args=split_pootle_path(self.pootle_path)[:-1]),
269 get_editor_filter(**kwargs)])
270
271 def get_announcement(self, user=None):
272 """Return the related announcement, if any."""
273 return StaticPage.get_announcement_for(self.pootle_path, user)
274
275 def filtererrorhandler(self, functionname, str1, str2, e):
276 logging.error(u"Error in filter %s: %r, %r, %s", functionname, str1,
277 str2, e)
278 return False
279
280 def is_accessible_by(self, user):
281 """Returns `True` if the current translation project is accessible
282 by `user`.
283 """
284 if user.is_superuser:
285 return True
286
287 return self.project.code in Project.accessible_by_user(user)
288
289 def can_be_inited_from_templates(self):
290 """Returns `True` if the current translation project hasn't been
291 saved yet and can be initialized from templates.
292 """
293
294 # This method checks if the current translation project directory
295 # doesn't exist. So it won't work if the translation project is already
296 # saved the database because the translation project directory is
297 # auto-created in `save()` method.
298 template_tp = self.project.get_template_translationproject()
299 return (
300 not self.is_template_project
301 and template_tp is not None
302 and not translation_project_dir_exists(self.language,
303 self.project))
304
305 def init_from_templates(self):
306 """Initializes the current translation project files using
307 the templates TP ones.
308 """
309
310 template_tp = self.project.get_template_translationproject()
311 template_stores = template_tp.stores.live().exclude(file="")
312
313 for template_store in template_stores.iterator():
314 init_store_from_template(self, template_store)
315
316 self.update_from_disk()
317
318 def update_from_disk(self, force=False, overwrite=False):
319 """Update all stores to reflect state on disk."""
320 changed = False
321
322 logging.info(u"Scanning for new files in %s", self)
323 # Create new, make obsolete in-DB stores to reflect state on disk
324 self.scan_files()
325
326 stores = self.stores.live().select_related('parent').exclude(file='')
327 # Update store content from disk store
328 for store in stores.iterator():
329 if not store.file:
330 continue
331 disk_mtime = store.get_file_mtime()
332 if not force and disk_mtime == store.file_mtime:
333 # The file on disk wasn't changed since the last sync
334 logging.debug(u"File didn't change since last sync, "
335 u"skipping %s", store.pootle_path)
336 continue
337
338 changed = (
339 store.updater.update_from_disk(overwrite=overwrite)
340 or changed)
341
342 return changed
343
344 def sync(self, conservative=True, skip_missing=False, only_newer=True):
345 """Sync unsaved work on all stores to disk"""
346 stores = self.stores.live().exclude(file='').filter(state__gte=PARSED)
347 for store in stores.select_related("parent").iterator():
348 store.sync(update_structure=not conservative,
349 conservative=conservative,
350 skip_missing=skip_missing, only_newer=only_newer)
351
352 # # # TreeItem
353 def get_children(self):
354 return self.directory.children
355
356 def get_parents(self):
357 return [self.project]
358
359 # # # /TreeItem
360
361 def directory_exists_on_disk(self):
362 """Checks if the actual directory for the translation project
363 exists on disk.
364 """
365 return not does_not_exist(self.abs_real_path)
366
367 def scan_files(self):
368 """Scans the file system and returns a list of translation files.
369 """
370 projects = [p.strip() for p in self.project.ignoredfiles.split(',')]
371 ignored_files = set(projects)
372
373 filetypes = self.project.filetype_tool
374 exts = filetypes.filetype_extensions
375
376 # Scan for pots if template project
377 if self.is_template_project:
378 exts = filetypes.template_extensions
379
380 from pootle_app.project_tree import (add_files,
381 match_template_filename,
382 direct_language_match_filename)
383
384 all_files = []
385 new_files = []
386
387 if self.file_style == 'gnu':
388 if self.pootle_path.startswith('/templates/'):
389 file_filter = lambda filename: match_template_filename(
390 self.project, filename,)
391 else:
392 file_filter = lambda filename: direct_language_match_filename(
393 self.language.code, filename,)
394 else:
395 file_filter = lambda filename: True
396
397 all_files, new_files, __ = add_files(
398 self,
399 ignored_files,
400 exts,
401 self.real_path,
402 self.directory,
403 file_filter,
404 )
405
406 return all_files, new_files
407
408 ###########################################################################
409
410
411 @receiver(post_save, sender=Project)
412 def scan_languages(**kwargs):
413 instance = kwargs["instance"]
414 created = kwargs.get("created", False)
415 raw = kwargs.get("raw", False)
416
417 if not created or raw or instance.disabled:
418 return
419
420 if not instance.filetypes.all().exists():
421 instance.filetypes.add(Format.objects.get(name="po"))
422
423 if instance.treestyle == 'pootle_fs':
424 return
425
426 for language in Language.objects.iterator():
427 tp = create_translation_project(language, instance)
428 if tp is not None:
429 tp.update_from_disk()
430
431
432 @receiver(post_save, sender=Language)
433 def scan_projects(**kwargs):
434 instance = kwargs["instance"]
435 created = kwargs.get("created", False)
436 raw = kwargs.get("raw", False)
437
438 if not created or raw:
439 return
440
441 for project in Project.objects.enabled().iterator():
442 tp = create_translation_project(instance, project)
443 if tp is not None:
444 tp.update_from_disk()
445
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_translationproject/models.py b/pootle/apps/pootle_translationproject/models.py
--- a/pootle/apps/pootle_translationproject/models.py
+++ b/pootle/apps/pootle_translationproject/models.py
@@ -438,7 +438,10 @@
if not created or raw:
return
- for project in Project.objects.enabled().iterator():
+ old_style_projects = Project.objects.enabled().exclude(
+ treestyle="pootle_fs")
+
+ for project in old_style_projects.iterator():
tp = create_translation_project(instance, project)
if tp is not None:
tp.update_from_disk()
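
For reference, this is roughly how the patched `scan_projects` receiver reads once the hunk above is applied; projects using the `pootle_fs` tree style are not backed by the old on-disk directory layout, so they are simply skipped when a new language is added (imports and `create_translation_project` are the ones already present in the module listing above):

```python
@receiver(post_save, sender=Language)
def scan_projects(**kwargs):
    instance = kwargs["instance"]
    created = kwargs.get("created", False)
    raw = kwargs.get("raw", False)

    if not created or raw:
        return

    # Only scan disk-backed (non pootle_fs) projects when a language is created.
    old_style_projects = Project.objects.enabled().exclude(
        treestyle="pootle_fs")

    for project in old_style_projects.iterator():
        tp = create_translation_project(instance, project)
        if tp is not None:
            tp.update_from_disk()
```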
| {"golden_diff": "diff --git a/pootle/apps/pootle_translationproject/models.py b/pootle/apps/pootle_translationproject/models.py\n--- a/pootle/apps/pootle_translationproject/models.py\n+++ b/pootle/apps/pootle_translationproject/models.py\n@@ -438,7 +438,10 @@\n if not created or raw:\n return\n \n- for project in Project.objects.enabled().iterator():\n+ old_style_projects = Project.objects.enabled().exclude(\n+ treestyle=\"pootle_fs\")\n+\n+ for project in old_style_projects.iterator():\n tp = create_translation_project(instance, project)\n if tp is not None:\n tp.update_from_disk()\n", "issue": "Add language on pootle_fs/xliff support\nUbuntu 16.04\nPootle 2.8.0b3\n\nHave succesfully created a pootle_fs project.\nBased on xliff, I put a file with my GNU style \"messages.<language_code>.<ext>\"\nI tried french and templates as language, both are taken, uploaded and I can translate.\nBut when I add any language, my RQworker get me this error in both language :\n\n```\n[2016-09-13T10:13:35] default: pootle_project.forms.update_translation_project(<TranslationProject: /sq/messages/>, True, 'http://localhost:8000/') (cfeaa7eb-99ae-4e4f-bbab-3cfa0e96d436)\n2016-09-13 10:13:55,524 INFO Scanning for new files in /sq/messages/\n[2016-09-13T10:14:05] AttributeError: 'NoneType' object has no attribute 'startswith'\nTraceback (most recent call last):\n File \"/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/worker.py\", line 568, in perform_job\n rv = job.perform()\n File \"/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/job.py\", line 495, in perform\n self._result = self.func(*self.args, **self.kwargs)\n File \"/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/pootle/apps/pootle_project/forms.py\", line 45, in update_translation_project\n raise e\nAttributeError: 'NoneType' object has no attribute 'startswith'\nTraceback (most recent call last):\n File \"/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/worker.py\", line 568, in perform_job\n rv = job.perform()\n File \"/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/job.py\", line 495, in perform\n self._result = self.func(*self.args, **self.kwargs)\n File \"/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/pootle/apps/pootle_project/forms.py\", line 45, in update_translation_project\n raise e\nAttributeError: 'NoneType' object has no attribute 'startswith'\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import data_tool\nfrom pootle.core.mixins import CachedTreeItem\nfrom pootle.core.url_helpers import get_editor_filter, split_pootle_path\nfrom pootle_app.models.directory import Directory\nfrom pootle_app.project_tree import (does_not_exist, init_store_from_template,\n translation_project_dir_exists)\nfrom pootle_format.models import Format\nfrom pootle_language.models import Language\nfrom pootle_misc.checks import excluded_filters\nfrom pootle_project.models import Project\nfrom pootle_revision.models import Revision\nfrom pootle_store.constants import PARSED\nfrom pootle_store.util import absolute_real_path, relative_real_path\nfrom staticpages.models import StaticPage\n\n\ndef create_or_resurrect_translation_project(language, project):\n tp = create_translation_project(language, project)\n if tp is not None:\n if tp.directory.obsolete:\n tp.directory.obsolete = False\n tp.directory.save()\n logging.info(u\"Resurrected %s\", tp)\n else:\n logging.info(u\"Created %s\", tp)\n\n\ndef create_translation_project(language, project):\n if translation_project_dir_exists(language, project):\n try:\n translation_project, __ = TranslationProject.objects.all() \\\n .get_or_create(language=language, project=project)\n return translation_project\n except OSError:\n return None\n except IndexError:\n return None\n\n\ndef scan_translation_projects(languages=None, projects=None):\n project_query = Project.objects.all()\n\n if projects is not None:\n project_query = project_query.filter(code__in=projects)\n\n for project in project_query.iterator():\n if does_not_exist(project.get_real_path()):\n logging.info(u\"Disabling %s\", project)\n project.disabled = True\n project.save()\n else:\n lang_query = Language.objects.exclude(\n id__in=project.translationproject_set.live().values_list('language',\n flat=True))\n if languages is not None:\n lang_query = lang_query.filter(code__in=languages)\n\n for language in lang_query.iterator():\n create_or_resurrect_translation_project(language, project)\n\n\nclass TranslationProjectManager(models.Manager):\n\n def get_terminology_project(self, language_id):\n # FIXME: the code below currently uses the same approach to determine\n # the 'terminology' kind of a project as 'Project.is_terminology()',\n # which means it checks the value of 'checkstyle' field\n # (see pootle_project/models.py:240).\n #\n # This should probably be replaced in the future with a dedicated\n # project property.\n return self.get(language=language_id,\n project__checkstyle='terminology')\n\n def live(self):\n \"\"\"Filters translation projects that have non-obsolete directories.\"\"\"\n return self.filter(directory__obsolete=False)\n\n def for_user(self, user, select_related=None):\n \"\"\"Filters translation projects for a specific user.\n\n - Admins always get all translation projects.\n - Regular users only get enabled translation projects\n accessible to them.\n\n :param user: The user for whom the translation projects need to be\n retrieved for.\n :return: A filtered queryset with `TranslationProject`s for `user`.\n \"\"\"\n qs = 
self.live()\n if select_related is not None:\n qs = qs.select_related(*select_related)\n\n if user.is_superuser:\n return qs\n\n return qs.filter(\n project__disabled=False,\n project__code__in=Project.accessible_by_user(user))\n\n def get_for_user(self, user, project_code, language_code,\n select_related=None):\n \"\"\"Gets a `language_code`/`project_code` translation project\n for a specific `user`.\n\n - Admins can get the translation project even\n if its project is disabled.\n - Regular users only get a translation project\n if its project isn't disabled and it is accessible to them.\n\n :param user: The user for whom the translation project needs\n to be retrieved.\n :param project_code: The code of a project for the TP to retrieve.\n :param language_code: The code of the language fro the TP to retrieve.\n :return: The `TranslationProject` matching the params, raises\n otherwise.\n \"\"\"\n return self.for_user(\n user, select_related).get(\n project__code=project_code,\n language__code=language_code)\n\n\nclass TranslationProject(models.Model, CachedTreeItem):\n\n language = models.ForeignKey(\n Language, db_index=True, on_delete=models.CASCADE)\n project = models.ForeignKey(\n Project, db_index=True, on_delete=models.CASCADE)\n real_path = models.FilePathField(editable=False, null=True, blank=True)\n directory = models.OneToOneField(\n Directory, db_index=True, editable=False, on_delete=models.CASCADE)\n pootle_path = models.CharField(max_length=255, null=False, unique=True,\n db_index=True, editable=False)\n creation_time = models.DateTimeField(auto_now_add=True, db_index=True,\n editable=False, null=True)\n revisions = GenericRelation(Revision)\n\n objects = TranslationProjectManager()\n\n class Meta(object):\n unique_together = (\n ('language', 'project'),\n ('project', 'language'))\n db_table = 'pootle_app_translationproject'\n # disabled objects are hidden for related objects too\n base_manager_name = 'objects'\n\n @cached_property\n def code(self):\n return u'-'.join([self.language.code, self.project.code])\n\n @cached_property\n def data_tool(self):\n return data_tool.get(self.__class__)(self)\n\n # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #\n\n @property\n def name(self):\n # TODO: See if `self.fullname` can be removed\n return self.fullname\n\n @property\n def fullname(self):\n return \"%s [%s]\" % (self.project.fullname, self.language.name)\n\n @property\n def abs_real_path(self):\n if self.real_path is not None:\n return absolute_real_path(self.real_path)\n\n @abs_real_path.setter\n def abs_real_path(self, value):\n if value is not None:\n self.real_path = relative_real_path(value)\n else:\n self.real_path = None\n\n @property\n def file_style(self):\n return self.project.get_treestyle()\n\n @property\n def checker(self):\n from translate.filters import checks\n # We do not use default Translate Toolkit checkers; instead use\n # our own one\n if settings.POOTLE_QUALITY_CHECKER:\n from pootle_misc.util import import_func\n checkerclasses = [import_func(settings.POOTLE_QUALITY_CHECKER)]\n else:\n checkerclasses = [\n checks.projectcheckers.get(self.project.checkstyle,\n checks.StandardChecker)\n ]\n\n return checks.TeeChecker(checkerclasses=checkerclasses,\n excludefilters=excluded_filters,\n errorhandler=self.filtererrorhandler,\n languagecode=self.language.code)\n\n @property\n def disabled(self):\n return self.project.disabled\n\n @property\n def is_template_project(self):\n return self == self.project.get_template_translationproject()\n\n 
# # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #\n\n def __unicode__(self):\n return self.pootle_path\n\n def __init__(self, *args, **kwargs):\n super(TranslationProject, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.directory = self.language.directory \\\n .get_or_make_subdir(self.project.code)\n self.pootle_path = self.directory.pootle_path\n\n if self.project.treestyle != 'pootle_fs':\n from pootle_app.project_tree import get_translation_project_dir\n self.abs_real_path = get_translation_project_dir(\n self.language, self.project, self.file_style, make_dirs=not\n self.directory.obsolete)\n else:\n self.abs_real_path = None\n super(TranslationProject, self).save(*args, **kwargs)\n if self.directory.tp_id != self.pk:\n self.directory.tp = self\n self.directory.save()\n\n def delete(self, *args, **kwargs):\n directory = self.directory\n\n super(TranslationProject, self).delete(*args, **kwargs)\n directory.delete()\n\n def get_absolute_url(self):\n return reverse(\n 'pootle-tp-browse',\n args=split_pootle_path(self.pootle_path)[:-1])\n\n def get_translate_url(self, **kwargs):\n return u''.join(\n [reverse(\"pootle-tp-translate\",\n args=split_pootle_path(self.pootle_path)[:-1]),\n get_editor_filter(**kwargs)])\n\n def get_announcement(self, user=None):\n \"\"\"Return the related announcement, if any.\"\"\"\n return StaticPage.get_announcement_for(self.pootle_path, user)\n\n def filtererrorhandler(self, functionname, str1, str2, e):\n logging.error(u\"Error in filter %s: %r, %r, %s\", functionname, str1,\n str2, e)\n return False\n\n def is_accessible_by(self, user):\n \"\"\"Returns `True` if the current translation project is accessible\n by `user`.\n \"\"\"\n if user.is_superuser:\n return True\n\n return self.project.code in Project.accessible_by_user(user)\n\n def can_be_inited_from_templates(self):\n \"\"\"Returns `True` if the current translation project hasn't been\n saved yet and can be initialized from templates.\n \"\"\"\n\n # This method checks if the current translation project directory\n # doesn't exist. 
So it won't work if the translation project is already\n # saved the database because the translation project directory is\n # auto-created in `save()` method.\n template_tp = self.project.get_template_translationproject()\n return (\n not self.is_template_project\n and template_tp is not None\n and not translation_project_dir_exists(self.language,\n self.project))\n\n def init_from_templates(self):\n \"\"\"Initializes the current translation project files using\n the templates TP ones.\n \"\"\"\n\n template_tp = self.project.get_template_translationproject()\n template_stores = template_tp.stores.live().exclude(file=\"\")\n\n for template_store in template_stores.iterator():\n init_store_from_template(self, template_store)\n\n self.update_from_disk()\n\n def update_from_disk(self, force=False, overwrite=False):\n \"\"\"Update all stores to reflect state on disk.\"\"\"\n changed = False\n\n logging.info(u\"Scanning for new files in %s\", self)\n # Create new, make obsolete in-DB stores to reflect state on disk\n self.scan_files()\n\n stores = self.stores.live().select_related('parent').exclude(file='')\n # Update store content from disk store\n for store in stores.iterator():\n if not store.file:\n continue\n disk_mtime = store.get_file_mtime()\n if not force and disk_mtime == store.file_mtime:\n # The file on disk wasn't changed since the last sync\n logging.debug(u\"File didn't change since last sync, \"\n u\"skipping %s\", store.pootle_path)\n continue\n\n changed = (\n store.updater.update_from_disk(overwrite=overwrite)\n or changed)\n\n return changed\n\n def sync(self, conservative=True, skip_missing=False, only_newer=True):\n \"\"\"Sync unsaved work on all stores to disk\"\"\"\n stores = self.stores.live().exclude(file='').filter(state__gte=PARSED)\n for store in stores.select_related(\"parent\").iterator():\n store.sync(update_structure=not conservative,\n conservative=conservative,\n skip_missing=skip_missing, only_newer=only_newer)\n\n # # # TreeItem\n def get_children(self):\n return self.directory.children\n\n def get_parents(self):\n return [self.project]\n\n # # # /TreeItem\n\n def directory_exists_on_disk(self):\n \"\"\"Checks if the actual directory for the translation project\n exists on disk.\n \"\"\"\n return not does_not_exist(self.abs_real_path)\n\n def scan_files(self):\n \"\"\"Scans the file system and returns a list of translation files.\n \"\"\"\n projects = [p.strip() for p in self.project.ignoredfiles.split(',')]\n ignored_files = set(projects)\n\n filetypes = self.project.filetype_tool\n exts = filetypes.filetype_extensions\n\n # Scan for pots if template project\n if self.is_template_project:\n exts = filetypes.template_extensions\n\n from pootle_app.project_tree import (add_files,\n match_template_filename,\n direct_language_match_filename)\n\n all_files = []\n new_files = []\n\n if self.file_style == 'gnu':\n if self.pootle_path.startswith('/templates/'):\n file_filter = lambda filename: match_template_filename(\n self.project, filename,)\n else:\n file_filter = lambda filename: direct_language_match_filename(\n self.language.code, filename,)\n else:\n file_filter = lambda filename: True\n\n all_files, new_files, __ = add_files(\n self,\n ignored_files,\n exts,\n self.real_path,\n self.directory,\n file_filter,\n )\n\n return all_files, new_files\n\n ###########################################################################\n\n\n@receiver(post_save, sender=Project)\ndef scan_languages(**kwargs):\n instance = kwargs[\"instance\"]\n created = 
kwargs.get(\"created\", False)\n raw = kwargs.get(\"raw\", False)\n\n if not created or raw or instance.disabled:\n return\n\n if not instance.filetypes.all().exists():\n instance.filetypes.add(Format.objects.get(name=\"po\"))\n\n if instance.treestyle == 'pootle_fs':\n return\n\n for language in Language.objects.iterator():\n tp = create_translation_project(language, instance)\n if tp is not None:\n tp.update_from_disk()\n\n\n@receiver(post_save, sender=Language)\ndef scan_projects(**kwargs):\n instance = kwargs[\"instance\"]\n created = kwargs.get(\"created\", False)\n raw = kwargs.get(\"raw\", False)\n\n if not created or raw:\n return\n\n for project in Project.objects.enabled().iterator():\n tp = create_translation_project(instance, project)\n if tp is not None:\n tp.update_from_disk()\n", "path": "pootle/apps/pootle_translationproject/models.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import data_tool\nfrom pootle.core.mixins import CachedTreeItem\nfrom pootle.core.url_helpers import get_editor_filter, split_pootle_path\nfrom pootle_app.models.directory import Directory\nfrom pootle_app.project_tree import (does_not_exist, init_store_from_template,\n translation_project_dir_exists)\nfrom pootle_format.models import Format\nfrom pootle_language.models import Language\nfrom pootle_misc.checks import excluded_filters\nfrom pootle_project.models import Project\nfrom pootle_revision.models import Revision\nfrom pootle_store.constants import PARSED\nfrom pootle_store.util import absolute_real_path, relative_real_path\nfrom staticpages.models import StaticPage\n\n\ndef create_or_resurrect_translation_project(language, project):\n tp = create_translation_project(language, project)\n if tp is not None:\n if tp.directory.obsolete:\n tp.directory.obsolete = False\n tp.directory.save()\n logging.info(u\"Resurrected %s\", tp)\n else:\n logging.info(u\"Created %s\", tp)\n\n\ndef create_translation_project(language, project):\n if translation_project_dir_exists(language, project):\n try:\n translation_project, __ = TranslationProject.objects.all() \\\n .get_or_create(language=language, project=project)\n return translation_project\n except OSError:\n return None\n except IndexError:\n return None\n\n\ndef scan_translation_projects(languages=None, projects=None):\n project_query = Project.objects.all()\n\n if projects is not None:\n project_query = project_query.filter(code__in=projects)\n\n for project in project_query.iterator():\n if does_not_exist(project.get_real_path()):\n logging.info(u\"Disabling %s\", project)\n project.disabled = True\n project.save()\n else:\n lang_query = Language.objects.exclude(\n id__in=project.translationproject_set.live().values_list('language',\n flat=True))\n if languages is not None:\n lang_query = lang_query.filter(code__in=languages)\n\n for language in lang_query.iterator():\n create_or_resurrect_translation_project(language, project)\n\n\nclass 
TranslationProjectManager(models.Manager):\n\n def get_terminology_project(self, language_id):\n # FIXME: the code below currently uses the same approach to determine\n # the 'terminology' kind of a project as 'Project.is_terminology()',\n # which means it checks the value of 'checkstyle' field\n # (see pootle_project/models.py:240).\n #\n # This should probably be replaced in the future with a dedicated\n # project property.\n return self.get(language=language_id,\n project__checkstyle='terminology')\n\n def live(self):\n \"\"\"Filters translation projects that have non-obsolete directories.\"\"\"\n return self.filter(directory__obsolete=False)\n\n def for_user(self, user, select_related=None):\n \"\"\"Filters translation projects for a specific user.\n\n - Admins always get all translation projects.\n - Regular users only get enabled translation projects\n accessible to them.\n\n :param user: The user for whom the translation projects need to be\n retrieved for.\n :return: A filtered queryset with `TranslationProject`s for `user`.\n \"\"\"\n qs = self.live()\n if select_related is not None:\n qs = qs.select_related(*select_related)\n\n if user.is_superuser:\n return qs\n\n return qs.filter(\n project__disabled=False,\n project__code__in=Project.accessible_by_user(user))\n\n def get_for_user(self, user, project_code, language_code,\n select_related=None):\n \"\"\"Gets a `language_code`/`project_code` translation project\n for a specific `user`.\n\n - Admins can get the translation project even\n if its project is disabled.\n - Regular users only get a translation project\n if its project isn't disabled and it is accessible to them.\n\n :param user: The user for whom the translation project needs\n to be retrieved.\n :param project_code: The code of a project for the TP to retrieve.\n :param language_code: The code of the language fro the TP to retrieve.\n :return: The `TranslationProject` matching the params, raises\n otherwise.\n \"\"\"\n return self.for_user(\n user, select_related).get(\n project__code=project_code,\n language__code=language_code)\n\n\nclass TranslationProject(models.Model, CachedTreeItem):\n\n language = models.ForeignKey(\n Language, db_index=True, on_delete=models.CASCADE)\n project = models.ForeignKey(\n Project, db_index=True, on_delete=models.CASCADE)\n real_path = models.FilePathField(editable=False, null=True, blank=True)\n directory = models.OneToOneField(\n Directory, db_index=True, editable=False, on_delete=models.CASCADE)\n pootle_path = models.CharField(max_length=255, null=False, unique=True,\n db_index=True, editable=False)\n creation_time = models.DateTimeField(auto_now_add=True, db_index=True,\n editable=False, null=True)\n revisions = GenericRelation(Revision)\n\n objects = TranslationProjectManager()\n\n class Meta(object):\n unique_together = (\n ('language', 'project'),\n ('project', 'language'))\n db_table = 'pootle_app_translationproject'\n # disabled objects are hidden for related objects too\n base_manager_name = 'objects'\n\n @cached_property\n def code(self):\n return u'-'.join([self.language.code, self.project.code])\n\n @cached_property\n def data_tool(self):\n return data_tool.get(self.__class__)(self)\n\n # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #\n\n @property\n def name(self):\n # TODO: See if `self.fullname` can be removed\n return self.fullname\n\n @property\n def fullname(self):\n return \"%s [%s]\" % (self.project.fullname, self.language.name)\n\n @property\n def abs_real_path(self):\n if 
self.real_path is not None:\n return absolute_real_path(self.real_path)\n\n @abs_real_path.setter\n def abs_real_path(self, value):\n if value is not None:\n self.real_path = relative_real_path(value)\n else:\n self.real_path = None\n\n @property\n def file_style(self):\n return self.project.get_treestyle()\n\n @property\n def checker(self):\n from translate.filters import checks\n # We do not use default Translate Toolkit checkers; instead use\n # our own one\n if settings.POOTLE_QUALITY_CHECKER:\n from pootle_misc.util import import_func\n checkerclasses = [import_func(settings.POOTLE_QUALITY_CHECKER)]\n else:\n checkerclasses = [\n checks.projectcheckers.get(self.project.checkstyle,\n checks.StandardChecker)\n ]\n\n return checks.TeeChecker(checkerclasses=checkerclasses,\n excludefilters=excluded_filters,\n errorhandler=self.filtererrorhandler,\n languagecode=self.language.code)\n\n @property\n def disabled(self):\n return self.project.disabled\n\n @property\n def is_template_project(self):\n return self == self.project.get_template_translationproject()\n\n # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #\n\n def __unicode__(self):\n return self.pootle_path\n\n def __init__(self, *args, **kwargs):\n super(TranslationProject, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.directory = self.language.directory \\\n .get_or_make_subdir(self.project.code)\n self.pootle_path = self.directory.pootle_path\n\n if self.project.treestyle != 'pootle_fs':\n from pootle_app.project_tree import get_translation_project_dir\n self.abs_real_path = get_translation_project_dir(\n self.language, self.project, self.file_style, make_dirs=not\n self.directory.obsolete)\n else:\n self.abs_real_path = None\n super(TranslationProject, self).save(*args, **kwargs)\n if self.directory.tp_id != self.pk:\n self.directory.tp = self\n self.directory.save()\n\n def delete(self, *args, **kwargs):\n directory = self.directory\n\n super(TranslationProject, self).delete(*args, **kwargs)\n directory.delete()\n\n def get_absolute_url(self):\n return reverse(\n 'pootle-tp-browse',\n args=split_pootle_path(self.pootle_path)[:-1])\n\n def get_translate_url(self, **kwargs):\n return u''.join(\n [reverse(\"pootle-tp-translate\",\n args=split_pootle_path(self.pootle_path)[:-1]),\n get_editor_filter(**kwargs)])\n\n def get_announcement(self, user=None):\n \"\"\"Return the related announcement, if any.\"\"\"\n return StaticPage.get_announcement_for(self.pootle_path, user)\n\n def filtererrorhandler(self, functionname, str1, str2, e):\n logging.error(u\"Error in filter %s: %r, %r, %s\", functionname, str1,\n str2, e)\n return False\n\n def is_accessible_by(self, user):\n \"\"\"Returns `True` if the current translation project is accessible\n by `user`.\n \"\"\"\n if user.is_superuser:\n return True\n\n return self.project.code in Project.accessible_by_user(user)\n\n def can_be_inited_from_templates(self):\n \"\"\"Returns `True` if the current translation project hasn't been\n saved yet and can be initialized from templates.\n \"\"\"\n\n # This method checks if the current translation project directory\n # doesn't exist. 
So it won't work if the translation project is already\n # saved the database because the translation project directory is\n # auto-created in `save()` method.\n template_tp = self.project.get_template_translationproject()\n return (\n not self.is_template_project\n and template_tp is not None\n and not translation_project_dir_exists(self.language,\n self.project))\n\n def init_from_templates(self):\n \"\"\"Initializes the current translation project files using\n the templates TP ones.\n \"\"\"\n\n template_tp = self.project.get_template_translationproject()\n template_stores = template_tp.stores.live().exclude(file=\"\")\n\n for template_store in template_stores.iterator():\n init_store_from_template(self, template_store)\n\n self.update_from_disk()\n\n def update_from_disk(self, force=False, overwrite=False):\n \"\"\"Update all stores to reflect state on disk.\"\"\"\n changed = False\n\n logging.info(u\"Scanning for new files in %s\", self)\n # Create new, make obsolete in-DB stores to reflect state on disk\n self.scan_files()\n\n stores = self.stores.live().select_related('parent').exclude(file='')\n # Update store content from disk store\n for store in stores.iterator():\n if not store.file:\n continue\n disk_mtime = store.get_file_mtime()\n if not force and disk_mtime == store.file_mtime:\n # The file on disk wasn't changed since the last sync\n logging.debug(u\"File didn't change since last sync, \"\n u\"skipping %s\", store.pootle_path)\n continue\n\n changed = (\n store.updater.update_from_disk(overwrite=overwrite)\n or changed)\n\n return changed\n\n def sync(self, conservative=True, skip_missing=False, only_newer=True):\n \"\"\"Sync unsaved work on all stores to disk\"\"\"\n stores = self.stores.live().exclude(file='').filter(state__gte=PARSED)\n for store in stores.select_related(\"parent\").iterator():\n store.sync(update_structure=not conservative,\n conservative=conservative,\n skip_missing=skip_missing, only_newer=only_newer)\n\n # # # TreeItem\n def get_children(self):\n return self.directory.children\n\n def get_parents(self):\n return [self.project]\n\n # # # /TreeItem\n\n def directory_exists_on_disk(self):\n \"\"\"Checks if the actual directory for the translation project\n exists on disk.\n \"\"\"\n return not does_not_exist(self.abs_real_path)\n\n def scan_files(self):\n \"\"\"Scans the file system and returns a list of translation files.\n \"\"\"\n projects = [p.strip() for p in self.project.ignoredfiles.split(',')]\n ignored_files = set(projects)\n\n filetypes = self.project.filetype_tool\n exts = filetypes.filetype_extensions\n\n # Scan for pots if template project\n if self.is_template_project:\n exts = filetypes.template_extensions\n\n from pootle_app.project_tree import (add_files,\n match_template_filename,\n direct_language_match_filename)\n\n all_files = []\n new_files = []\n\n if self.file_style == 'gnu':\n if self.pootle_path.startswith('/templates/'):\n file_filter = lambda filename: match_template_filename(\n self.project, filename,)\n else:\n file_filter = lambda filename: direct_language_match_filename(\n self.language.code, filename,)\n else:\n file_filter = lambda filename: True\n\n all_files, new_files, __ = add_files(\n self,\n ignored_files,\n exts,\n self.real_path,\n self.directory,\n file_filter,\n )\n\n return all_files, new_files\n\n ###########################################################################\n\n\n@receiver(post_save, sender=Project)\ndef scan_languages(**kwargs):\n instance = kwargs[\"instance\"]\n created = 
kwargs.get(\"created\", False)\n raw = kwargs.get(\"raw\", False)\n\n if not created or raw or instance.disabled:\n return\n\n if not instance.filetypes.all().exists():\n instance.filetypes.add(Format.objects.get(name=\"po\"))\n\n if instance.treestyle == 'pootle_fs':\n return\n\n for language in Language.objects.iterator():\n tp = create_translation_project(language, instance)\n if tp is not None:\n tp.update_from_disk()\n\n\n@receiver(post_save, sender=Language)\ndef scan_projects(**kwargs):\n instance = kwargs[\"instance\"]\n created = kwargs.get(\"created\", False)\n raw = kwargs.get(\"raw\", False)\n\n if not created or raw:\n return\n\n old_style_projects = Project.objects.enabled().exclude(\n treestyle=\"pootle_fs\")\n\n for project in old_style_projects.iterator():\n tp = create_translation_project(instance, project)\n if tp is not None:\n tp.update_from_disk()\n", "path": "pootle/apps/pootle_translationproject/models.py"}]} |
gh_patches_debug_1440 | rasdani/github-patches | git_diff | nonebot__nonebot2-430 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: calling upload_image through the mirai adapter fails when uploading an image
**Describe the problem:**
Calling upload_image through the mirai adapter to upload an image raises an error:
```powershell
httpx.HTTPStatusError: 400 Client Error: Bad Request for url: http://127.0.0.1:8000/uploadImage
For more information check: https://httpstatuses.com/400
```
**How to reproduce?**
```python
with open('file.png', 'rb') as f:
img = BytesIO(f.read())
img_id = await bot.upload_image('group', img)
```
**Expected result**
```
{'imageId': '{******-****-FD90-491D-141D77303EE5}.png', 'url': 'http://gchat.qpic.cn/gchatpic_new/*********', 'path': ''}
```
**Environment info:**
- OS: Windows10
- Python Version: 3.8.2
- Nonebot Version: 2.0.0a13.post1
- Mirai Version: 2.7-M1
- mirai-api-http Version: 1.12.0
**Screenshot**

**Cause**
The uploadImage API of mah v1.8.4 requires a sessionKey to be supplied.
Tested against mah 1.12.0: once the sessionKey is added, the call returns the expected result.
I'm new to this and my code isn't great, so I won't open a PR.
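The change amounts to one extra line in the adapter's `SessionManager.upload` helper (the method is shown in full in the files below); a minimal sketch of the patched method body:
```python
@catch_network_error
async def upload(self, path: str, *, params: Dict[str, Any]) -> Any:
    # Split BytesIO values out as file uploads; everything else stays in the form.
    files = {k: v for k, v in params.items() if isinstance(v, BytesIO)}
    form = {k: v for k, v in params.items() if k not in files}
    # mah v1.x expects the active sessionKey inside the multipart form as well,
    # just like the JSON/GET helpers already send it.
    form['sessionKey'] = self.session_key
    response = await self.client.post(path, data=form, files=files, timeout=6)
    response.raise_for_status()
    return response.json()
```
According to the tables quoted below, sessionKey is mandatory for mah v1.x and optional for the v2.0 http adapter, so always sending it should be harmless.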
> ### [Image file upload](https://github.com/zxsean/mirai-api-http/blob/master/docs/API.md#%E5%9B%BE%E7%89%87%E6%96%87%E4%BB%B6%E4%B8%8A%E4%BC%A0)
>
> ```
> [POST] /uploadImage
> ```
>
> Use this method to upload an image file to the server; it returns an ImageId
>
> #### Request
>
> Content-Type: multipart/form-data
>
> | Name | Type | Optional | Example | Description |
> | ---------- | ------ | ----- | ----------- | ----------------------------- |
> | sessionKey | String | false | YourSession | An already activated session |
> | type | String | false | "friend " | "friend", "group" or "temp" |
> | img | File | false | - | Image file |
With the http adapter in mah v2.0 this is no longer required:
> ### [Image file upload](https://github.com/project-mirai/mirai-api-http/blob/master/docs/adapter/HttpAdapter.md#%E5%9B%BE%E7%89%87%E6%96%87%E4%BB%B6%E4%B8%8A%E4%BC%A0)
>
> Use this method to upload an image file to the server; it returns an ImageId
>
> ```
> [POST] /uploadImage
> ```
>
> **This endpoint is a [POST] request; parameters are submitted as `multipart/form-data`**
>
> #### Request:
>
> | Name | Type | Optional | Example | Description |
> | ---------- | ------ | ----- | ----------- | ----------------------------- |
> | sessionKey | String | true | YourSession | An already activated session |
> | type | String | false | "friend" | "friend", "group" or "temp" |
> | img | File | false | - | Image file |
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py`
Content:
```
1 from datetime import datetime, timedelta
2 from io import BytesIO
3 from ipaddress import IPv4Address
4 from typing import Any, Dict, List, NoReturn, Optional, Tuple, Union
5
6 import httpx
7
8 from nonebot.config import Config
9 from nonebot.typing import overrides
10 from nonebot.adapters import Bot as BaseBot
11 from nonebot.exception import ApiNotAvailable
12 from nonebot.drivers import Driver, HTTPConnection, HTTPResponse, WebSocket
13
14 from .config import Config as MiraiConfig
15 from .event import Event, FriendMessage, GroupMessage, TempMessage
16 from .message import MessageChain, MessageSegment
17 from .utils import Log, argument_validation, catch_network_error, process_event
18
19
20 class SessionManager:
21 """Bot会话管理器, 提供API主动调用接口"""
22 sessions: Dict[int, Tuple[str, datetime, httpx.AsyncClient]] = {}
23 session_expiry: timedelta = timedelta(minutes=15)
24
25 def __init__(self, session_key: str, client: httpx.AsyncClient):
26 self.session_key, self.client = session_key, client
27
28 @catch_network_error
29 async def post(self,
30 path: str,
31 *,
32 params: Optional[Dict[str, Any]] = None) -> Any:
33 """
34 :说明:
35
36 以POST方式主动提交API请求
37
38 :参数:
39
40 * ``path: str``: 对应API路径
41 * ``params: Optional[Dict[str, Any]]``: 请求参数 (无需sessionKey)
42
43 :返回:
44
45 - ``Dict[str, Any]``: API 返回值
46 """
47 response = await self.client.post(
48 path,
49 json={
50 **(params or {}),
51 'sessionKey': self.session_key,
52 },
53 timeout=3,
54 )
55 response.raise_for_status()
56 return response.json()
57
58 @catch_network_error
59 async def request(self,
60 path: str,
61 *,
62 params: Optional[Dict[str, Any]] = None) -> Any:
63 """
64 :说明:
65
66 以GET方式主动提交API请求
67
68 :参数:
69
70 * ``path: str``: 对应API路径
71 * ``params: Optional[Dict[str, Any]]``: 请求参数 (无需sessionKey)
72 """
73 response = await self.client.get(
74 path,
75 params={
76 **(params or {}),
77 'sessionKey': self.session_key,
78 },
79 timeout=3,
80 )
81 response.raise_for_status()
82 return response.json()
83
84 @catch_network_error
85 async def upload(self, path: str, *, params: Dict[str, Any]) -> Any:
86 """
87 :说明:
88
89 以表单(``multipart/form-data``)形式主动提交API请求
90
91 :参数:
92
93 * ``path: str``: 对应API路径
94 * ``params: Dict[str, Any]``: 请求参数 (无需sessionKey)
95 """
96 files = {k: v for k, v in params.items() if isinstance(v, BytesIO)}
97 form = {k: v for k, v in params.items() if k not in files}
98 response = await self.client.post(
99 path,
100 data=form,
101 files=files,
102 timeout=6,
103 )
104 response.raise_for_status()
105 return response.json()
106
107 @classmethod
108 async def new(cls, self_id: int, *, host: IPv4Address, port: int,
109 auth_key: str) -> "SessionManager":
110 session = cls.get(self_id)
111 if session is not None:
112 return session
113
114 client = httpx.AsyncClient(base_url=f'http://{host}:{port}')
115 response = await client.post('/auth', json={'authKey': auth_key})
116 response.raise_for_status()
117 auth = response.json()
118 assert auth['code'] == 0
119 session_key = auth['session']
120 response = await client.post('/verify',
121 json={
122 'sessionKey': session_key,
123 'qq': self_id
124 })
125 assert response.json()['code'] == 0
126 cls.sessions[self_id] = session_key, datetime.now(), client
127
128 return cls(session_key, client)
129
130 @classmethod
131 def get(cls,
132 self_id: int,
133 check_expire: bool = True) -> Optional["SessionManager"]:
134 if self_id not in cls.sessions:
135 return None
136 key, time, client = cls.sessions[self_id]
137 if check_expire and (datetime.now() - time > cls.session_expiry):
138 return None
139 return cls(key, client)
140
141
142 class Bot(BaseBot):
143 r"""
144 mirai-api-http 协议 Bot 适配。
145
146 \:\:\: warning
147 API中为了使代码更加整洁, 我们采用了与PEP8相符的命名规则取代Mirai原有的驼峰命名
148
149 部分字段可能与文档在符号上不一致
150 \:\:\:
151
152 """
153
154 @property
155 @overrides(BaseBot)
156 def type(self) -> str:
157 return "mirai"
158
159 @property
160 def alive(self) -> bool:
161 assert isinstance(self.request, WebSocket)
162 return not self.request.closed
163
164 @property
165 def api(self) -> SessionManager:
166 """返回该Bot对象的会话管理实例以提供API主动调用"""
167 api = SessionManager.get(self_id=int(self.self_id))
168 assert api is not None, 'SessionManager has not been initialized'
169 return api
170
171 @classmethod
172 @overrides(BaseBot)
173 async def check_permission(
174 cls, driver: Driver,
175 request: HTTPConnection) -> Tuple[Optional[str], HTTPResponse]:
176 if isinstance(request, WebSocket):
177 return None, HTTPResponse(
178 501, b'Websocket connection is not implemented')
179 self_id: Optional[str] = request.headers.get('bot')
180 if self_id is None:
181 return None, HTTPResponse(400, b'Header `Bot` is required.')
182 self_id = str(self_id).strip()
183 await SessionManager.new(
184 int(self_id),
185 host=cls.mirai_config.host, # type: ignore
186 port=cls.mirai_config.port, #type: ignore
187 auth_key=cls.mirai_config.auth_key) # type: ignore
188 return self_id, HTTPResponse(204, b'')
189
190 @classmethod
191 @overrides(BaseBot)
192 def register(cls, driver: Driver, config: "Config"):
193 cls.mirai_config = MiraiConfig(**config.dict())
194 if (cls.mirai_config.auth_key and cls.mirai_config.host and
195 cls.mirai_config.port) is None:
196 raise ApiNotAvailable('mirai')
197 super().register(driver, config)
198
199 @overrides(BaseBot)
200 async def handle_message(self, message: dict):
201 Log.debug(f'received message {message}')
202 try:
203 await process_event(
204 bot=self,
205 event=Event.new({
206 **message,
207 'self_id': self.self_id,
208 }),
209 )
210 except Exception as e:
211 Log.error(f'Failed to handle message: {message}', e)
212
213 @overrides(BaseBot)
214 async def _call_api(self, api: str, **data) -> NoReturn:
215 raise NotImplementedError
216
217 @overrides(BaseBot)
218 async def call_api(self, api: str, **data) -> NoReturn:
219 r"""
220 \:\:\: danger
221 由于Mirai的HTTP API特殊性, 该API暂时无法实现
222 \:\:\:
223
224 \:\:\: tip
225 你可以使用 ``MiraiBot.api`` 中提供的调用方法来代替
226 \:\:\:
227 """
228 raise NotImplementedError
229
230 @overrides(BaseBot)
231 def __getattr__(self, key: str) -> NoReturn:
232 """由于Mirai的HTTP API特殊性, 该API暂时无法实现"""
233 raise NotImplementedError
234
235 @overrides(BaseBot)
236 @argument_validation
237 async def send(self,
238 event: Event,
239 message: Union[MessageChain, MessageSegment, str],
240 at_sender: bool = False):
241 """
242 :说明:
243
244 根据 ``event`` 向触发事件的主体发送信息
245
246 :参数:
247
248 * ``event: Event``: Event对象
249 * ``message: Union[MessageChain, MessageSegment, str]``: 要发送的消息
250 * ``at_sender: bool``: 是否 @ 事件主体
251 """
252 if not isinstance(message, MessageChain):
253 message = MessageChain(message)
254 if isinstance(event, FriendMessage):
255 return await self.send_friend_message(target=event.sender.id,
256 message_chain=message)
257 elif isinstance(event, GroupMessage):
258 if at_sender:
259 message = MessageSegment.at(event.sender.id) + message
260 return await self.send_group_message(group=event.sender.group.id,
261 message_chain=message)
262 elif isinstance(event, TempMessage):
263 return await self.send_temp_message(qq=event.sender.id,
264 group=event.sender.group.id,
265 message_chain=message)
266 else:
267 raise ValueError(f'Unsupported event type {event!r}.')
268
269 @argument_validation
270 async def send_friend_message(self, target: int,
271 message_chain: MessageChain):
272 """
273 :说明:
274
275 使用此方法向指定好友发送消息
276
277 :参数:
278
279 * ``target: int``: 发送消息目标好友的 QQ 号
280 * ``message_chain: MessageChain``: 消息链,是一个消息对象构成的数组
281 """
282 return await self.api.post('sendFriendMessage',
283 params={
284 'target': target,
285 'messageChain': message_chain.export()
286 })
287
288 @argument_validation
289 async def send_temp_message(self, qq: int, group: int,
290 message_chain: MessageChain):
291 """
292 :说明:
293
294 使用此方法向临时会话对象发送消息
295
296 :参数:
297
298 * ``qq: int``: 临时会话对象 QQ 号
299 * ``group: int``: 临时会话群号
300 * ``message_chain: MessageChain``: 消息链,是一个消息对象构成的数组
301 """
302 return await self.api.post('sendTempMessage',
303 params={
304 'qq': qq,
305 'group': group,
306 'messageChain': message_chain.export()
307 })
308
309 @argument_validation
310 async def send_group_message(self,
311 group: int,
312 message_chain: MessageChain,
313 quote: Optional[int] = None):
314 """
315 :说明:
316
317 使用此方法向指定群发送消息
318
319 :参数:
320
321 * ``group: int``: 发送消息目标群的群号
322 * ``message_chain: MessageChain``: 消息链,是一个消息对象构成的数组
323 * ``quote: Optional[int]``: 引用一条消息的 message_id 进行回复
324 """
325 return await self.api.post('sendGroupMessage',
326 params={
327 'group': group,
328 'messageChain': message_chain.export(),
329 'quote': quote
330 })
331
332 @argument_validation
333 async def recall(self, target: int):
334 """
335 :说明:
336
337 使用此方法撤回指定消息。对于bot发送的消息,有2分钟时间限制。对于撤回群聊中群员的消息,需要有相应权限
338
339 :参数:
340
341 * ``target: int``: 需要撤回的消息的message_id
342 """
343 return await self.api.post('recall', params={'target': target})
344
345 @argument_validation
346 async def send_image_message(self, target: int, qq: int, group: int,
347 urls: List[str]) -> List[str]:
348 """
349 :说明:
350
351 使用此方法向指定对象(群或好友)发送图片消息
352 除非需要通过此手段获取image_id,否则不推荐使用该接口
353
354 > 当qq和group同时存在时,表示发送临时会话图片,qq为临时会话对象QQ号,group为临时会话发起的群号
355
356 :参数:
357
358 * ``target: int``: 发送对象的QQ号或群号,可能存在歧义
359 * ``qq: int``: 发送对象的QQ号
360 * ``group: int``: 发送对象的群号
361 * ``urls: List[str]``: 是一个url字符串构成的数组
362
363 :返回:
364
365 - ``List[str]``: 一个包含图片imageId的数组
366 """
367 return await self.api.post('sendImageMessage',
368 params={
369 'target': target,
370 'qq': qq,
371 'group': group,
372 'urls': urls
373 })
374
375 @argument_validation
376 async def upload_image(self, type: str, img: BytesIO):
377 """
378 :说明:
379
380 使用此方法上传图片文件至服务器并返回Image_id
381
382 :参数:
383
384 * ``type: str``: "friend" 或 "group" 或 "temp"
385 * ``img: BytesIO``: 图片的BytesIO对象
386 """
387 return await self.api.upload('uploadImage',
388 params={
389 'type': type,
390 'img': img
391 })
392
393 @argument_validation
394 async def upload_voice(self, type: str, voice: BytesIO):
395 """
396 :说明:
397
398 使用此方法上传语音文件至服务器并返回voice_id
399
400 :参数:
401
402 * ``type: str``: 当前仅支持 "group"
403 * ``voice: BytesIO``: 语音的BytesIO对象
404 """
405 return await self.api.upload('uploadVoice',
406 params={
407 'type': type,
408 'voice': voice
409 })
410
411 @argument_validation
412 async def fetch_message(self, count: int = 10):
413 """
414 :说明:
415
416 使用此方法获取bot接收到的最老消息和最老各类事件
417 (会从MiraiApiHttp消息记录中删除)
418
419 :参数:
420
421 * ``count: int``: 获取消息和事件的数量
422 """
423 return await self.api.request('fetchMessage', params={'count': count})
424
425 @argument_validation
426 async def fetch_latest_message(self, count: int = 10):
427 """
428 :说明:
429
430 使用此方法获取bot接收到的最新消息和最新各类事件
431 (会从MiraiApiHttp消息记录中删除)
432
433 :参数:
434
435 * ``count: int``: 获取消息和事件的数量
436 """
437 return await self.api.request('fetchLatestMessage',
438 params={'count': count})
439
440 @argument_validation
441 async def peek_message(self, count: int = 10):
442 """
443 :说明:
444
445 使用此方法获取bot接收到的最老消息和最老各类事件
446 (不会从MiraiApiHttp消息记录中删除)
447
448 :参数:
449
450 * ``count: int``: 获取消息和事件的数量
451 """
452 return await self.api.request('peekMessage', params={'count': count})
453
454 @argument_validation
455 async def peek_latest_message(self, count: int = 10):
456 """
457 :说明:
458
459 使用此方法获取bot接收到的最新消息和最新各类事件
460 (不会从MiraiApiHttp消息记录中删除)
461
462 :参数:
463
464 * ``count: int``: 获取消息和事件的数量
465 """
466 return await self.api.request('peekLatestMessage',
467 params={'count': count})
468
469 @argument_validation
470 async def messsage_from_id(self, id: int):
471 """
472 :说明:
473
474 通过messageId获取一条被缓存的消息
475 使用此方法获取bot接收到的消息和各类事件
476
477 :参数:
478
479 * ``id: int``: 获取消息的message_id
480 """
481 return await self.api.request('messageFromId', params={'id': id})
482
483 @argument_validation
484 async def count_message(self):
485 """
486 :说明:
487
488 使用此方法获取bot接收并缓存的消息总数,注意不包含被删除的
489 """
490 return await self.api.request('countMessage')
491
492 @argument_validation
493 async def friend_list(self) -> List[Dict[str, Any]]:
494 """
495 :说明:
496
497 使用此方法获取bot的好友列表
498
499 :返回:
500
501 - ``List[Dict[str, Any]]``: 返回的好友列表数据
502 """
503 return await self.api.request('friendList')
504
505 @argument_validation
506 async def group_list(self) -> List[Dict[str, Any]]:
507 """
508 :说明:
509
510 使用此方法获取bot的群列表
511
512 :返回:
513
514 - ``List[Dict[str, Any]]``: 返回的群列表数据
515 """
516 return await self.api.request('groupList')
517
518 @argument_validation
519 async def member_list(self, target: int) -> List[Dict[str, Any]]:
520 """
521 :说明:
522
523 使用此方法获取bot指定群种的成员列表
524
525 :参数:
526
527 * ``target: int``: 指定群的群号
528
529 :返回:
530
531 - ``List[Dict[str, Any]]``: 返回的群成员列表数据
532 """
533 return await self.api.request('memberList', params={'target': target})
534
535 @argument_validation
536 async def mute(self, target: int, member_id: int, time: int):
537 """
538 :说明:
539
540 使用此方法指定群禁言指定群员(需要有相关权限)
541
542 :参数:
543
544 * ``target: int``: 指定群的群号
545 * ``member_id: int``: 指定群员QQ号
546 * ``time: int``: 禁言时长,单位为秒,最多30天
547 """
548 return await self.api.post('mute',
549 params={
550 'target': target,
551 'memberId': member_id,
552 'time': time
553 })
554
555 @argument_validation
556 async def unmute(self, target: int, member_id: int):
557 """
558 :说明:
559
560 使用此方法指定群解除群成员禁言(需要有相关权限)
561
562 :参数:
563
564 * ``target: int``: 指定群的群号
565 * ``member_id: int``: 指定群员QQ号
566 """
567 return await self.api.post('unmute',
568 params={
569 'target': target,
570 'memberId': member_id
571 })
572
573 @argument_validation
574 async def kick(self, target: int, member_id: int, msg: str):
575 """
576 :说明:
577
578 使用此方法移除指定群成员(需要有相关权限)
579
580 :参数:
581
582 * ``target: int``: 指定群的群号
583 * ``member_id: int``: 指定群员QQ号
584 * ``msg: str``: 信息
585 """
586 return await self.api.post('kick',
587 params={
588 'target': target,
589 'memberId': member_id,
590 'msg': msg
591 })
592
593 @argument_validation
594 async def quit(self, target: int):
595 """
596 :说明:
597
598 使用此方法使Bot退出群聊
599
600 :参数:
601
602 * ``target: int``: 退出的群号
603 """
604 return await self.api.post('quit', params={'target': target})
605
606 @argument_validation
607 async def mute_all(self, target: int):
608 """
609 :说明:
610
611 使用此方法令指定群进行全体禁言(需要有相关权限)
612
613 :参数:
614
615 * ``target: int``: 指定群的群号
616 """
617 return await self.api.post('muteAll', params={'target': target})
618
619 @argument_validation
620 async def unmute_all(self, target: int):
621 """
622 :说明:
623
624 使用此方法令指定群解除全体禁言(需要有相关权限)
625
626 :参数:
627
628 * ``target: int``: 指定群的群号
629 """
630 return await self.api.post('unmuteAll', params={'target': target})
631
632 @argument_validation
633 async def group_config(self, target: int):
634 """
635 :说明:
636
637 使用此方法获取群设置
638
639 :参数:
640
641 * ``target: int``: 指定群的群号
642
643 :返回:
644
645 .. code-block:: json
646
647 {
648 "name": "群名称",
649 "announcement": "群公告",
650 "confessTalk": true,
651 "allowMemberInvite": true,
652 "autoApprove": true,
653 "anonymousChat": true
654 }
655 """
656 return await self.api.request('groupConfig', params={'target': target})
657
658 @argument_validation
659 async def modify_group_config(self, target: int, config: Dict[str, Any]):
660 """
661 :说明:
662
663 使用此方法修改群设置(需要有相关权限)
664
665 :参数:
666
667 * ``target: int``: 指定群的群号
668 * ``config: Dict[str, Any]``: 群设置, 格式见 ``group_config`` 的返回值
669 """
670 return await self.api.post('groupConfig',
671 params={
672 'target': target,
673 'config': config
674 })
675
676 @argument_validation
677 async def member_info(self, target: int, member_id: int):
678 """
679 :说明:
680
681 使用此方法获取群员资料
682
683 :参数:
684
685 * ``target: int``: 指定群的群号
686 * ``member_id: int``: 群员QQ号
687
688 :返回:
689
690 .. code-block:: json
691
692 {
693 "name": "群名片",
694 "specialTitle": "群头衔"
695 }
696 """
697 return await self.api.request('memberInfo',
698 params={
699 'target': target,
700 'memberId': member_id
701 })
702
703 @argument_validation
704 async def modify_member_info(self, target: int, member_id: int,
705 info: Dict[str, Any]):
706 """
707 :说明:
708
709 使用此方法修改群员资料(需要有相关权限)
710
711 :参数:
712
713 * ``target: int``: 指定群的群号
714 * ``member_id: int``: 群员QQ号
715 * ``info: Dict[str, Any]``: 群员资料, 格式见 ``member_info`` 的返回值
716 """
717 return await self.api.post('memberInfo',
718 params={
719 'target': target,
720 'memberId': member_id,
721 'info': info
722 })
723
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py b/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py
--- a/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py
+++ b/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py
@@ -95,6 +95,7 @@
"""
files = {k: v for k, v in params.items() if isinstance(v, BytesIO)}
form = {k: v for k, v in params.items() if k not in files}
+ form['sessionKey'] = self.session_key
response = await self.client.post(
path,
data=form,
| {"golden_diff": "diff --git a/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py b/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py\n--- a/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py\n+++ b/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py\n@@ -95,6 +95,7 @@\n \"\"\"\n files = {k: v for k, v in params.items() if isinstance(v, BytesIO)}\n form = {k: v for k, v in params.items() if k not in files}\n+ form['sessionKey'] = self.session_key\n response = await self.client.post(\n path,\n data=form,\n", "issue": "Bug: mirai adapter\u8c03\u7528upload_image\u4e0a\u4f20\u56fe\u7247\u62a5\u9519\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\nmirai adapter\u8c03\u7528upload_image\u4e0a\u4f20\u56fe\u7247\u62a5\u9519\uff1a\r\n\r\n```powershell\r\nhttpx.HTTPStatusError: 400 Client Error: Bad Request for url: http://127.0.0.1:8000/uploadImage\r\nFor more information check: https://httpstatuses.com/400\r\n```\r\n\r\n**\u5982\u4f55\u590d\u73b0\uff1f**\r\n\r\n```python\r\nwith open('file.png', 'rb') as f:\r\n img = BytesIO(f.read())\r\nimg_id = await bot.upload_image('group', img)\r\n```\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\n\r\n```\r\n{'imageId': '{******-****-FD90-491D-141D77303EE5}.png', 'url': 'http://gchat.qpic.cn/gchatpic_new/*********', 'path': ''}\r\n```\r\n\r\n**\u73af\u5883\u4fe1\u606f\uff1a**\r\n\r\n - OS: Windows10\r\n - Python Version: 3.8.2\r\n - Nonebot Version: 2.0.0a13.post1\r\n - Mirai Version: 2.7-M1\r\n - mirai-api-http Version: 1.12.0\r\n\r\n**\u622a\u56fe**\r\n\r\n\r\n\r\n\r\n**\u539f\u56e0**\r\n\r\nmah v-1.8.4 \u7684uploadImage api\u9700\u8981\u63d0\u4f9bsessionKey\u3002\r\n\u7ecf\u8fc7\u6d4b\u8bd5\uff0cmah \u7248\u672c1.12.0\uff0c\u5728\u589e\u52a0sessionKey\u540e\u80fd\u8fd4\u56de\u9884\u671f\u7ed3\u679c\uff0c\r\n\u662f\u4e2a\u65b0\u624b\uff0c\u4ee3\u7801\u5199\u7684\u4e0d\u597d\uff0c\u5c31\u4e0d\u63d0pr\u4e86\u3002\r\n> ### [\u56fe\u7247\u6587\u4ef6\u4e0a\u4f20](https://github.com/zxsean/mirai-api-http/blob/master/docs/API.md#%E5%9B%BE%E7%89%87%E6%96%87%E4%BB%B6%E4%B8%8A%E4%BC%A0)\r\n>\r\n> ```\r\n> [POST] /uploadImage\r\n> ```\r\n>\r\n> \u4f7f\u7528\u6b64\u65b9\u6cd5\u4e0a\u4f20\u56fe\u7247\u6587\u4ef6\u81f3\u670d\u52a1\u5668\u5e76\u8fd4\u56deImageId\r\n>\r\n> #### \u8bf7\u6c42\r\n>\r\n> Content-Type\uff1amultipart/form-data\r\n>\r\n> | \u540d\u5b57 | \u7c7b\u578b | \u53ef\u9009 | \u4e3e\u4f8b | \u8bf4\u660e |\r\n> | ---------- | ------ | ----- | ----------- | ----------------------------- |\r\n> | sessionKey | String | false | YourSession | \u5df2\u7ecf\u6fc0\u6d3b\u7684Session |\r\n> | type | String | false | \"friend \" | \"friend\" \u6216 \"group\" \u6216 \"temp\" |\r\n> | img | File | false | - | \u56fe\u7247\u6587\u4ef6 |\r\n\r\n\r\n\r\n\u5728mah v-2.0\u7684http adapter\u4e2d\u5c31\u4e0d\u9700\u8981\u4e86\r\n\r\n> ### [\u56fe\u7247\u6587\u4ef6\u4e0a\u4f20](https://github.com/project-mirai/mirai-api-http/blob/master/docs/adapter/HttpAdapter.md#%E5%9B%BE%E7%89%87%E6%96%87%E4%BB%B6%E4%B8%8A%E4%BC%A0)\r\n>\r\n> \u4f7f\u7528\u6b64\u65b9\u6cd5\u4e0a\u4f20\u56fe\u7247\u6587\u4ef6\u81f3\u670d\u52a1\u5668\u5e76\u8fd4\u56deImageId\r\n>\r\n> ```\r\n> [POST] /uploadImage\r\n> ```\r\n>\r\n> **\u672c\u63a5\u53e3\u4e3a[POST]\u8bf7\u6c42, \u53c2\u6570\u683c\u5f0f\u4e3a`multipart/form-data`**\r\n>\r\n> #### \u8bf7\u6c42:\r\n>\r\n> | \u540d\u5b57 | \u7c7b\u578b | \u53ef\u9009 | \u4e3e\u4f8b | \u8bf4\u660e |\r\n> | ---------- | ------ | ----- | ----------- | ----------------------------- |\r\n> | sessionKey | String | true | YourSession | 
\u5df2\u7ecf\u6fc0\u6d3b\u7684Session |\r\n> | type | String | false | \"friend\" | \"friend\" \u6216 \"group\" \u6216 \"temp\" |\r\n> | img | File | false | - | \u56fe\u7247\u6587\u4ef6 |\r\n\r\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom io import BytesIO\nfrom ipaddress import IPv4Address\nfrom typing import Any, Dict, List, NoReturn, Optional, Tuple, Union\n\nimport httpx\n\nfrom nonebot.config import Config\nfrom nonebot.typing import overrides\nfrom nonebot.adapters import Bot as BaseBot\nfrom nonebot.exception import ApiNotAvailable\nfrom nonebot.drivers import Driver, HTTPConnection, HTTPResponse, WebSocket\n\nfrom .config import Config as MiraiConfig\nfrom .event import Event, FriendMessage, GroupMessage, TempMessage\nfrom .message import MessageChain, MessageSegment\nfrom .utils import Log, argument_validation, catch_network_error, process_event\n\n\nclass SessionManager:\n \"\"\"Bot\u4f1a\u8bdd\u7ba1\u7406\u5668, \u63d0\u4f9bAPI\u4e3b\u52a8\u8c03\u7528\u63a5\u53e3\"\"\"\n sessions: Dict[int, Tuple[str, datetime, httpx.AsyncClient]] = {}\n session_expiry: timedelta = timedelta(minutes=15)\n\n def __init__(self, session_key: str, client: httpx.AsyncClient):\n self.session_key, self.client = session_key, client\n\n @catch_network_error\n async def post(self,\n path: str,\n *,\n params: Optional[Dict[str, Any]] = None) -> Any:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4ee5POST\u65b9\u5f0f\u4e3b\u52a8\u63d0\u4ea4API\u8bf7\u6c42\n\n :\u53c2\u6570:\n\n * ``path: str``: \u5bf9\u5e94API\u8def\u5f84\n * ``params: Optional[Dict[str, Any]]``: \u8bf7\u6c42\u53c2\u6570 (\u65e0\u9700sessionKey)\n\n :\u8fd4\u56de:\n\n - ``Dict[str, Any]``: API \u8fd4\u56de\u503c\n \"\"\"\n response = await self.client.post(\n path,\n json={\n **(params or {}),\n 'sessionKey': self.session_key,\n },\n timeout=3,\n )\n response.raise_for_status()\n return response.json()\n\n @catch_network_error\n async def request(self,\n path: str,\n *,\n params: Optional[Dict[str, Any]] = None) -> Any:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4ee5GET\u65b9\u5f0f\u4e3b\u52a8\u63d0\u4ea4API\u8bf7\u6c42\n\n :\u53c2\u6570:\n\n * ``path: str``: \u5bf9\u5e94API\u8def\u5f84\n * ``params: Optional[Dict[str, Any]]``: \u8bf7\u6c42\u53c2\u6570 (\u65e0\u9700sessionKey)\n \"\"\"\n response = await self.client.get(\n path,\n params={\n **(params or {}),\n 'sessionKey': self.session_key,\n },\n timeout=3,\n )\n response.raise_for_status()\n return response.json()\n\n @catch_network_error\n async def upload(self, path: str, *, params: Dict[str, Any]) -> Any:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4ee5\u8868\u5355(``multipart/form-data``)\u5f62\u5f0f\u4e3b\u52a8\u63d0\u4ea4API\u8bf7\u6c42\n\n :\u53c2\u6570:\n\n * ``path: str``: \u5bf9\u5e94API\u8def\u5f84\n * ``params: Dict[str, Any]``: \u8bf7\u6c42\u53c2\u6570 (\u65e0\u9700sessionKey)\n \"\"\"\n files = {k: v for k, v in params.items() if isinstance(v, BytesIO)}\n form = {k: v for k, v in params.items() if k not in files}\n response = await self.client.post(\n path,\n data=form,\n files=files,\n timeout=6,\n )\n response.raise_for_status()\n return response.json()\n\n @classmethod\n async def new(cls, self_id: int, *, host: IPv4Address, port: int,\n auth_key: str) -> \"SessionManager\":\n session = cls.get(self_id)\n if session is not None:\n return session\n\n client = httpx.AsyncClient(base_url=f'http://{host}:{port}')\n response = await client.post('/auth', json={'authKey': auth_key})\n response.raise_for_status()\n auth = response.json()\n assert auth['code'] == 0\n 
session_key = auth['session']\n response = await client.post('/verify',\n json={\n 'sessionKey': session_key,\n 'qq': self_id\n })\n assert response.json()['code'] == 0\n cls.sessions[self_id] = session_key, datetime.now(), client\n\n return cls(session_key, client)\n\n @classmethod\n def get(cls,\n self_id: int,\n check_expire: bool = True) -> Optional[\"SessionManager\"]:\n if self_id not in cls.sessions:\n return None\n key, time, client = cls.sessions[self_id]\n if check_expire and (datetime.now() - time > cls.session_expiry):\n return None\n return cls(key, client)\n\n\nclass Bot(BaseBot):\n r\"\"\"\n mirai-api-http \u534f\u8bae Bot \u9002\u914d\u3002\n\n \\:\\:\\: warning\n API\u4e2d\u4e3a\u4e86\u4f7f\u4ee3\u7801\u66f4\u52a0\u6574\u6d01, \u6211\u4eec\u91c7\u7528\u4e86\u4e0ePEP8\u76f8\u7b26\u7684\u547d\u540d\u89c4\u5219\u53d6\u4ee3Mirai\u539f\u6709\u7684\u9a7c\u5cf0\u547d\u540d\n\n \u90e8\u5206\u5b57\u6bb5\u53ef\u80fd\u4e0e\u6587\u6863\u5728\u7b26\u53f7\u4e0a\u4e0d\u4e00\u81f4\n \\:\\:\\:\n\n \"\"\"\n\n @property\n @overrides(BaseBot)\n def type(self) -> str:\n return \"mirai\"\n\n @property\n def alive(self) -> bool:\n assert isinstance(self.request, WebSocket)\n return not self.request.closed\n\n @property\n def api(self) -> SessionManager:\n \"\"\"\u8fd4\u56de\u8be5Bot\u5bf9\u8c61\u7684\u4f1a\u8bdd\u7ba1\u7406\u5b9e\u4f8b\u4ee5\u63d0\u4f9bAPI\u4e3b\u52a8\u8c03\u7528\"\"\"\n api = SessionManager.get(self_id=int(self.self_id))\n assert api is not None, 'SessionManager has not been initialized'\n return api\n\n @classmethod\n @overrides(BaseBot)\n async def check_permission(\n cls, driver: Driver,\n request: HTTPConnection) -> Tuple[Optional[str], HTTPResponse]:\n if isinstance(request, WebSocket):\n return None, HTTPResponse(\n 501, b'Websocket connection is not implemented')\n self_id: Optional[str] = request.headers.get('bot')\n if self_id is None:\n return None, HTTPResponse(400, b'Header `Bot` is required.')\n self_id = str(self_id).strip()\n await SessionManager.new(\n int(self_id),\n host=cls.mirai_config.host, # type: ignore\n port=cls.mirai_config.port, #type: ignore\n auth_key=cls.mirai_config.auth_key) # type: ignore\n return self_id, HTTPResponse(204, b'')\n\n @classmethod\n @overrides(BaseBot)\n def register(cls, driver: Driver, config: \"Config\"):\n cls.mirai_config = MiraiConfig(**config.dict())\n if (cls.mirai_config.auth_key and cls.mirai_config.host and\n cls.mirai_config.port) is None:\n raise ApiNotAvailable('mirai')\n super().register(driver, config)\n\n @overrides(BaseBot)\n async def handle_message(self, message: dict):\n Log.debug(f'received message {message}')\n try:\n await process_event(\n bot=self,\n event=Event.new({\n **message,\n 'self_id': self.self_id,\n }),\n )\n except Exception as e:\n Log.error(f'Failed to handle message: {message}', e)\n\n @overrides(BaseBot)\n async def _call_api(self, api: str, **data) -> NoReturn:\n raise NotImplementedError\n\n @overrides(BaseBot)\n async def call_api(self, api: str, **data) -> NoReturn:\n r\"\"\"\n \\:\\:\\: danger\n \u7531\u4e8eMirai\u7684HTTP API\u7279\u6b8a\u6027, \u8be5API\u6682\u65f6\u65e0\u6cd5\u5b9e\u73b0\n \\:\\:\\:\n\n \\:\\:\\: tip\n \u4f60\u53ef\u4ee5\u4f7f\u7528 ``MiraiBot.api`` \u4e2d\u63d0\u4f9b\u7684\u8c03\u7528\u65b9\u6cd5\u6765\u4ee3\u66ff\n \\:\\:\\:\n \"\"\"\n raise NotImplementedError\n\n @overrides(BaseBot)\n def __getattr__(self, key: str) -> NoReturn:\n \"\"\"\u7531\u4e8eMirai\u7684HTTP API\u7279\u6b8a\u6027, \u8be5API\u6682\u65f6\u65e0\u6cd5\u5b9e\u73b0\"\"\"\n raise 
NotImplementedError\n\n @overrides(BaseBot)\n @argument_validation\n async def send(self,\n event: Event,\n message: Union[MessageChain, MessageSegment, str],\n at_sender: bool = False):\n \"\"\"\n :\u8bf4\u660e:\n\n \u6839\u636e ``event`` \u5411\u89e6\u53d1\u4e8b\u4ef6\u7684\u4e3b\u4f53\u53d1\u9001\u4fe1\u606f\n\n :\u53c2\u6570:\n\n * ``event: Event``: Event\u5bf9\u8c61\n * ``message: Union[MessageChain, MessageSegment, str]``: \u8981\u53d1\u9001\u7684\u6d88\u606f\n * ``at_sender: bool``: \u662f\u5426 @ \u4e8b\u4ef6\u4e3b\u4f53\n \"\"\"\n if not isinstance(message, MessageChain):\n message = MessageChain(message)\n if isinstance(event, FriendMessage):\n return await self.send_friend_message(target=event.sender.id,\n message_chain=message)\n elif isinstance(event, GroupMessage):\n if at_sender:\n message = MessageSegment.at(event.sender.id) + message\n return await self.send_group_message(group=event.sender.group.id,\n message_chain=message)\n elif isinstance(event, TempMessage):\n return await self.send_temp_message(qq=event.sender.id,\n group=event.sender.group.id,\n message_chain=message)\n else:\n raise ValueError(f'Unsupported event type {event!r}.')\n\n @argument_validation\n async def send_friend_message(self, target: int,\n message_chain: MessageChain):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u5411\u6307\u5b9a\u597d\u53cb\u53d1\u9001\u6d88\u606f\n\n :\u53c2\u6570:\n\n * ``target: int``: \u53d1\u9001\u6d88\u606f\u76ee\u6807\u597d\u53cb\u7684 QQ \u53f7\n * ``message_chain: MessageChain``: \u6d88\u606f\u94fe\uff0c\u662f\u4e00\u4e2a\u6d88\u606f\u5bf9\u8c61\u6784\u6210\u7684\u6570\u7ec4\n \"\"\"\n return await self.api.post('sendFriendMessage',\n params={\n 'target': target,\n 'messageChain': message_chain.export()\n })\n\n @argument_validation\n async def send_temp_message(self, qq: int, group: int,\n message_chain: MessageChain):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u5411\u4e34\u65f6\u4f1a\u8bdd\u5bf9\u8c61\u53d1\u9001\u6d88\u606f\n\n :\u53c2\u6570:\n\n * ``qq: int``: \u4e34\u65f6\u4f1a\u8bdd\u5bf9\u8c61 QQ \u53f7\n * ``group: int``: \u4e34\u65f6\u4f1a\u8bdd\u7fa4\u53f7\n * ``message_chain: MessageChain``: \u6d88\u606f\u94fe\uff0c\u662f\u4e00\u4e2a\u6d88\u606f\u5bf9\u8c61\u6784\u6210\u7684\u6570\u7ec4\n \"\"\"\n return await self.api.post('sendTempMessage',\n params={\n 'qq': qq,\n 'group': group,\n 'messageChain': message_chain.export()\n })\n\n @argument_validation\n async def send_group_message(self,\n group: int,\n message_chain: MessageChain,\n quote: Optional[int] = None):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u5411\u6307\u5b9a\u7fa4\u53d1\u9001\u6d88\u606f\n\n :\u53c2\u6570:\n\n * ``group: int``: \u53d1\u9001\u6d88\u606f\u76ee\u6807\u7fa4\u7684\u7fa4\u53f7\n * ``message_chain: MessageChain``: \u6d88\u606f\u94fe\uff0c\u662f\u4e00\u4e2a\u6d88\u606f\u5bf9\u8c61\u6784\u6210\u7684\u6570\u7ec4\n * ``quote: Optional[int]``: \u5f15\u7528\u4e00\u6761\u6d88\u606f\u7684 message_id \u8fdb\u884c\u56de\u590d\n \"\"\"\n return await self.api.post('sendGroupMessage',\n params={\n 'group': group,\n 'messageChain': message_chain.export(),\n 'quote': quote\n })\n\n @argument_validation\n async def recall(self, target: int):\n \"\"\"\n :\u8bf4\u660e:\n\n 
\u4f7f\u7528\u6b64\u65b9\u6cd5\u64a4\u56de\u6307\u5b9a\u6d88\u606f\u3002\u5bf9\u4e8ebot\u53d1\u9001\u7684\u6d88\u606f\uff0c\u67092\u5206\u949f\u65f6\u95f4\u9650\u5236\u3002\u5bf9\u4e8e\u64a4\u56de\u7fa4\u804a\u4e2d\u7fa4\u5458\u7684\u6d88\u606f\uff0c\u9700\u8981\u6709\u76f8\u5e94\u6743\u9650\n\n :\u53c2\u6570:\n\n * ``target: int``: \u9700\u8981\u64a4\u56de\u7684\u6d88\u606f\u7684message_id\n \"\"\"\n return await self.api.post('recall', params={'target': target})\n\n @argument_validation\n async def send_image_message(self, target: int, qq: int, group: int,\n urls: List[str]) -> List[str]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u5411\u6307\u5b9a\u5bf9\u8c61\uff08\u7fa4\u6216\u597d\u53cb\uff09\u53d1\u9001\u56fe\u7247\u6d88\u606f\n \u9664\u975e\u9700\u8981\u901a\u8fc7\u6b64\u624b\u6bb5\u83b7\u53d6image_id\uff0c\u5426\u5219\u4e0d\u63a8\u8350\u4f7f\u7528\u8be5\u63a5\u53e3\n\n > \u5f53qq\u548cgroup\u540c\u65f6\u5b58\u5728\u65f6\uff0c\u8868\u793a\u53d1\u9001\u4e34\u65f6\u4f1a\u8bdd\u56fe\u7247\uff0cqq\u4e3a\u4e34\u65f6\u4f1a\u8bdd\u5bf9\u8c61QQ\u53f7\uff0cgroup\u4e3a\u4e34\u65f6\u4f1a\u8bdd\u53d1\u8d77\u7684\u7fa4\u53f7\n\n :\u53c2\u6570:\n\n * ``target: int``: \u53d1\u9001\u5bf9\u8c61\u7684QQ\u53f7\u6216\u7fa4\u53f7\uff0c\u53ef\u80fd\u5b58\u5728\u6b67\u4e49\n * ``qq: int``: \u53d1\u9001\u5bf9\u8c61\u7684QQ\u53f7\n * ``group: int``: \u53d1\u9001\u5bf9\u8c61\u7684\u7fa4\u53f7\n * ``urls: List[str]``: \u662f\u4e00\u4e2aurl\u5b57\u7b26\u4e32\u6784\u6210\u7684\u6570\u7ec4\n\n :\u8fd4\u56de:\n\n - ``List[str]``: \u4e00\u4e2a\u5305\u542b\u56fe\u7247imageId\u7684\u6570\u7ec4\n \"\"\"\n return await self.api.post('sendImageMessage',\n params={\n 'target': target,\n 'qq': qq,\n 'group': group,\n 'urls': urls\n })\n\n @argument_validation\n async def upload_image(self, type: str, img: BytesIO):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4e0a\u4f20\u56fe\u7247\u6587\u4ef6\u81f3\u670d\u52a1\u5668\u5e76\u8fd4\u56deImage_id\n\n :\u53c2\u6570:\n\n * ``type: str``: \"friend\" \u6216 \"group\" \u6216 \"temp\"\n * ``img: BytesIO``: \u56fe\u7247\u7684BytesIO\u5bf9\u8c61\n \"\"\"\n return await self.api.upload('uploadImage',\n params={\n 'type': type,\n 'img': img\n })\n\n @argument_validation\n async def upload_voice(self, type: str, voice: BytesIO):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4e0a\u4f20\u8bed\u97f3\u6587\u4ef6\u81f3\u670d\u52a1\u5668\u5e76\u8fd4\u56devoice_id\n\n :\u53c2\u6570:\n\n * ``type: str``: \u5f53\u524d\u4ec5\u652f\u6301 \"group\"\n * ``voice: BytesIO``: \u8bed\u97f3\u7684BytesIO\u5bf9\u8c61\n \"\"\"\n return await self.api.upload('uploadVoice',\n params={\n 'type': type,\n 'voice': voice\n })\n\n @argument_validation\n async def fetch_message(self, count: int = 10):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5230\u7684\u6700\u8001\u6d88\u606f\u548c\u6700\u8001\u5404\u7c7b\u4e8b\u4ef6\n (\u4f1a\u4eceMiraiApiHttp\u6d88\u606f\u8bb0\u5f55\u4e2d\u5220\u9664)\n\n :\u53c2\u6570:\n\n * ``count: int``: \u83b7\u53d6\u6d88\u606f\u548c\u4e8b\u4ef6\u7684\u6570\u91cf\n \"\"\"\n return await self.api.request('fetchMessage', params={'count': count})\n\n @argument_validation\n async def fetch_latest_message(self, count: int = 10):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5230\u7684\u6700\u65b0\u6d88\u606f\u548c\u6700\u65b0\u5404\u7c7b\u4e8b\u4ef6\n (\u4f1a\u4eceMiraiApiHttp\u6d88\u606f\u8bb0\u5f55\u4e2d\u5220\u9664)\n\n :\u53c2\u6570:\n\n * ``count: 
int``: \u83b7\u53d6\u6d88\u606f\u548c\u4e8b\u4ef6\u7684\u6570\u91cf\n \"\"\"\n return await self.api.request('fetchLatestMessage',\n params={'count': count})\n\n @argument_validation\n async def peek_message(self, count: int = 10):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5230\u7684\u6700\u8001\u6d88\u606f\u548c\u6700\u8001\u5404\u7c7b\u4e8b\u4ef6\n (\u4e0d\u4f1a\u4eceMiraiApiHttp\u6d88\u606f\u8bb0\u5f55\u4e2d\u5220\u9664)\n\n :\u53c2\u6570:\n\n * ``count: int``: \u83b7\u53d6\u6d88\u606f\u548c\u4e8b\u4ef6\u7684\u6570\u91cf\n \"\"\"\n return await self.api.request('peekMessage', params={'count': count})\n\n @argument_validation\n async def peek_latest_message(self, count: int = 10):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5230\u7684\u6700\u65b0\u6d88\u606f\u548c\u6700\u65b0\u5404\u7c7b\u4e8b\u4ef6\n (\u4e0d\u4f1a\u4eceMiraiApiHttp\u6d88\u606f\u8bb0\u5f55\u4e2d\u5220\u9664)\n\n :\u53c2\u6570:\n\n * ``count: int``: \u83b7\u53d6\u6d88\u606f\u548c\u4e8b\u4ef6\u7684\u6570\u91cf\n \"\"\"\n return await self.api.request('peekLatestMessage',\n params={'count': count})\n\n @argument_validation\n async def messsage_from_id(self, id: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u901a\u8fc7messageId\u83b7\u53d6\u4e00\u6761\u88ab\u7f13\u5b58\u7684\u6d88\u606f\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5230\u7684\u6d88\u606f\u548c\u5404\u7c7b\u4e8b\u4ef6\n\n :\u53c2\u6570:\n\n * ``id: int``: \u83b7\u53d6\u6d88\u606f\u7684message_id\n \"\"\"\n return await self.api.request('messageFromId', params={'id': id})\n\n @argument_validation\n async def count_message(self):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5e76\u7f13\u5b58\u7684\u6d88\u606f\u603b\u6570\uff0c\u6ce8\u610f\u4e0d\u5305\u542b\u88ab\u5220\u9664\u7684\n \"\"\"\n return await self.api.request('countMessage')\n\n @argument_validation\n async def friend_list(self) -> List[Dict[str, Any]]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u7684\u597d\u53cb\u5217\u8868\n\n :\u8fd4\u56de:\n\n - ``List[Dict[str, Any]]``: \u8fd4\u56de\u7684\u597d\u53cb\u5217\u8868\u6570\u636e\n \"\"\"\n return await self.api.request('friendList')\n\n @argument_validation\n async def group_list(self) -> List[Dict[str, Any]]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u7684\u7fa4\u5217\u8868\n\n :\u8fd4\u56de:\n\n - ``List[Dict[str, Any]]``: \u8fd4\u56de\u7684\u7fa4\u5217\u8868\u6570\u636e\n \"\"\"\n return await self.api.request('groupList')\n\n @argument_validation\n async def member_list(self, target: int) -> List[Dict[str, Any]]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u6307\u5b9a\u7fa4\u79cd\u7684\u6210\u5458\u5217\u8868\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n\n :\u8fd4\u56de:\n\n - ``List[Dict[str, Any]]``: \u8fd4\u56de\u7684\u7fa4\u6210\u5458\u5217\u8868\u6570\u636e\n \"\"\"\n return await self.api.request('memberList', params={'target': target})\n\n @argument_validation\n async def mute(self, target: int, member_id: int, time: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u6307\u5b9a\u7fa4\u7981\u8a00\u6307\u5b9a\u7fa4\u5458\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``member_id: int``: \u6307\u5b9a\u7fa4\u5458QQ\u53f7\n * ``time: int``: 
\u7981\u8a00\u65f6\u957f\uff0c\u5355\u4f4d\u4e3a\u79d2\uff0c\u6700\u591a30\u5929\n \"\"\"\n return await self.api.post('mute',\n params={\n 'target': target,\n 'memberId': member_id,\n 'time': time\n })\n\n @argument_validation\n async def unmute(self, target: int, member_id: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u6307\u5b9a\u7fa4\u89e3\u9664\u7fa4\u6210\u5458\u7981\u8a00\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``member_id: int``: \u6307\u5b9a\u7fa4\u5458QQ\u53f7\n \"\"\"\n return await self.api.post('unmute',\n params={\n 'target': target,\n 'memberId': member_id\n })\n\n @argument_validation\n async def kick(self, target: int, member_id: int, msg: str):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u79fb\u9664\u6307\u5b9a\u7fa4\u6210\u5458\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``member_id: int``: \u6307\u5b9a\u7fa4\u5458QQ\u53f7\n * ``msg: str``: \u4fe1\u606f\n \"\"\"\n return await self.api.post('kick',\n params={\n 'target': target,\n 'memberId': member_id,\n 'msg': msg\n })\n\n @argument_validation\n async def quit(self, target: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4f7fBot\u9000\u51fa\u7fa4\u804a\n\n :\u53c2\u6570:\n\n * ``target: int``: \u9000\u51fa\u7684\u7fa4\u53f7\n \"\"\"\n return await self.api.post('quit', params={'target': target})\n\n @argument_validation\n async def mute_all(self, target: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4ee4\u6307\u5b9a\u7fa4\u8fdb\u884c\u5168\u4f53\u7981\u8a00\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n \"\"\"\n return await self.api.post('muteAll', params={'target': target})\n\n @argument_validation\n async def unmute_all(self, target: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4ee4\u6307\u5b9a\u7fa4\u89e3\u9664\u5168\u4f53\u7981\u8a00\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n \"\"\"\n return await self.api.post('unmuteAll', params={'target': target})\n\n @argument_validation\n async def group_config(self, target: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6\u7fa4\u8bbe\u7f6e\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n\n :\u8fd4\u56de:\n\n .. 
code-block:: json\n\n {\n \"name\": \"\u7fa4\u540d\u79f0\",\n \"announcement\": \"\u7fa4\u516c\u544a\",\n \"confessTalk\": true,\n \"allowMemberInvite\": true,\n \"autoApprove\": true,\n \"anonymousChat\": true\n }\n \"\"\"\n return await self.api.request('groupConfig', params={'target': target})\n\n @argument_validation\n async def modify_group_config(self, target: int, config: Dict[str, Any]):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4fee\u6539\u7fa4\u8bbe\u7f6e\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``config: Dict[str, Any]``: \u7fa4\u8bbe\u7f6e, \u683c\u5f0f\u89c1 ``group_config`` \u7684\u8fd4\u56de\u503c\n \"\"\"\n return await self.api.post('groupConfig',\n params={\n 'target': target,\n 'config': config\n })\n\n @argument_validation\n async def member_info(self, target: int, member_id: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6\u7fa4\u5458\u8d44\u6599\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``member_id: int``: \u7fa4\u5458QQ\u53f7\n\n :\u8fd4\u56de:\n\n .. code-block:: json\n\n {\n \"name\": \"\u7fa4\u540d\u7247\",\n \"specialTitle\": \"\u7fa4\u5934\u8854\"\n }\n \"\"\"\n return await self.api.request('memberInfo',\n params={\n 'target': target,\n 'memberId': member_id\n })\n\n @argument_validation\n async def modify_member_info(self, target: int, member_id: int,\n info: Dict[str, Any]):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4fee\u6539\u7fa4\u5458\u8d44\u6599\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``member_id: int``: \u7fa4\u5458QQ\u53f7\n * ``info: Dict[str, Any]``: \u7fa4\u5458\u8d44\u6599, \u683c\u5f0f\u89c1 ``member_info`` \u7684\u8fd4\u56de\u503c\n \"\"\"\n return await self.api.post('memberInfo',\n params={\n 'target': target,\n 'memberId': member_id,\n 'info': info\n })\n", "path": "packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\nfrom io import BytesIO\nfrom ipaddress import IPv4Address\nfrom typing import Any, Dict, List, NoReturn, Optional, Tuple, Union\n\nimport httpx\n\nfrom nonebot.adapters import Bot as BaseBot\nfrom nonebot.config import Config\nfrom nonebot.drivers import Driver, WebSocket\nfrom nonebot.exception import ApiNotAvailable, RequestDenied\nfrom nonebot.typing import overrides\n\nfrom .config import Config as MiraiConfig\nfrom .event import Event, FriendMessage, GroupMessage, TempMessage\nfrom .message import MessageChain, MessageSegment\nfrom .utils import Log, argument_validation, catch_network_error, process_event\n\n\nclass SessionManager:\n \"\"\"Bot\u4f1a\u8bdd\u7ba1\u7406\u5668, \u63d0\u4f9bAPI\u4e3b\u52a8\u8c03\u7528\u63a5\u53e3\"\"\"\n sessions: Dict[int, Tuple[str, datetime, httpx.AsyncClient]] = {}\n session_expiry: timedelta = timedelta(minutes=15)\n\n def __init__(self, session_key: str, client: httpx.AsyncClient):\n self.session_key, self.client = session_key, client\n\n @catch_network_error\n async def post(self,\n path: str,\n *,\n params: Optional[Dict[str, Any]] = None) -> Any:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4ee5POST\u65b9\u5f0f\u4e3b\u52a8\u63d0\u4ea4API\u8bf7\u6c42\n\n :\u53c2\u6570:\n\n * ``path: str``: \u5bf9\u5e94API\u8def\u5f84\n * ``params: Optional[Dict[str, Any]]``: \u8bf7\u6c42\u53c2\u6570 (\u65e0\u9700sessionKey)\n\n 
:\u8fd4\u56de:\n\n - ``Dict[str, Any]``: API \u8fd4\u56de\u503c\n \"\"\"\n response = await self.client.post(\n path,\n json={\n **(params or {}),\n 'sessionKey': self.session_key,\n },\n timeout=3,\n )\n response.raise_for_status()\n return response.json()\n\n @catch_network_error\n async def request(self,\n path: str,\n *,\n params: Optional[Dict[str, Any]] = None) -> Any:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4ee5GET\u65b9\u5f0f\u4e3b\u52a8\u63d0\u4ea4API\u8bf7\u6c42\n\n :\u53c2\u6570:\n\n * ``path: str``: \u5bf9\u5e94API\u8def\u5f84\n * ``params: Optional[Dict[str, Any]]``: \u8bf7\u6c42\u53c2\u6570 (\u65e0\u9700sessionKey)\n \"\"\"\n response = await self.client.get(\n path,\n params={\n **(params or {}),\n 'sessionKey': self.session_key,\n },\n timeout=3,\n )\n response.raise_for_status()\n return response.json()\n\n @catch_network_error\n async def upload(self, path: str, *, params: Dict[str, Any]) -> Any:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4ee5\u8868\u5355(``multipart/form-data``)\u5f62\u5f0f\u4e3b\u52a8\u63d0\u4ea4API\u8bf7\u6c42\n\n :\u53c2\u6570:\n\n * ``path: str``: \u5bf9\u5e94API\u8def\u5f84\n * ``params: Dict[str, Any]``: \u8bf7\u6c42\u53c2\u6570 (\u65e0\u9700sessionKey)\n \"\"\"\n files = {k: v for k, v in params.items() if isinstance(v, BytesIO)}\n form = {k: v for k, v in params.items() if k not in files}\n form['sessionKey'] = self.session_key\n response = await self.client.post(\n path,\n data=form,\n files=files,\n timeout=6,\n )\n response.raise_for_status()\n return response.json()\n\n @classmethod\n async def new(cls, self_id: int, *, host: IPv4Address, port: int,\n auth_key: str) -> \"SessionManager\":\n session = cls.get(self_id)\n if session is not None:\n return session\n\n client = httpx.AsyncClient(base_url=f'http://{host}:{port}')\n response = await client.post('/auth', json={'authKey': auth_key})\n response.raise_for_status()\n auth = response.json()\n assert auth['code'] == 0\n session_key = auth['session']\n response = await client.post('/verify',\n json={\n 'sessionKey': session_key,\n 'qq': self_id\n })\n assert response.json()['code'] == 0\n cls.sessions[self_id] = session_key, datetime.now(), client\n\n return cls(session_key, client)\n\n @classmethod\n def get(cls,\n self_id: int,\n check_expire: bool = True) -> Optional[\"SessionManager\"]:\n if self_id not in cls.sessions:\n return None\n key, time, client = cls.sessions[self_id]\n if check_expire and (datetime.now() - time > cls.session_expiry):\n return None\n return cls(key, client)\n\n\nclass Bot(BaseBot):\n \"\"\"\n mirai-api-http \u534f\u8bae Bot \u9002\u914d\u3002\n\n \\:\\:\\: warning\n API\u4e2d\u4e3a\u4e86\u4f7f\u4ee3\u7801\u66f4\u52a0\u6574\u6d01, \u6211\u4eec\u91c7\u7528\u4e86\u4e0ePEP8\u76f8\u7b26\u7684\u547d\u540d\u89c4\u5219\u53d6\u4ee3Mirai\u539f\u6709\u7684\u9a7c\u5cf0\u547d\u540d\n\n \u90e8\u5206\u5b57\u6bb5\u53ef\u80fd\u4e0e\u6587\u6863\u5728\u7b26\u53f7\u4e0a\u4e0d\u4e00\u81f4\n \\:\\:\\:\n\n \"\"\"\n\n @overrides(BaseBot)\n def __init__(self,\n connection_type: str,\n self_id: str,\n *,\n websocket: Optional[WebSocket] = None):\n super().__init__(connection_type, self_id, websocket=websocket)\n\n @property\n @overrides(BaseBot)\n def type(self) -> str:\n return \"mirai\"\n\n @property\n def alive(self) -> bool:\n return not self.websocket.closed\n\n @property\n def api(self) -> SessionManager:\n \"\"\"\u8fd4\u56de\u8be5Bot\u5bf9\u8c61\u7684\u4f1a\u8bdd\u7ba1\u7406\u5b9e\u4f8b\u4ee5\u63d0\u4f9bAPI\u4e3b\u52a8\u8c03\u7528\"\"\"\n api = SessionManager.get(self_id=int(self.self_id))\n assert 
api is not None, 'SessionManager has not been initialized'\n return api\n\n @classmethod\n @overrides(BaseBot)\n async def check_permission(cls, driver: \"Driver\", connection_type: str,\n headers: dict, body: Optional[bytes]) -> str:\n if connection_type == 'ws':\n raise RequestDenied(\n status_code=501,\n reason='Websocket connection is not implemented')\n self_id: Optional[str] = headers.get('bot')\n if self_id is None:\n raise RequestDenied(status_code=400,\n reason='Header `Bot` is required.')\n self_id = str(self_id).strip()\n await SessionManager.new(\n int(self_id),\n host=cls.mirai_config.host, # type: ignore\n port=cls.mirai_config.port, #type: ignore\n auth_key=cls.mirai_config.auth_key) # type: ignore\n return self_id\n\n @classmethod\n @overrides(BaseBot)\n def register(cls, driver: \"Driver\", config: \"Config\"):\n cls.mirai_config = MiraiConfig(**config.dict())\n if (cls.mirai_config.auth_key and cls.mirai_config.host and\n cls.mirai_config.port) is None:\n raise ApiNotAvailable('mirai')\n super().register(driver, config)\n\n @overrides(BaseBot)\n async def handle_message(self, message: dict):\n Log.debug(f'received message {message}')\n try:\n await process_event(\n bot=self,\n event=Event.new({\n **message,\n 'self_id': self.self_id,\n }),\n )\n except Exception as e:\n Log.error(f'Failed to handle message: {message}', e)\n\n @overrides(BaseBot)\n async def _call_api(self, api: str, **data) -> NoReturn:\n raise NotImplementedError\n\n @overrides(BaseBot)\n async def call_api(self, api: str, **data) -> NoReturn:\n \"\"\"\n \\:\\:\\: danger\n \u7531\u4e8eMirai\u7684HTTP API\u7279\u6b8a\u6027, \u8be5API\u6682\u65f6\u65e0\u6cd5\u5b9e\u73b0\n \\:\\:\\:\n\n \\:\\:\\: tip\n \u4f60\u53ef\u4ee5\u4f7f\u7528 ``MiraiBot.api`` \u4e2d\u63d0\u4f9b\u7684\u8c03\u7528\u65b9\u6cd5\u6765\u4ee3\u66ff\n \\:\\:\\:\n \"\"\"\n raise NotImplementedError\n\n @overrides(BaseBot)\n def __getattr__(self, key: str) -> NoReturn:\n \"\"\"\u7531\u4e8eMirai\u7684HTTP API\u7279\u6b8a\u6027, \u8be5API\u6682\u65f6\u65e0\u6cd5\u5b9e\u73b0\"\"\"\n raise NotImplementedError\n\n @overrides(BaseBot)\n @argument_validation\n async def send(self,\n event: Event,\n message: Union[MessageChain, MessageSegment, str],\n at_sender: bool = False):\n \"\"\"\n :\u8bf4\u660e:\n\n \u6839\u636e ``event`` \u5411\u89e6\u53d1\u4e8b\u4ef6\u7684\u4e3b\u4f53\u53d1\u9001\u4fe1\u606f\n\n :\u53c2\u6570:\n\n * ``event: Event``: Event\u5bf9\u8c61\n * ``message: Union[MessageChain, MessageSegment, str]``: \u8981\u53d1\u9001\u7684\u6d88\u606f\n * ``at_sender: bool``: \u662f\u5426 @ \u4e8b\u4ef6\u4e3b\u4f53\n \"\"\"\n if not isinstance(message, MessageChain):\n message = MessageChain(message)\n if isinstance(event, FriendMessage):\n return await self.send_friend_message(target=event.sender.id,\n message_chain=message)\n elif isinstance(event, GroupMessage):\n if at_sender:\n message = MessageSegment.at(event.sender.id) + message\n return await self.send_group_message(group=event.sender.group.id,\n message_chain=message)\n elif isinstance(event, TempMessage):\n return await self.send_temp_message(qq=event.sender.id,\n group=event.sender.group.id,\n message_chain=message)\n else:\n raise ValueError(f'Unsupported event type {event!r}.')\n\n @argument_validation\n async def send_friend_message(self, target: int,\n message_chain: MessageChain):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u5411\u6307\u5b9a\u597d\u53cb\u53d1\u9001\u6d88\u606f\n\n :\u53c2\u6570:\n\n * ``target: int``: 
\u53d1\u9001\u6d88\u606f\u76ee\u6807\u597d\u53cb\u7684 QQ \u53f7\n * ``message_chain: MessageChain``: \u6d88\u606f\u94fe\uff0c\u662f\u4e00\u4e2a\u6d88\u606f\u5bf9\u8c61\u6784\u6210\u7684\u6570\u7ec4\n \"\"\"\n return await self.api.post('sendFriendMessage',\n params={\n 'target': target,\n 'messageChain': message_chain.export()\n })\n\n @argument_validation\n async def send_temp_message(self, qq: int, group: int,\n message_chain: MessageChain):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u5411\u4e34\u65f6\u4f1a\u8bdd\u5bf9\u8c61\u53d1\u9001\u6d88\u606f\n\n :\u53c2\u6570:\n\n * ``qq: int``: \u4e34\u65f6\u4f1a\u8bdd\u5bf9\u8c61 QQ \u53f7\n * ``group: int``: \u4e34\u65f6\u4f1a\u8bdd\u7fa4\u53f7\n * ``message_chain: MessageChain``: \u6d88\u606f\u94fe\uff0c\u662f\u4e00\u4e2a\u6d88\u606f\u5bf9\u8c61\u6784\u6210\u7684\u6570\u7ec4\n \"\"\"\n return await self.api.post('sendTempMessage',\n params={\n 'qq': qq,\n 'group': group,\n 'messageChain': message_chain.export()\n })\n\n @argument_validation\n async def send_group_message(self,\n group: int,\n message_chain: MessageChain,\n quote: Optional[int] = None):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u5411\u6307\u5b9a\u7fa4\u53d1\u9001\u6d88\u606f\n\n :\u53c2\u6570:\n\n * ``group: int``: \u53d1\u9001\u6d88\u606f\u76ee\u6807\u7fa4\u7684\u7fa4\u53f7\n * ``message_chain: MessageChain``: \u6d88\u606f\u94fe\uff0c\u662f\u4e00\u4e2a\u6d88\u606f\u5bf9\u8c61\u6784\u6210\u7684\u6570\u7ec4\n * ``quote: Optional[int]``: \u5f15\u7528\u4e00\u6761\u6d88\u606f\u7684 message_id \u8fdb\u884c\u56de\u590d\n \"\"\"\n return await self.api.post('sendGroupMessage',\n params={\n 'group': group,\n 'messageChain': message_chain.export(),\n 'quote': quote\n })\n\n @argument_validation\n async def recall(self, target: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u64a4\u56de\u6307\u5b9a\u6d88\u606f\u3002\u5bf9\u4e8ebot\u53d1\u9001\u7684\u6d88\u606f\uff0c\u67092\u5206\u949f\u65f6\u95f4\u9650\u5236\u3002\u5bf9\u4e8e\u64a4\u56de\u7fa4\u804a\u4e2d\u7fa4\u5458\u7684\u6d88\u606f\uff0c\u9700\u8981\u6709\u76f8\u5e94\u6743\u9650\n\n :\u53c2\u6570:\n\n * ``target: int``: \u9700\u8981\u64a4\u56de\u7684\u6d88\u606f\u7684message_id\n \"\"\"\n return await self.api.post('recall', params={'target': target})\n\n @argument_validation\n async def send_image_message(self, target: int, qq: int, group: int,\n urls: List[str]) -> List[str]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u5411\u6307\u5b9a\u5bf9\u8c61\uff08\u7fa4\u6216\u597d\u53cb\uff09\u53d1\u9001\u56fe\u7247\u6d88\u606f\n \u9664\u975e\u9700\u8981\u901a\u8fc7\u6b64\u624b\u6bb5\u83b7\u53d6image_id\uff0c\u5426\u5219\u4e0d\u63a8\u8350\u4f7f\u7528\u8be5\u63a5\u53e3\n\n > \u5f53qq\u548cgroup\u540c\u65f6\u5b58\u5728\u65f6\uff0c\u8868\u793a\u53d1\u9001\u4e34\u65f6\u4f1a\u8bdd\u56fe\u7247\uff0cqq\u4e3a\u4e34\u65f6\u4f1a\u8bdd\u5bf9\u8c61QQ\u53f7\uff0cgroup\u4e3a\u4e34\u65f6\u4f1a\u8bdd\u53d1\u8d77\u7684\u7fa4\u53f7\n\n :\u53c2\u6570:\n\n * ``target: int``: \u53d1\u9001\u5bf9\u8c61\u7684QQ\u53f7\u6216\u7fa4\u53f7\uff0c\u53ef\u80fd\u5b58\u5728\u6b67\u4e49\n * ``qq: int``: \u53d1\u9001\u5bf9\u8c61\u7684QQ\u53f7\n * ``group: int``: \u53d1\u9001\u5bf9\u8c61\u7684\u7fa4\u53f7\n * ``urls: List[str]``: \u662f\u4e00\u4e2aurl\u5b57\u7b26\u4e32\u6784\u6210\u7684\u6570\u7ec4\n\n :\u8fd4\u56de:\n\n - ``List[str]``: \u4e00\u4e2a\u5305\u542b\u56fe\u7247imageId\u7684\u6570\u7ec4\n \"\"\"\n return await self.api.post('sendImageMessage',\n params={\n 'target': target,\n 'qq': qq,\n 
'group': group,\n 'urls': urls\n })\n\n @argument_validation\n async def upload_image(self, type: str, img: BytesIO):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4e0a\u4f20\u56fe\u7247\u6587\u4ef6\u81f3\u670d\u52a1\u5668\u5e76\u8fd4\u56deImage_id\n\n :\u53c2\u6570:\n\n * ``type: str``: \"friend\" \u6216 \"group\" \u6216 \"temp\"\n * ``img: BytesIO``: \u56fe\u7247\u7684BytesIO\u5bf9\u8c61\n \"\"\"\n return await self.api.upload('uploadImage',\n params={\n 'type': type,\n 'img': img\n })\n\n @argument_validation\n async def upload_voice(self, type: str, voice: BytesIO):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4e0a\u4f20\u8bed\u97f3\u6587\u4ef6\u81f3\u670d\u52a1\u5668\u5e76\u8fd4\u56devoice_id\n\n :\u53c2\u6570:\n\n * ``type: str``: \u5f53\u524d\u4ec5\u652f\u6301 \"group\"\n * ``voice: BytesIO``: \u8bed\u97f3\u7684BytesIO\u5bf9\u8c61\n \"\"\"\n return await self.api.upload('uploadVoice',\n params={\n 'type': type,\n 'voice': voice\n })\n\n @argument_validation\n async def fetch_message(self, count: int = 10):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5230\u7684\u6700\u8001\u6d88\u606f\u548c\u6700\u8001\u5404\u7c7b\u4e8b\u4ef6\n (\u4f1a\u4eceMiraiApiHttp\u6d88\u606f\u8bb0\u5f55\u4e2d\u5220\u9664)\n\n :\u53c2\u6570:\n\n * ``count: int``: \u83b7\u53d6\u6d88\u606f\u548c\u4e8b\u4ef6\u7684\u6570\u91cf\n \"\"\"\n return await self.api.request('fetchMessage', params={'count': count})\n\n @argument_validation\n async def fetch_latest_message(self, count: int = 10):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5230\u7684\u6700\u65b0\u6d88\u606f\u548c\u6700\u65b0\u5404\u7c7b\u4e8b\u4ef6\n (\u4f1a\u4eceMiraiApiHttp\u6d88\u606f\u8bb0\u5f55\u4e2d\u5220\u9664)\n\n :\u53c2\u6570:\n\n * ``count: int``: \u83b7\u53d6\u6d88\u606f\u548c\u4e8b\u4ef6\u7684\u6570\u91cf\n \"\"\"\n return await self.api.request('fetchLatestMessage',\n params={'count': count})\n\n @argument_validation\n async def peek_message(self, count: int = 10):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5230\u7684\u6700\u8001\u6d88\u606f\u548c\u6700\u8001\u5404\u7c7b\u4e8b\u4ef6\n (\u4e0d\u4f1a\u4eceMiraiApiHttp\u6d88\u606f\u8bb0\u5f55\u4e2d\u5220\u9664)\n\n :\u53c2\u6570:\n\n * ``count: int``: \u83b7\u53d6\u6d88\u606f\u548c\u4e8b\u4ef6\u7684\u6570\u91cf\n \"\"\"\n return await self.api.request('peekMessage', params={'count': count})\n\n @argument_validation\n async def peek_latest_message(self, count: int = 10):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5230\u7684\u6700\u65b0\u6d88\u606f\u548c\u6700\u65b0\u5404\u7c7b\u4e8b\u4ef6\n (\u4e0d\u4f1a\u4eceMiraiApiHttp\u6d88\u606f\u8bb0\u5f55\u4e2d\u5220\u9664)\n\n :\u53c2\u6570:\n\n * ``count: int``: \u83b7\u53d6\u6d88\u606f\u548c\u4e8b\u4ef6\u7684\u6570\u91cf\n \"\"\"\n return await self.api.request('peekLatestMessage',\n params={'count': count})\n\n @argument_validation\n async def messsage_from_id(self, id: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u901a\u8fc7messageId\u83b7\u53d6\u4e00\u6761\u88ab\u7f13\u5b58\u7684\u6d88\u606f\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5230\u7684\u6d88\u606f\u548c\u5404\u7c7b\u4e8b\u4ef6\n\n :\u53c2\u6570:\n\n * ``id: int``: \u83b7\u53d6\u6d88\u606f\u7684message_id\n \"\"\"\n return await self.api.request('messageFromId', params={'id': id})\n\n @argument_validation\n async def count_message(self):\n \"\"\"\n :\u8bf4\u660e:\n\n 
\u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u63a5\u6536\u5e76\u7f13\u5b58\u7684\u6d88\u606f\u603b\u6570\uff0c\u6ce8\u610f\u4e0d\u5305\u542b\u88ab\u5220\u9664\u7684\n \"\"\"\n return await self.api.request('countMessage')\n\n @argument_validation\n async def friend_list(self) -> List[Dict[str, Any]]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u7684\u597d\u53cb\u5217\u8868\n\n :\u8fd4\u56de:\n\n - ``List[Dict[str, Any]]``: \u8fd4\u56de\u7684\u597d\u53cb\u5217\u8868\u6570\u636e\n \"\"\"\n return await self.api.request('friendList')\n\n @argument_validation\n async def group_list(self) -> List[Dict[str, Any]]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u7684\u7fa4\u5217\u8868\n\n :\u8fd4\u56de:\n\n - ``List[Dict[str, Any]]``: \u8fd4\u56de\u7684\u7fa4\u5217\u8868\u6570\u636e\n \"\"\"\n return await self.api.request('groupList')\n\n @argument_validation\n async def member_list(self, target: int) -> List[Dict[str, Any]]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6bot\u6307\u5b9a\u7fa4\u79cd\u7684\u6210\u5458\u5217\u8868\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n\n :\u8fd4\u56de:\n\n - ``List[Dict[str, Any]]``: \u8fd4\u56de\u7684\u7fa4\u6210\u5458\u5217\u8868\u6570\u636e\n \"\"\"\n return await self.api.request('memberList', params={'target': target})\n\n @argument_validation\n async def mute(self, target: int, member_id: int, time: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u6307\u5b9a\u7fa4\u7981\u8a00\u6307\u5b9a\u7fa4\u5458\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``member_id: int``: \u6307\u5b9a\u7fa4\u5458QQ\u53f7\n * ``time: int``: \u7981\u8a00\u65f6\u957f\uff0c\u5355\u4f4d\u4e3a\u79d2\uff0c\u6700\u591a30\u5929\n \"\"\"\n return await self.api.post('mute',\n params={\n 'target': target,\n 'memberId': member_id,\n 'time': time\n })\n\n @argument_validation\n async def unmute(self, target: int, member_id: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u6307\u5b9a\u7fa4\u89e3\u9664\u7fa4\u6210\u5458\u7981\u8a00\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``member_id: int``: \u6307\u5b9a\u7fa4\u5458QQ\u53f7\n \"\"\"\n return await self.api.post('unmute',\n params={\n 'target': target,\n 'memberId': member_id\n })\n\n @argument_validation\n async def kick(self, target: int, member_id: int, msg: str):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u79fb\u9664\u6307\u5b9a\u7fa4\u6210\u5458\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``member_id: int``: \u6307\u5b9a\u7fa4\u5458QQ\u53f7\n * ``msg: str``: \u4fe1\u606f\n \"\"\"\n return await self.api.post('kick',\n params={\n 'target': target,\n 'memberId': member_id,\n 'msg': msg\n })\n\n @argument_validation\n async def quit(self, target: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4f7fBot\u9000\u51fa\u7fa4\u804a\n\n :\u53c2\u6570:\n\n * ``target: int``: \u9000\u51fa\u7684\u7fa4\u53f7\n \"\"\"\n return await self.api.post('quit', params={'target': target})\n\n @argument_validation\n async def mute_all(self, target: int):\n \"\"\"\n :\u8bf4\u660e:\n\n 
\u4f7f\u7528\u6b64\u65b9\u6cd5\u4ee4\u6307\u5b9a\u7fa4\u8fdb\u884c\u5168\u4f53\u7981\u8a00\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n \"\"\"\n return await self.api.post('muteAll', params={'target': target})\n\n @argument_validation\n async def unmute_all(self, target: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4ee4\u6307\u5b9a\u7fa4\u89e3\u9664\u5168\u4f53\u7981\u8a00\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n \"\"\"\n return await self.api.post('unmuteAll', params={'target': target})\n\n @argument_validation\n async def group_config(self, target: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6\u7fa4\u8bbe\u7f6e\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n\n :\u8fd4\u56de:\n\n .. code-block:: json\n\n {\n \"name\": \"\u7fa4\u540d\u79f0\",\n \"announcement\": \"\u7fa4\u516c\u544a\",\n \"confessTalk\": true,\n \"allowMemberInvite\": true,\n \"autoApprove\": true,\n \"anonymousChat\": true\n }\n \"\"\"\n return await self.api.request('groupConfig', params={'target': target})\n\n @argument_validation\n async def modify_group_config(self, target: int, config: Dict[str, Any]):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4fee\u6539\u7fa4\u8bbe\u7f6e\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``config: Dict[str, Any]``: \u7fa4\u8bbe\u7f6e, \u683c\u5f0f\u89c1 ``group_config`` \u7684\u8fd4\u56de\u503c\n \"\"\"\n return await self.api.post('groupConfig',\n params={\n 'target': target,\n 'config': config\n })\n\n @argument_validation\n async def member_info(self, target: int, member_id: int):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u83b7\u53d6\u7fa4\u5458\u8d44\u6599\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``member_id: int``: \u7fa4\u5458QQ\u53f7\n\n :\u8fd4\u56de:\n\n .. code-block:: json\n\n {\n \"name\": \"\u7fa4\u540d\u7247\",\n \"specialTitle\": \"\u7fa4\u5934\u8854\"\n }\n \"\"\"\n return await self.api.request('memberInfo',\n params={\n 'target': target,\n 'memberId': member_id\n })\n\n @argument_validation\n async def modify_member_info(self, target: int, member_id: int,\n info: Dict[str, Any]):\n \"\"\"\n :\u8bf4\u660e:\n\n \u4f7f\u7528\u6b64\u65b9\u6cd5\u4fee\u6539\u7fa4\u5458\u8d44\u6599\uff08\u9700\u8981\u6709\u76f8\u5173\u6743\u9650\uff09\n\n :\u53c2\u6570:\n\n * ``target: int``: \u6307\u5b9a\u7fa4\u7684\u7fa4\u53f7\n * ``member_id: int``: \u7fa4\u5458QQ\u53f7\n * ``info: Dict[str, Any]``: \u7fa4\u5458\u8d44\u6599, \u683c\u5f0f\u89c1 ``member_info`` \u7684\u8fd4\u56de\u503c\n \"\"\"\n return await self.api.post('memberInfo',\n params={\n 'target': target,\n 'memberId': member_id,\n 'info': info\n })\n", "path": "packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py"}]} |
gh_patches_debug_1441 | rasdani/github-patches | git_diff | conda__conda-5232 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda config stack trace when can't write config file
This situation should be handled more gracefully: when `conda config` does not have permission to write the config file, the permission error surfaces as a raw traceback instead of a readable error message.
Thanks.
```
An unexpected error has occurred, please consider sending the
following traceback to the conda GitHub issue tracker at:
https://github.com/conda/conda/issues
Include the output of the command 'conda info' in your report.
Traceback (most recent call last):
File "/opt/anaconda/bin/conda", line 5, in <module>
sys.exit(main())
File "/opt/anaconda/lib/python2.7/site-packages/conda/cli/main.py", line 179, in main
args.func(args, p)
File "/opt/anaconda/lib/python2.7/site-packages/conda/cli/main_config.py", line 339, in execute
with open(rc_path, 'w') as rc:
IOError: [Errno 13] Permission denied: '/opt/anaconda/.condarc'
```
--- END ISSUE ---
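As a quick illustration added here for orientation (not part of the original report): the failure above is a plain `IOError`/`OSError` with errno 13 (`EACCES`) escaping from the config-writing step. A minimal, self-contained sketch of the same situation, using the path from the traceback purely as an example:

```python
# Illustrative sketch only: reproduce and catch the reported failure mode.
# Writing to a location the process cannot modify raises IOError/OSError
# with errno 13 (EACCES); conda currently lets that escape as a traceback.
rc_path = '/opt/anaconda/.condarc'  # example path taken from the report

try:
    with open(rc_path, 'w') as rc:
        rc.write('channels:\n  - defaults\n')
except (IOError, OSError) as exc:
    # This is the point where a readable error message is being asked for.
    print('Could not write %s (errno %s): %s' % (rc_path, exc.errno, exc))
```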
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/cli/main_config.py`
Content:
```
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6 from __future__ import absolute_import, division, print_function, unicode_literals
7
8 from argparse import SUPPRESS
9 import collections
10 import json
11 import os
12 from os.path import join
13 import sys
14 from textwrap import wrap
15
16 from .common import Completer, add_parser_json, stdout_json_success
17 from .. import CondaError
18 from .._vendor.auxlib.compat import isiterable
19 from .._vendor.auxlib.entity import EntityEncoder
20 from ..base.constants import CONDA_HOMEPAGE_URL
21 from ..base.context import context
22 from ..common.compat import iteritems, string_types, text_type
23 from ..common.configuration import pretty_list, pretty_map
24 from ..common.constants import NULL
25 from ..common.yaml import yaml_dump, yaml_load
26 from ..config import (rc_bool_keys, rc_list_keys, rc_other, rc_string_keys, sys_rc_path,
27 user_rc_path)
28 from ..exceptions import CondaKeyError, CondaValueError, CouldntParseError
29
30 descr = """
31 Modify configuration values in .condarc. This is modeled after the git
32 config command. Writes to the user .condarc file (%s) by default.
33
34 """ % user_rc_path
35
36 # Note, the extra whitespace in the list keys is on purpose. It's so the
37 # formatting from help2man is still valid YAML (otherwise it line wraps the
38 # keys like "- conda - defaults"). Technically the parser here still won't
39 # recognize it because it removes the indentation, but at least it will be
40 # valid.
41 additional_descr = """
42 See `conda config --describe` or %s/docs/config.html
43 for details on all the options that can go in .condarc.
44
45 Examples:
46
47 Display all configuration values as calculated and compiled:
48
49 conda config --show
50
51 Display all identified configuration sources:
52
53 conda config --show-sources
54
55 Describe all available configuration options:
56
57 conda config --describe
58
59 Add the conda-canary channel:
60
61 conda config --add channels conda-canary
62
63 Set the output verbosity to level 3 (highest):
64
65 conda config --set verbosity 3
66 """ % CONDA_HOMEPAGE_URL
67
68
69 class SingleValueKey(Completer):
70 def _get_items(self):
71 return rc_bool_keys + \
72 rc_string_keys + \
73 ['yes', 'no', 'on', 'off', 'true', 'false']
74
75
76 class ListKey(Completer):
77 def _get_items(self):
78 return rc_list_keys
79
80
81 class BoolOrListKey(Completer):
82 def __contains__(self, other):
83 return other in self.get_items()
84
85 def _get_items(self):
86 return rc_list_keys + rc_bool_keys
87
88
89 def configure_parser(sub_parsers):
90 p = sub_parsers.add_parser(
91 'config',
92 description=descr,
93 help=descr,
94 epilog=additional_descr,
95 )
96 add_parser_json(p)
97
98 # TODO: use argparse.FileType
99 location = p.add_mutually_exclusive_group()
100 location.add_argument(
101 "--system",
102 action="store_true",
103 help="""Write to the system .condarc file ({system}). Otherwise writes to the user
104 config file ({user}).""".format(system=sys_rc_path,
105 user=user_rc_path),
106 )
107 location.add_argument(
108 "--env",
109 action="store_true",
110 help="Write to the active conda environment .condarc file (%s). "
111 "If no environment is active, write to the user config file (%s)."
112 "" % (os.getenv('CONDA_PREFIX', "<no active environment>"), user_rc_path),
113 )
114 location.add_argument(
115 "--file",
116 action="store",
117 help="""Write to the given file. Otherwise writes to the user config file ({user})
118 or the file path given by the 'CONDARC' environment variable, if it is set
119 (default: %(default)s).""".format(user=user_rc_path),
120 default=os.environ.get('CONDARC', user_rc_path)
121 )
122
123 # XXX: Does this really have to be mutually exclusive. I think the below
124 # code will work even if it is a regular group (although combination of
125 # --add and --remove with the same keys will not be well-defined).
126 action = p.add_mutually_exclusive_group(required=True)
127 action.add_argument(
128 "--show",
129 action="store_true",
130 help="Display all configuration values as calculated and compiled.",
131 )
132 action.add_argument(
133 "--show-sources",
134 action="store_true",
135 help="Display all identified configuration sources.",
136 )
137 action.add_argument(
138 "--validate",
139 action="store_true",
140 help="Validate all configuration sources.",
141 )
142 action.add_argument(
143 "--describe",
144 action="store_true",
145 help="Describe available configuration parameters.",
146 )
147 action.add_argument(
148 "--get",
149 nargs='*',
150 action="store",
151 help="Get a configuration value.",
152 default=None,
153 metavar='KEY',
154 choices=BoolOrListKey()
155 )
156 action.add_argument(
157 "--append",
158 nargs=2,
159 action="append",
160 help="""Add one configuration value to the end of a list key.""",
161 default=[],
162 choices=ListKey(),
163 metavar=('KEY', 'VALUE'),
164 )
165 action.add_argument(
166 "--prepend", "--add",
167 nargs=2,
168 action="append",
169 help="""Add one configuration value to the beginning of a list key.""",
170 default=[],
171 choices=ListKey(),
172 metavar=('KEY', 'VALUE'),
173 )
174 action.add_argument(
175 "--set",
176 nargs=2,
177 action="append",
178 help="""Set a boolean or string key""",
179 default=[],
180 choices=SingleValueKey(),
181 metavar=('KEY', 'VALUE'),
182 )
183 action.add_argument(
184 "--remove",
185 nargs=2,
186 action="append",
187 help="""Remove a configuration value from a list key. This removes
188 all instances of the value.""",
189 default=[],
190 metavar=('KEY', 'VALUE'),
191 )
192 action.add_argument(
193 "--remove-key",
194 nargs=1,
195 action="append",
196 help="""Remove a configuration key (and all its values).""",
197 default=[],
198 metavar="KEY",
199 )
200
201 p.add_argument(
202 "-f", "--force",
203 action="store_true",
204 default=NULL,
205 help=SUPPRESS, # TODO: No longer used. Remove in a future release.
206 )
207
208 p.set_defaults(func=execute)
209
210
211 def execute(args, parser):
212 try:
213 execute_config(args, parser)
214 except (CouldntParseError, NotImplementedError) as e:
215 raise CondaError(e)
216
217
218 def format_dict(d):
219 lines = []
220 for k, v in iteritems(d):
221 if isinstance(v, collections.Mapping):
222 if v:
223 lines.append("%s:" % k)
224 lines.append(pretty_map(v))
225 else:
226 lines.append("%s: {}" % k)
227 elif isiterable(v):
228 if v:
229 lines.append("%s:" % k)
230 lines.append(pretty_list(v))
231 else:
232 lines.append("%s: []" % k)
233 else:
234 lines.append("%s: %s" % (k, v if v is not None else "None"))
235 return lines
236
237
238 def execute_config(args, parser):
239 json_warnings = []
240 json_get = {}
241
242 if args.show_sources:
243 if context.json:
244 print(json.dumps(context.collect_all(), sort_keys=True,
245 indent=2, separators=(',', ': ')))
246 else:
247 lines = []
248 for source, reprs in iteritems(context.collect_all()):
249 lines.append("==> %s <==" % source)
250 lines.extend(format_dict(reprs))
251 lines.append('')
252 print('\n'.join(lines))
253 return
254
255 if args.show:
256 from collections import OrderedDict
257
258 d = OrderedDict((key, getattr(context, key))
259 for key in context.list_parameters())
260 if context.json:
261 print(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '),
262 cls=EntityEncoder))
263 else:
264 # coerce channels
265 d['custom_channels'] = {k: text_type(v).replace(k, '') # TODO: the replace here isn't quite right # NOQA
266 for k, v in iteritems(d['custom_channels'])}
267 # TODO: custom_multichannels needs better formatting
268 d['custom_multichannels'] = {k: json.dumps([text_type(c) for c in chnls])
269 for k, chnls in iteritems(d['custom_multichannels'])}
270
271 print('\n'.join(format_dict(d)))
272 context.validate_configuration()
273 return
274
275 if args.describe:
276 paramater_names = context.list_parameters()
277 if context.json:
278 print(json.dumps([context.describe_parameter(name) for name in paramater_names],
279 sort_keys=True, indent=2, separators=(',', ': '),
280 cls=EntityEncoder))
281 else:
282 def clean_element_type(element_types):
283 _types = set()
284 for et in element_types:
285 _types.add('str') if isinstance(et, string_types) else _types.add('%s' % et)
286 return tuple(sorted(_types))
287
288 for name in paramater_names:
289 details = context.describe_parameter(name)
290 aliases = details['aliases']
291 string_delimiter = details.get('string_delimiter')
292 element_types = details['element_types']
293 if details['parameter_type'] == 'primitive':
294 print("%s (%s)" % (name, ', '.join(clean_element_type(element_types))))
295 else:
296 print("%s (%s: %s)" % (name, details['parameter_type'],
297 ', '.join(clean_element_type(element_types))))
298 def_str = ' default: %s' % json.dumps(details['default_value'], indent=2,
299 separators=(',', ': '),
300 cls=EntityEncoder)
301 print('\n '.join(def_str.split('\n')))
302 if aliases:
303 print(" aliases: %s" % ', '.join(aliases))
304 if string_delimiter:
305 print(" string delimiter: '%s'" % string_delimiter)
306 print('\n '.join(wrap(' ' + details['description'], 70)))
307 print()
308 return
309
310 if args.validate:
311 context.validate_all()
312 return
313
314 if args.system:
315 rc_path = sys_rc_path
316 elif args.env:
317 if 'CONDA_PREFIX' in os.environ:
318 rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')
319 else:
320 rc_path = user_rc_path
321 elif args.file:
322 rc_path = args.file
323 else:
324 rc_path = user_rc_path
325
326 # read existing condarc
327 if os.path.exists(rc_path):
328 with open(rc_path, 'r') as fh:
329 rc_config = yaml_load(fh) or {}
330 else:
331 rc_config = {}
332
333 # Get
334 if args.get is not None:
335 context.validate_all()
336 if args.get == []:
337 args.get = sorted(rc_config.keys())
338 for key in args.get:
339 if key not in rc_list_keys + rc_bool_keys + rc_string_keys:
340 if key not in rc_other:
341 message = "unknown key %s" % key
342 if not context.json:
343 print(message, file=sys.stderr)
344 else:
345 json_warnings.append(message)
346 continue
347 if key not in rc_config:
348 continue
349
350 if context.json:
351 json_get[key] = rc_config[key]
352 continue
353
354 if isinstance(rc_config[key], (bool, string_types)):
355 print("--set", key, rc_config[key])
356 else: # assume the key is a list-type
357 # Note, since conda config --add prepends, these are printed in
358 # the reverse order so that entering them in this order will
359 # recreate the same file
360 items = rc_config.get(key, [])
361 numitems = len(items)
362 for q, item in enumerate(reversed(items)):
363 # Use repr so that it can be pasted back in to conda config --add
364 if key == "channels" and q in (0, numitems-1):
365 print("--add", key, repr(item),
366 " # lowest priority" if q == 0 else " # highest priority")
367 else:
368 print("--add", key, repr(item))
369
370 # prepend, append, add
371 for arg, prepend in zip((args.prepend, args.append), (True, False)):
372 sequence_parameters = [p for p in context.list_parameters()
373 if context.describe_parameter(p)['parameter_type'] == 'sequence']
374 for key, item in arg:
375 if key == 'channels' and key not in rc_config:
376 rc_config[key] = ['defaults']
377 if key not in sequence_parameters:
378 raise CondaValueError("Key '%s' is not a known sequence parameter." % key)
379 if not isinstance(rc_config.get(key, []), list):
380 bad = rc_config[key].__class__.__name__
381 raise CouldntParseError("key %r should be a list, not %s." % (key, bad))
382 if key == 'default_channels' and rc_path != sys_rc_path:
383 msg = "'default_channels' is only configurable for system installs"
384 raise NotImplementedError(msg)
385 arglist = rc_config.setdefault(key, [])
386 if item in arglist:
387 # Right now, all list keys should not contain duplicates
388 message = "Warning: '%s' already in '%s' list, moving to the %s" % (
389 item, key, "top" if prepend else "bottom")
390 arglist = rc_config[key] = [p for p in arglist if p != item]
391 if not context.json:
392 print(message, file=sys.stderr)
393 else:
394 json_warnings.append(message)
395 arglist.insert(0 if prepend else len(arglist), item)
396
397 # Set
398 for key, item in args.set:
399 primitive_parameters = [p for p in context.list_parameters()
400 if context.describe_parameter(p)['parameter_type'] == 'primitive']
401 if key not in primitive_parameters:
402 raise CondaValueError("Key '%s' is not a known primitive parameter." % key)
403 value = context.typify_parameter(key, item)
404 rc_config[key] = value
405
406 # Remove
407 for key, item in args.remove:
408 if key not in rc_config:
409 if key != 'channels':
410 raise CondaKeyError(key, "key %r is not in the config file" % key)
411 rc_config[key] = ['defaults']
412 if item not in rc_config[key]:
413 raise CondaKeyError(key, "%r is not in the %r key of the config file" %
414 (item, key))
415 rc_config[key] = [i for i in rc_config[key] if i != item]
416
417 # Remove Key
418 for key, in args.remove_key:
419 if key not in rc_config:
420 raise CondaKeyError(key, "key %r is not in the config file" %
421 key)
422 del rc_config[key]
423
424 # config.rc_keys
425 if not args.get:
426 with open(rc_path, 'w') as rc:
427 rc.write(yaml_dump(rc_config))
428
429 if context.json:
430 stdout_json_success(
431 rc_path=rc_path,
432 warnings=json_warnings,
433 get=json_get
434 )
435 return
436
```
--- END FILES ---
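Before the reference patch below, a hedged sketch of the fix direction implied by the issue: wrap the unguarded write around lines 425-427 of the file above so OS-level failures are re-raised as a `CondaError` with a readable message. The standalone helper signature is invented only to keep the sketch self-contained; in `main_config.py` itself, `CondaError`, `yaml_dump` and the surrounding variables already exist at that point.

```python
def write_condarc(rc_path, rc_config, yaml_dump, CondaError):
    """Sketch: write rc_config to rc_path, converting OS-level failures
    (e.g. EACCES on a system-wide .condarc) into a friendly CondaError."""
    try:
        with open(rc_path, 'w') as rc:
            rc.write(yaml_dump(rc_config))
    except (IOError, OSError) as exc:
        raise CondaError('Cannot write to condarc file at %s\n'
                         'Caused by %r' % (rc_path, exc))
```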
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/cli/main_config.py b/conda/cli/main_config.py
--- a/conda/cli/main_config.py
+++ b/conda/cli/main_config.py
@@ -423,8 +423,12 @@
# config.rc_keys
if not args.get:
- with open(rc_path, 'w') as rc:
- rc.write(yaml_dump(rc_config))
+ try:
+ with open(rc_path, 'w') as rc:
+ rc.write(yaml_dump(rc_config))
+ except (IOError, OSError) as e:
+ raise CondaError('Cannot write to condarc file at %s\n'
+ 'Caused by %r' % (rc_path, e))
if context.json:
stdout_json_success(
| {"golden_diff": "diff --git a/conda/cli/main_config.py b/conda/cli/main_config.py\n--- a/conda/cli/main_config.py\n+++ b/conda/cli/main_config.py\n@@ -423,8 +423,12 @@\n \n # config.rc_keys\n if not args.get:\n- with open(rc_path, 'w') as rc:\n- rc.write(yaml_dump(rc_config))\n+ try:\n+ with open(rc_path, 'w') as rc:\n+ rc.write(yaml_dump(rc_config))\n+ except (IOError, OSError) as e:\n+ raise CondaError('Cannot write to condarc file at %s\\n'\n+ 'Caused by %r' % (rc_path, e))\n \n if context.json:\n stdout_json_success(\n", "issue": "conda config stack trace when can't write config file\nThis situation should be handled nicer. `conda config` doesn't have permission to write the config file. \n\nThanks.\n\n```\nAn unexpected error has occurred, please consider sending the\nfollowing traceback to the conda GitHub issue tracker at:\n\n https://github.com/conda/conda/issues\n\nInclude the output of the command 'conda info' in your report.\n\n\nTraceback (most recent call last):\n File \"/opt/anaconda/bin/conda\", line 5, in <module>\n sys.exit(main())\n File \"/opt/anaconda/lib/python2.7/site-packages/conda/cli/main.py\", line 179, in main\n args.func(args, p)\n File \"/opt/anaconda/lib/python2.7/site-packages/conda/cli/main_config.py\", line 339, in execute\n with open(rc_path, 'w') as rc:\nIOError: [Errno 13] Permission denied: '/opt/anaconda/.condarc'\n```\n\n<!---\n@huboard:{\"order\":9.781875224740546e-29,\"custom_state\":\"\"}\n-->\n\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom argparse import SUPPRESS\nimport collections\nimport json\nimport os\nfrom os.path import join\nimport sys\nfrom textwrap import wrap\n\nfrom .common import Completer, add_parser_json, stdout_json_success\nfrom .. import CondaError\nfrom .._vendor.auxlib.compat import isiterable\nfrom .._vendor.auxlib.entity import EntityEncoder\nfrom ..base.constants import CONDA_HOMEPAGE_URL\nfrom ..base.context import context\nfrom ..common.compat import iteritems, string_types, text_type\nfrom ..common.configuration import pretty_list, pretty_map\nfrom ..common.constants import NULL\nfrom ..common.yaml import yaml_dump, yaml_load\nfrom ..config import (rc_bool_keys, rc_list_keys, rc_other, rc_string_keys, sys_rc_path,\n user_rc_path)\nfrom ..exceptions import CondaKeyError, CondaValueError, CouldntParseError\n\ndescr = \"\"\"\nModify configuration values in .condarc. This is modeled after the git\nconfig command. Writes to the user .condarc file (%s) by default.\n\n\"\"\" % user_rc_path\n\n# Note, the extra whitespace in the list keys is on purpose. It's so the\n# formatting from help2man is still valid YAML (otherwise it line wraps the\n# keys like \"- conda - defaults\"). 
Technically the parser here still won't\n# recognize it because it removes the indentation, but at least it will be\n# valid.\nadditional_descr = \"\"\"\nSee `conda config --describe` or %s/docs/config.html\nfor details on all the options that can go in .condarc.\n\nExamples:\n\nDisplay all configuration values as calculated and compiled:\n\n conda config --show\n\nDisplay all identified configuration sources:\n\n conda config --show-sources\n\nDescribe all available configuration options:\n\n conda config --describe\n\nAdd the conda-canary channel:\n\n conda config --add channels conda-canary\n\nSet the output verbosity to level 3 (highest):\n\n conda config --set verbosity 3\n\"\"\" % CONDA_HOMEPAGE_URL\n\n\nclass SingleValueKey(Completer):\n def _get_items(self):\n return rc_bool_keys + \\\n rc_string_keys + \\\n ['yes', 'no', 'on', 'off', 'true', 'false']\n\n\nclass ListKey(Completer):\n def _get_items(self):\n return rc_list_keys\n\n\nclass BoolOrListKey(Completer):\n def __contains__(self, other):\n return other in self.get_items()\n\n def _get_items(self):\n return rc_list_keys + rc_bool_keys\n\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'config',\n description=descr,\n help=descr,\n epilog=additional_descr,\n )\n add_parser_json(p)\n\n # TODO: use argparse.FileType\n location = p.add_mutually_exclusive_group()\n location.add_argument(\n \"--system\",\n action=\"store_true\",\n help=\"\"\"Write to the system .condarc file ({system}). Otherwise writes to the user\n config file ({user}).\"\"\".format(system=sys_rc_path,\n user=user_rc_path),\n )\n location.add_argument(\n \"--env\",\n action=\"store_true\",\n help=\"Write to the active conda environment .condarc file (%s). \"\n \"If no environment is active, write to the user config file (%s).\"\n \"\" % (os.getenv('CONDA_PREFIX', \"<no active environment>\"), user_rc_path),\n )\n location.add_argument(\n \"--file\",\n action=\"store\",\n help=\"\"\"Write to the given file. Otherwise writes to the user config file ({user})\nor the file path given by the 'CONDARC' environment variable, if it is set\n(default: %(default)s).\"\"\".format(user=user_rc_path),\n default=os.environ.get('CONDARC', user_rc_path)\n )\n\n # XXX: Does this really have to be mutually exclusive. 
I think the below\n # code will work even if it is a regular group (although combination of\n # --add and --remove with the same keys will not be well-defined).\n action = p.add_mutually_exclusive_group(required=True)\n action.add_argument(\n \"--show\",\n action=\"store_true\",\n help=\"Display all configuration values as calculated and compiled.\",\n )\n action.add_argument(\n \"--show-sources\",\n action=\"store_true\",\n help=\"Display all identified configuration sources.\",\n )\n action.add_argument(\n \"--validate\",\n action=\"store_true\",\n help=\"Validate all configuration sources.\",\n )\n action.add_argument(\n \"--describe\",\n action=\"store_true\",\n help=\"Describe available configuration parameters.\",\n )\n action.add_argument(\n \"--get\",\n nargs='*',\n action=\"store\",\n help=\"Get a configuration value.\",\n default=None,\n metavar='KEY',\n choices=BoolOrListKey()\n )\n action.add_argument(\n \"--append\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Add one configuration value to the end of a list key.\"\"\",\n default=[],\n choices=ListKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--prepend\", \"--add\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Add one configuration value to the beginning of a list key.\"\"\",\n default=[],\n choices=ListKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--set\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Set a boolean or string key\"\"\",\n default=[],\n choices=SingleValueKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--remove\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Remove a configuration value from a list key. This removes\n all instances of the value.\"\"\",\n default=[],\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--remove-key\",\n nargs=1,\n action=\"append\",\n help=\"\"\"Remove a configuration key (and all its values).\"\"\",\n default=[],\n metavar=\"KEY\",\n )\n\n p.add_argument(\n \"-f\", \"--force\",\n action=\"store_true\",\n default=NULL,\n help=SUPPRESS, # TODO: No longer used. 
Remove in a future release.\n )\n\n p.set_defaults(func=execute)\n\n\ndef execute(args, parser):\n try:\n execute_config(args, parser)\n except (CouldntParseError, NotImplementedError) as e:\n raise CondaError(e)\n\n\ndef format_dict(d):\n lines = []\n for k, v in iteritems(d):\n if isinstance(v, collections.Mapping):\n if v:\n lines.append(\"%s:\" % k)\n lines.append(pretty_map(v))\n else:\n lines.append(\"%s: {}\" % k)\n elif isiterable(v):\n if v:\n lines.append(\"%s:\" % k)\n lines.append(pretty_list(v))\n else:\n lines.append(\"%s: []\" % k)\n else:\n lines.append(\"%s: %s\" % (k, v if v is not None else \"None\"))\n return lines\n\n\ndef execute_config(args, parser):\n json_warnings = []\n json_get = {}\n\n if args.show_sources:\n if context.json:\n print(json.dumps(context.collect_all(), sort_keys=True,\n indent=2, separators=(',', ': ')))\n else:\n lines = []\n for source, reprs in iteritems(context.collect_all()):\n lines.append(\"==> %s <==\" % source)\n lines.extend(format_dict(reprs))\n lines.append('')\n print('\\n'.join(lines))\n return\n\n if args.show:\n from collections import OrderedDict\n\n d = OrderedDict((key, getattr(context, key))\n for key in context.list_parameters())\n if context.json:\n print(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '),\n cls=EntityEncoder))\n else:\n # coerce channels\n d['custom_channels'] = {k: text_type(v).replace(k, '') # TODO: the replace here isn't quite right # NOQA\n for k, v in iteritems(d['custom_channels'])}\n # TODO: custom_multichannels needs better formatting\n d['custom_multichannels'] = {k: json.dumps([text_type(c) for c in chnls])\n for k, chnls in iteritems(d['custom_multichannels'])}\n\n print('\\n'.join(format_dict(d)))\n context.validate_configuration()\n return\n\n if args.describe:\n paramater_names = context.list_parameters()\n if context.json:\n print(json.dumps([context.describe_parameter(name) for name in paramater_names],\n sort_keys=True, indent=2, separators=(',', ': '),\n cls=EntityEncoder))\n else:\n def clean_element_type(element_types):\n _types = set()\n for et in element_types:\n _types.add('str') if isinstance(et, string_types) else _types.add('%s' % et)\n return tuple(sorted(_types))\n\n for name in paramater_names:\n details = context.describe_parameter(name)\n aliases = details['aliases']\n string_delimiter = details.get('string_delimiter')\n element_types = details['element_types']\n if details['parameter_type'] == 'primitive':\n print(\"%s (%s)\" % (name, ', '.join(clean_element_type(element_types))))\n else:\n print(\"%s (%s: %s)\" % (name, details['parameter_type'],\n ', '.join(clean_element_type(element_types))))\n def_str = ' default: %s' % json.dumps(details['default_value'], indent=2,\n separators=(',', ': '),\n cls=EntityEncoder)\n print('\\n '.join(def_str.split('\\n')))\n if aliases:\n print(\" aliases: %s\" % ', '.join(aliases))\n if string_delimiter:\n print(\" string delimiter: '%s'\" % string_delimiter)\n print('\\n '.join(wrap(' ' + details['description'], 70)))\n print()\n return\n\n if args.validate:\n context.validate_all()\n return\n\n if args.system:\n rc_path = sys_rc_path\n elif args.env:\n if 'CONDA_PREFIX' in os.environ:\n rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')\n else:\n rc_path = user_rc_path\n elif args.file:\n rc_path = args.file\n else:\n rc_path = user_rc_path\n\n # read existing condarc\n if os.path.exists(rc_path):\n with open(rc_path, 'r') as fh:\n rc_config = yaml_load(fh) or {}\n else:\n rc_config = {}\n\n # Get\n if args.get is not 
None:\n context.validate_all()\n if args.get == []:\n args.get = sorted(rc_config.keys())\n for key in args.get:\n if key not in rc_list_keys + rc_bool_keys + rc_string_keys:\n if key not in rc_other:\n message = \"unknown key %s\" % key\n if not context.json:\n print(message, file=sys.stderr)\n else:\n json_warnings.append(message)\n continue\n if key not in rc_config:\n continue\n\n if context.json:\n json_get[key] = rc_config[key]\n continue\n\n if isinstance(rc_config[key], (bool, string_types)):\n print(\"--set\", key, rc_config[key])\n else: # assume the key is a list-type\n # Note, since conda config --add prepends, these are printed in\n # the reverse order so that entering them in this order will\n # recreate the same file\n items = rc_config.get(key, [])\n numitems = len(items)\n for q, item in enumerate(reversed(items)):\n # Use repr so that it can be pasted back in to conda config --add\n if key == \"channels\" and q in (0, numitems-1):\n print(\"--add\", key, repr(item),\n \" # lowest priority\" if q == 0 else \" # highest priority\")\n else:\n print(\"--add\", key, repr(item))\n\n # prepend, append, add\n for arg, prepend in zip((args.prepend, args.append), (True, False)):\n sequence_parameters = [p for p in context.list_parameters()\n if context.describe_parameter(p)['parameter_type'] == 'sequence']\n for key, item in arg:\n if key == 'channels' and key not in rc_config:\n rc_config[key] = ['defaults']\n if key not in sequence_parameters:\n raise CondaValueError(\"Key '%s' is not a known sequence parameter.\" % key)\n if not isinstance(rc_config.get(key, []), list):\n bad = rc_config[key].__class__.__name__\n raise CouldntParseError(\"key %r should be a list, not %s.\" % (key, bad))\n if key == 'default_channels' and rc_path != sys_rc_path:\n msg = \"'default_channels' is only configurable for system installs\"\n raise NotImplementedError(msg)\n arglist = rc_config.setdefault(key, [])\n if item in arglist:\n # Right now, all list keys should not contain duplicates\n message = \"Warning: '%s' already in '%s' list, moving to the %s\" % (\n item, key, \"top\" if prepend else \"bottom\")\n arglist = rc_config[key] = [p for p in arglist if p != item]\n if not context.json:\n print(message, file=sys.stderr)\n else:\n json_warnings.append(message)\n arglist.insert(0 if prepend else len(arglist), item)\n\n # Set\n for key, item in args.set:\n primitive_parameters = [p for p in context.list_parameters()\n if context.describe_parameter(p)['parameter_type'] == 'primitive']\n if key not in primitive_parameters:\n raise CondaValueError(\"Key '%s' is not a known primitive parameter.\" % key)\n value = context.typify_parameter(key, item)\n rc_config[key] = value\n\n # Remove\n for key, item in args.remove:\n if key not in rc_config:\n if key != 'channels':\n raise CondaKeyError(key, \"key %r is not in the config file\" % key)\n rc_config[key] = ['defaults']\n if item not in rc_config[key]:\n raise CondaKeyError(key, \"%r is not in the %r key of the config file\" %\n (item, key))\n rc_config[key] = [i for i in rc_config[key] if i != item]\n\n # Remove Key\n for key, in args.remove_key:\n if key not in rc_config:\n raise CondaKeyError(key, \"key %r is not in the config file\" %\n key)\n del rc_config[key]\n\n # config.rc_keys\n if not args.get:\n with open(rc_path, 'w') as rc:\n rc.write(yaml_dump(rc_config))\n\n if context.json:\n stdout_json_success(\n rc_path=rc_path,\n warnings=json_warnings,\n get=json_get\n )\n return\n", "path": "conda/cli/main_config.py"}], "after_files": 
[{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom argparse import SUPPRESS\nimport collections\nimport json\nimport os\nfrom os.path import join\nimport sys\nfrom textwrap import wrap\n\nfrom .common import Completer, add_parser_json, stdout_json_success\nfrom .. import CondaError\nfrom .._vendor.auxlib.compat import isiterable\nfrom .._vendor.auxlib.entity import EntityEncoder\nfrom ..base.constants import CONDA_HOMEPAGE_URL\nfrom ..base.context import context\nfrom ..common.compat import iteritems, string_types, text_type\nfrom ..common.configuration import pretty_list, pretty_map\nfrom ..common.constants import NULL\nfrom ..common.yaml import yaml_dump, yaml_load\nfrom ..config import (rc_bool_keys, rc_list_keys, rc_other, rc_string_keys, sys_rc_path,\n user_rc_path)\nfrom ..exceptions import CondaKeyError, CondaValueError, CouldntParseError\n\ndescr = \"\"\"\nModify configuration values in .condarc. This is modeled after the git\nconfig command. Writes to the user .condarc file (%s) by default.\n\n\"\"\" % user_rc_path\n\n# Note, the extra whitespace in the list keys is on purpose. It's so the\n# formatting from help2man is still valid YAML (otherwise it line wraps the\n# keys like \"- conda - defaults\"). Technically the parser here still won't\n# recognize it because it removes the indentation, but at least it will be\n# valid.\nadditional_descr = \"\"\"\nSee `conda config --describe` or %s/docs/config.html\nfor details on all the options that can go in .condarc.\n\nExamples:\n\nDisplay all configuration values as calculated and compiled:\n\n conda config --show\n\nDisplay all identified configuration sources:\n\n conda config --show-sources\n\nDescribe all available configuration options:\n\n conda config --describe\n\nAdd the conda-canary channel:\n\n conda config --add channels conda-canary\n\nSet the output verbosity to level 3 (highest):\n\n conda config --set verbosity 3\n\"\"\" % CONDA_HOMEPAGE_URL\n\n\nclass SingleValueKey(Completer):\n def _get_items(self):\n return rc_bool_keys + \\\n rc_string_keys + \\\n ['yes', 'no', 'on', 'off', 'true', 'false']\n\n\nclass ListKey(Completer):\n def _get_items(self):\n return rc_list_keys\n\n\nclass BoolOrListKey(Completer):\n def __contains__(self, other):\n return other in self.get_items()\n\n def _get_items(self):\n return rc_list_keys + rc_bool_keys\n\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'config',\n description=descr,\n help=descr,\n epilog=additional_descr,\n )\n add_parser_json(p)\n\n # TODO: use argparse.FileType\n location = p.add_mutually_exclusive_group()\n location.add_argument(\n \"--system\",\n action=\"store_true\",\n help=\"\"\"Write to the system .condarc file ({system}). Otherwise writes to the user\n config file ({user}).\"\"\".format(system=sys_rc_path,\n user=user_rc_path),\n )\n location.add_argument(\n \"--env\",\n action=\"store_true\",\n help=\"Write to the active conda environment .condarc file (%s). \"\n \"If no environment is active, write to the user config file (%s).\"\n \"\" % (os.getenv('CONDA_PREFIX', \"<no active environment>\"), user_rc_path),\n )\n location.add_argument(\n \"--file\",\n action=\"store\",\n help=\"\"\"Write to the given file. 
Otherwise writes to the user config file ({user})\nor the file path given by the 'CONDARC' environment variable, if it is set\n(default: %(default)s).\"\"\".format(user=user_rc_path),\n default=os.environ.get('CONDARC', user_rc_path)\n )\n\n # XXX: Does this really have to be mutually exclusive. I think the below\n # code will work even if it is a regular group (although combination of\n # --add and --remove with the same keys will not be well-defined).\n action = p.add_mutually_exclusive_group(required=True)\n action.add_argument(\n \"--show\",\n action=\"store_true\",\n help=\"Display all configuration values as calculated and compiled.\",\n )\n action.add_argument(\n \"--show-sources\",\n action=\"store_true\",\n help=\"Display all identified configuration sources.\",\n )\n action.add_argument(\n \"--validate\",\n action=\"store_true\",\n help=\"Validate all configuration sources.\",\n )\n action.add_argument(\n \"--describe\",\n action=\"store_true\",\n help=\"Describe available configuration parameters.\",\n )\n action.add_argument(\n \"--get\",\n nargs='*',\n action=\"store\",\n help=\"Get a configuration value.\",\n default=None,\n metavar='KEY',\n choices=BoolOrListKey()\n )\n action.add_argument(\n \"--append\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Add one configuration value to the end of a list key.\"\"\",\n default=[],\n choices=ListKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--prepend\", \"--add\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Add one configuration value to the beginning of a list key.\"\"\",\n default=[],\n choices=ListKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--set\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Set a boolean or string key\"\"\",\n default=[],\n choices=SingleValueKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--remove\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Remove a configuration value from a list key. This removes\n all instances of the value.\"\"\",\n default=[],\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--remove-key\",\n nargs=1,\n action=\"append\",\n help=\"\"\"Remove a configuration key (and all its values).\"\"\",\n default=[],\n metavar=\"KEY\",\n )\n\n p.add_argument(\n \"-f\", \"--force\",\n action=\"store_true\",\n default=NULL,\n help=SUPPRESS, # TODO: No longer used. 
Remove in a future release.\n )\n\n p.set_defaults(func=execute)\n\n\ndef execute(args, parser):\n try:\n execute_config(args, parser)\n except (CouldntParseError, NotImplementedError) as e:\n raise CondaError(e)\n\n\ndef format_dict(d):\n lines = []\n for k, v in iteritems(d):\n if isinstance(v, collections.Mapping):\n if v:\n lines.append(\"%s:\" % k)\n lines.append(pretty_map(v))\n else:\n lines.append(\"%s: {}\" % k)\n elif isiterable(v):\n if v:\n lines.append(\"%s:\" % k)\n lines.append(pretty_list(v))\n else:\n lines.append(\"%s: []\" % k)\n else:\n lines.append(\"%s: %s\" % (k, v if v is not None else \"None\"))\n return lines\n\n\ndef execute_config(args, parser):\n json_warnings = []\n json_get = {}\n\n if args.show_sources:\n if context.json:\n print(json.dumps(context.collect_all(), sort_keys=True,\n indent=2, separators=(',', ': ')))\n else:\n lines = []\n for source, reprs in iteritems(context.collect_all()):\n lines.append(\"==> %s <==\" % source)\n lines.extend(format_dict(reprs))\n lines.append('')\n print('\\n'.join(lines))\n return\n\n if args.show:\n from collections import OrderedDict\n\n d = OrderedDict((key, getattr(context, key))\n for key in context.list_parameters())\n if context.json:\n print(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '),\n cls=EntityEncoder))\n else:\n # coerce channels\n d['custom_channels'] = {k: text_type(v).replace(k, '') # TODO: the replace here isn't quite right # NOQA\n for k, v in iteritems(d['custom_channels'])}\n # TODO: custom_multichannels needs better formatting\n d['custom_multichannels'] = {k: json.dumps([text_type(c) for c in chnls])\n for k, chnls in iteritems(d['custom_multichannels'])}\n\n print('\\n'.join(format_dict(d)))\n context.validate_configuration()\n return\n\n if args.describe:\n paramater_names = context.list_parameters()\n if context.json:\n print(json.dumps([context.describe_parameter(name) for name in paramater_names],\n sort_keys=True, indent=2, separators=(',', ': '),\n cls=EntityEncoder))\n else:\n def clean_element_type(element_types):\n _types = set()\n for et in element_types:\n _types.add('str') if isinstance(et, string_types) else _types.add('%s' % et)\n return tuple(sorted(_types))\n\n for name in paramater_names:\n details = context.describe_parameter(name)\n aliases = details['aliases']\n string_delimiter = details.get('string_delimiter')\n element_types = details['element_types']\n if details['parameter_type'] == 'primitive':\n print(\"%s (%s)\" % (name, ', '.join(clean_element_type(element_types))))\n else:\n print(\"%s (%s: %s)\" % (name, details['parameter_type'],\n ', '.join(clean_element_type(element_types))))\n def_str = ' default: %s' % json.dumps(details['default_value'], indent=2,\n separators=(',', ': '),\n cls=EntityEncoder)\n print('\\n '.join(def_str.split('\\n')))\n if aliases:\n print(\" aliases: %s\" % ', '.join(aliases))\n if string_delimiter:\n print(\" string delimiter: '%s'\" % string_delimiter)\n print('\\n '.join(wrap(' ' + details['description'], 70)))\n print()\n return\n\n if args.validate:\n context.validate_all()\n return\n\n if args.system:\n rc_path = sys_rc_path\n elif args.env:\n if 'CONDA_PREFIX' in os.environ:\n rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')\n else:\n rc_path = user_rc_path\n elif args.file:\n rc_path = args.file\n else:\n rc_path = user_rc_path\n\n # read existing condarc\n if os.path.exists(rc_path):\n with open(rc_path, 'r') as fh:\n rc_config = yaml_load(fh) or {}\n else:\n rc_config = {}\n\n # Get\n if args.get is not 
None:\n context.validate_all()\n if args.get == []:\n args.get = sorted(rc_config.keys())\n for key in args.get:\n if key not in rc_list_keys + rc_bool_keys + rc_string_keys:\n if key not in rc_other:\n message = \"unknown key %s\" % key\n if not context.json:\n print(message, file=sys.stderr)\n else:\n json_warnings.append(message)\n continue\n if key not in rc_config:\n continue\n\n if context.json:\n json_get[key] = rc_config[key]\n continue\n\n if isinstance(rc_config[key], (bool, string_types)):\n print(\"--set\", key, rc_config[key])\n else: # assume the key is a list-type\n # Note, since conda config --add prepends, these are printed in\n # the reverse order so that entering them in this order will\n # recreate the same file\n items = rc_config.get(key, [])\n numitems = len(items)\n for q, item in enumerate(reversed(items)):\n # Use repr so that it can be pasted back in to conda config --add\n if key == \"channels\" and q in (0, numitems-1):\n print(\"--add\", key, repr(item),\n \" # lowest priority\" if q == 0 else \" # highest priority\")\n else:\n print(\"--add\", key, repr(item))\n\n # prepend, append, add\n for arg, prepend in zip((args.prepend, args.append), (True, False)):\n sequence_parameters = [p for p in context.list_parameters()\n if context.describe_parameter(p)['parameter_type'] == 'sequence']\n for key, item in arg:\n if key == 'channels' and key not in rc_config:\n rc_config[key] = ['defaults']\n if key not in sequence_parameters:\n raise CondaValueError(\"Key '%s' is not a known sequence parameter.\" % key)\n if not isinstance(rc_config.get(key, []), list):\n bad = rc_config[key].__class__.__name__\n raise CouldntParseError(\"key %r should be a list, not %s.\" % (key, bad))\n if key == 'default_channels' and rc_path != sys_rc_path:\n msg = \"'default_channels' is only configurable for system installs\"\n raise NotImplementedError(msg)\n arglist = rc_config.setdefault(key, [])\n if item in arglist:\n # Right now, all list keys should not contain duplicates\n message = \"Warning: '%s' already in '%s' list, moving to the %s\" % (\n item, key, \"top\" if prepend else \"bottom\")\n arglist = rc_config[key] = [p for p in arglist if p != item]\n if not context.json:\n print(message, file=sys.stderr)\n else:\n json_warnings.append(message)\n arglist.insert(0 if prepend else len(arglist), item)\n\n # Set\n for key, item in args.set:\n primitive_parameters = [p for p in context.list_parameters()\n if context.describe_parameter(p)['parameter_type'] == 'primitive']\n if key not in primitive_parameters:\n raise CondaValueError(\"Key '%s' is not a known primitive parameter.\" % key)\n value = context.typify_parameter(key, item)\n rc_config[key] = value\n\n # Remove\n for key, item in args.remove:\n if key not in rc_config:\n if key != 'channels':\n raise CondaKeyError(key, \"key %r is not in the config file\" % key)\n rc_config[key] = ['defaults']\n if item not in rc_config[key]:\n raise CondaKeyError(key, \"%r is not in the %r key of the config file\" %\n (item, key))\n rc_config[key] = [i for i in rc_config[key] if i != item]\n\n # Remove Key\n for key, in args.remove_key:\n if key not in rc_config:\n raise CondaKeyError(key, \"key %r is not in the config file\" %\n key)\n del rc_config[key]\n\n # config.rc_keys\n if not args.get:\n try:\n with open(rc_path, 'w') as rc:\n rc.write(yaml_dump(rc_config))\n except (IOError, OSError) as e:\n raise CondaError('Cannot write to condarc file at %s\\n'\n 'Caused by %r' % (rc_path, e))\n\n if context.json:\n stdout_json_success(\n 
rc_path=rc_path,\n warnings=json_warnings,\n get=json_get\n )\n return\n", "path": "conda/cli/main_config.py"}]} |
gh_patches_debug_1442 | rasdani/github-patches | git_diff | evennia__evennia-3042 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] "evennia xyzgrid help" causes TypeError: NoneType takes no arguments
#### Describe the bug
A fresh migration from git master to main followed by installing xyzgrid prevents the `evennia xyzgrid` launcher commands from working. For example, `evennia xyzgrid help` fails with `TypeError: NoneType takes no arguments`.
#### To Reproduce
1. Migrated from git master branch to main branch for 1.x release of Evennia.
2. Installed the `[extra]` dependencies (these used to be listed in `requirements_extra`).
At this point, I can run the server and log in.
3. Added the xyzgrid command set and restarted.
'path', 'goto', 'map' are seen in the command list. The Limbo room does not have a map. Everything appears to work fine.
4. Modified server/conf/settings.py.
The `evennia xyzgrid` launcher command is now available.
Running any `evennia xyzgrid` command, such as `evennia xyzgrid help`, then fails with:
TypeError: NoneType takes no arguments
#### Expected behavior
'evennia xyzgrid <command>' should call the xyzgrid command.
#### Environment, Evennia version, OS etc
Evennia 1.0.1 (rev 38011cc48d)
OS: nt
Python: 3.11.1
Twisted: 22.10.0
Django: 4.1.4
#### Additional context
This is based on helix4's message in #general on Discord. I added my current steps, which seem to reproduce the same issue, down below. Here is the original message from helix4, with steps for reproducing on the older version of the code.
I am trying to test XYZGrid on a brand new install but failing. 1. Cloned the single branch of evennia-develop and initiated an Evennia game. 2. Installed requirements_extra and migrated; I can run the server and log in. 3. Added the command set and reloaded; I see path, goto and map in game. The Limbo room does not have a map; everything seems to work well. 4. Modified server/conf/settings.py, and xyzgrid is now available.
When I use xyzgrid, such as evennia xyzgrid help, or any other xyzgrid command:
from evennia.utils.eveditor import EvEditor
File "/home/ubuntu/3ProjectMUD/evennia/evennia/utils/eveditor.py", line 201, in <module>
class SaveYesNoCmdSet(CmdSet):
TypeError: NoneType takes no arguments
Original message
https://discord.com/channels/246323978879107073/246323978879107073/937578545704730624
Griatch's response
https://discord.com/channels/246323978879107073/246323978879107073/937610453184561183
Steps:
1. Migrated from git master branch to main branch for 1.x release of Evennia.
2. Installed the `[extra]` dependencies (these used to be listed in `requirements_extra`).
At this point, I can run the server and log in.
3. Added the xyzgrid command set and restarted.
'path', 'goto', 'map' are seen in the command list. The Limbo room does not have a map. Everything appears to work fine.
4. Modified server/conf/settings.py.
The `evennia xyzgrid` launcher command is now available.
When I use xyzgrid, such as 'evennia xyzgrid help', or any other xyzgrid command:
Traceback (most recent call last):
File "C:\muddev\evenv\Scripts\evennia_launcher.py", line 18, in <module>
main()
File "C:\muddev\evennia\evennia\server\evennia_launcher.py", line 2422, in main
if run_custom_commands(option, *unknown_args):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\muddev\evennia\evennia\server\evennia_launcher.py", line 2023, in run_custom_commands
mod = importlib.import_module(modpath)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
...
File "<frozen importlib._bootstrap>", line 1206, in _gcd_import
File "<frozen importlib._bootstrap>", line 1178, in _find_and_load
File "<frozen importlib._bootstrap>", line 1128, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1206, in _gcd_import
File "<frozen importlib._bootstrap>", line 1178, in _find_and_load
File "<frozen importlib._bootstrap>", line 1149, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 690, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 940, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "C:\muddev\evennia\evennia\contrib\grid\xyzgrid\__init__.py", line 6, in <module>
from . import commands # noqa
^^^^^^^^^^^^^^^^^^^^^^
File "C:\muddev\evennia\evennia\contrib\grid\xyzgrid\commands.py", line 15, in <module>
from evennia.commands.default import building
File "C:\muddev\evennia\evennia\commands\default\building.py", line 14, in <module>
from evennia.prototypes import menus as olc_menus
File "C:\muddev\evennia\evennia\prototypes\menus.py", line 20, in <module>
from evennia.utils.evmenu import EvMenu, list_node
File "C:\muddev\evennia\evennia\utils\evmenu.py", line 350, in <module>
class CmdEvMenuNode(Command):
TypeError: NoneType takes no arguments
--- END ISSUE ---
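Before digging into the files below, it helps to unpack the error message itself. Evennia's top-level package keeps flat-API names such as `Command` and `CmdSet` as `None` placeholders until its initialization has run, so importing a module that subclasses them too early makes the class statement inherit from `None`. A minimal, self-contained sketch of that failure mode (the `Command = None` placeholder here is an illustrative stand-in, not Evennia code):

```python
# Reproduces "TypeError: NoneType takes no arguments" on recent Python 3.x:
# subclassing a base that is still None makes class creation call
# type(None)("CmdEvMenuNode", bases, namespace), which rejects any arguments.
Command = None  # stand-in for the lazily populated evennia.Command placeholder

try:
    class CmdEvMenuNode(Command):  # same shape as the failing line in evennia/utils/evmenu.py
        pass
except TypeError as err:
    print(err)  # -> NoneType takes no arguments
```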
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evennia/contrib/grid/xyzgrid/__init__.py`
Content:
```
1 """
2 XYZGrid - Griatch 2021
3
4 """
5
6 from . import commands # noqa
7 from . import example # noqa
8 from . import launchcmd # noqa
9 from . import prototypes # noqa
10 from . import tests # noqa
11 from . import utils # noqa
12 from . import xymap # noqa
13 from . import xymap_legend # noqa
14 from . import xyzgrid # noqa
15 from . import xyzroom # noqa
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evennia/contrib/grid/xyzgrid/__init__.py b/evennia/contrib/grid/xyzgrid/__init__.py
--- a/evennia/contrib/grid/xyzgrid/__init__.py
+++ b/evennia/contrib/grid/xyzgrid/__init__.py
@@ -2,14 +2,15 @@
XYZGrid - Griatch 2021
"""
-
-from . import commands # noqa
-from . import example # noqa
-from . import launchcmd # noqa
-from . import prototypes # noqa
-from . import tests # noqa
-from . import utils # noqa
-from . import xymap # noqa
-from . import xymap_legend # noqa
-from . import xyzgrid # noqa
-from . import xyzroom # noqa
+from . import (
+ example,
+ launchcmd,
+ prototypes,
+ tests,
+ utils,
+ xymap,
+ xymap_legend,
+ xyzgrid,
+ xyzroom,
+ commands,
+)
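The accepted change only reorders the package-level imports so that `commands`, the submodule whose import chain reaches the not-yet-initialized `Command`/`CmdSet` classes (as the traceback shows), is loaded last, after siblings like `launchcmd` that the launcher presumably needs first. An alternative that avoids importing the heavy submodules at package-import time altogether would be a lazy import via PEP 562; this is a hypothetical sketch, not the merged fix:

```python
# evennia/contrib/grid/xyzgrid/__init__.py -- hypothetical lazy-import variant
import importlib

from . import launchcmd, prototypes, utils, xymap, xymap_legend, xyzgrid, xyzroom  # noqa: F401

_LAZY_SUBMODULES = {"commands", "example", "tests"}


def __getattr__(name):
    # Defer the command-set imports until something actually touches them,
    # by which time Evennia/Django should be fully initialized.
    if name in _LAZY_SUBMODULES:
        return importlib.import_module(f".{name}", __name__)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```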
| {"golden_diff": "diff --git a/evennia/contrib/grid/xyzgrid/__init__.py b/evennia/contrib/grid/xyzgrid/__init__.py\n--- a/evennia/contrib/grid/xyzgrid/__init__.py\n+++ b/evennia/contrib/grid/xyzgrid/__init__.py\n@@ -2,14 +2,15 @@\n XYZGrid - Griatch 2021\n \n \"\"\"\n-\n-from . import commands # noqa\n-from . import example # noqa\n-from . import launchcmd # noqa\n-from . import prototypes # noqa\n-from . import tests # noqa\n-from . import utils # noqa\n-from . import xymap # noqa\n-from . import xymap_legend # noqa\n-from . import xyzgrid # noqa\n-from . import xyzroom # noqa\n+from . import (\n+ example,\n+ launchcmd,\n+ prototypes,\n+ tests,\n+ utils,\n+ xymap,\n+ xymap_legend,\n+ xyzgrid,\n+ xyzroom,\n+ commands,\n+)\n", "issue": "[BUG] \"evennia xyzgrid help\" causes TypeError: NoneType takes no arguments\n#### Describe the bug\r\nFresh migration from git master to main and then installing xyzgrid prevents evennia xyzgrid commands from working. For example, \"evennia xyzgrid help\" causes TypeError: NoneType takes no arguments\r\n\r\n#### To Reproduce\r\n1. Migrated from git master branch to main branch for 1.x release of Evennia.\r\n2. Installed [extra](use to be in requirements_extra). \r\n\r\nAt this point, I can run the server and log in.\r\n\r\n3. Added the xyzgrid command set and restarted. \r\n\r\n'path', 'goto', 'map' are seen in the command list. The Limbo room does not have a map. Everything appears to work fine.\r\n\r\n4. Modify the server/conf/settings.py.\r\n\r\nxyzgrid is now available.\r\n\r\nWhen I use xyzgrid, such as 'evennia xyzgrid help', or any other xyzgrid command:\r\nTypeError: NoneType takes no arguments\r\n\r\n#### Expected behavior\r\n'evennia xyzgrid <command>' should call the xyzgrid command.\r\n\r\n#### Environment, Evennia version, OS etc\r\n\r\n Evennia 1.0.1 (rev 38011cc48d)\r\n OS: nt\r\n Python: 3.11.1\r\n Twisted: 22.10.0\r\n Django: 4.1.4\r\n\r\n#### Additional context\r\n\r\nThis is based off helix4's message in #general on discord. I added my current steps that seem to reproduce the same issue down below. Here is the original message from helix4, with steps for reproducing on the older version of the code.\r\n\r\nI am trying to test XYZGrid on a brand new install but failing. 1. cloned the single branch of evennia-develop, and initiated an evennia game. 2. installed requirements_extra, and migrated. I can run the server and log in. 3. i added the command set and reloadead, i see path, goto, map ingame. the Limbo room does not have a map. seems to work well. 4. modify the server/conf/settings.py, xyzgrid is now available.\r\n\r\nWhen I use xyzgrid, such as evennia xyzgrid help, or any other xyzgrid command:\r\n from evennia.utils.eveditor import EvEditor\r\n File \"/home/ubuntu/3ProjectMUD/evennia/evennia/utils/eveditor.py\", line 201, in <module>\r\n class SaveYesNoCmdSet(CmdSet):\r\nTypeError: NoneType takes no arguments\r\n\r\nOriginal message\r\n\r\nhttps://discord.com/channels/246323978879107073/246323978879107073/937578545704730624\r\n\r\nGriatch's response\r\n\r\nhttps://discord.com/channels/246323978879107073/246323978879107073/937610453184561183\r\n\r\nSteps:\r\n\r\n1. Migrated from git master branch to main branch for 1.x release of Evennia.\r\n2. Installed [extra](use to be in requirements_extra). \r\n\r\nAt this point, I can run the server and log in.\r\n\r\n3. Added the xyzgrid command set and restarted. \r\n\r\n'path', 'goto', 'map' are seen in the command list. The Limbo room does not have a map. 
Everything appears to work fine.\r\n\r\n4. Modify the server/conf/settings.py.\r\n\r\nxyzgrid is now available.\r\n\r\nWhen I use xyzgrid, such as 'evennia xyzgrid help', or any other xyzgrid command:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\muddev\\evenv\\Scripts\\evennia_launcher.py\", line 18, in <module>\r\n main()\r\n File \"C:\\muddev\\evennia\\evennia\\server\\evennia_launcher.py\", line 2422, in main\r\n if run_custom_commands(option, *unknown_args):\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\muddev\\evennia\\evennia\\server\\evennia_launcher.py\", line 2023, in run_custom_commands\r\n mod = importlib.import_module(modpath)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n...\r\n\r\n File \"<frozen importlib._bootstrap>\", line 1206, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1178, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 1128, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\r\n File \"<frozen importlib._bootstrap>\", line 1206, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1178, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 1149, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\r\n File \"C:\\muddev\\evennia\\evennia\\contrib\\grid\\xyzgrid\\__init__.py\", line 6, in <module>\r\n from . import commands # noqa\r\n ^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\muddev\\evennia\\evennia\\contrib\\grid\\xyzgrid\\commands.py\", line 15, in <module>\r\n from evennia.commands.default import building\r\n File \"C:\\muddev\\evennia\\evennia\\commands\\default\\building.py\", line 14, in <module>\r\n from evennia.prototypes import menus as olc_menus\r\n File \"C:\\muddev\\evennia\\evennia\\prototypes\\menus.py\", line 20, in <module>\r\n from evennia.utils.evmenu import EvMenu, list_node\r\n File \"C:\\muddev\\evennia\\evennia\\utils\\evmenu.py\", line 350, in <module>\r\n class CmdEvMenuNode(Command):\r\nTypeError: NoneType takes no arguments\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nXYZGrid - Griatch 2021\n\n\"\"\"\n\nfrom . import commands # noqa\nfrom . import example # noqa\nfrom . import launchcmd # noqa\nfrom . import prototypes # noqa\nfrom . import tests # noqa\nfrom . import utils # noqa\nfrom . import xymap # noqa\nfrom . import xymap_legend # noqa\nfrom . import xyzgrid # noqa\nfrom . import xyzroom # noqa\n", "path": "evennia/contrib/grid/xyzgrid/__init__.py"}], "after_files": [{"content": "\"\"\"\nXYZGrid - Griatch 2021\n\n\"\"\"\nfrom . import (\n example,\n launchcmd,\n prototypes,\n tests,\n utils,\n xymap,\n xymap_legend,\n xyzgrid,\n xyzroom,\n commands,\n)\n", "path": "evennia/contrib/grid/xyzgrid/__init__.py"}]} |
gh_patches_debug_1443 | rasdani/github-patches | git_diff | mkdocs__mkdocs-3700 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Empty mkdocs_theme.yml breaks build
Hello! In the docs it's [stated](https://www.mkdocs.org/dev-guide/themes/#theme-configuration) that a theme **can** have an empty `mkdocs_theme.yml` file:
> However, if the theme offers no configuration options, the file is still required and can be left blank.
Unfortunately this seems to have changed recently and now themes with empty `mkdocs_theme.yml` files are causing an exception when building:
```shell
> mkdocs build --verbose
DEBUG - Loading configuration file: ./mkdocs.yml
DEBUG - Loaded theme configuration for 'custom_theme' from
'./venv/lib/python3.12/site-packages/custom_theme/mkdocs_theme.yml':
None
Traceback (most recent call last):
[...]
File "./venv/lib/python3.12/site-packages/mkdocs/config/config_options.py", line 868, in run_validation
return theme.Theme(**theme_config)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "./venv/lib/python3.12/site-packages/mkdocs/theme.py", line 61, in __init__
self._load_theme_config(name)
File "./venv/lib/python3.12/site-packages/mkdocs/theme.py", line 143, in _load_theme_config
if parent_theme := theme_config.pop('extends', None):
^^^^^^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'pop'
```
--- END ISSUE ---
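The traceback points at the YAML load rather than at the theme itself: PyYAML parses an empty document to `None`, not to an empty mapping, so an empty but present `mkdocs_theme.yml` turns `theme_config` into `None` before `.pop()` is reached. A quick reproduction (only PyYAML is assumed to be installed):

```python
import yaml

# An empty mkdocs_theme.yml parses to None, not to an empty mapping.
theme_config = yaml.safe_load("")
print(theme_config)  # None

# This is the attribute access that fails inside mkdocs/theme.py:
# theme_config.pop("extends", None)  # AttributeError: 'NoneType' object has no attribute 'pop'

# The usual guard:
theme_config = theme_config or {}
print(theme_config.pop("extends", None))  # None, and no exception
```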
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/theme.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 import os
5 import warnings
6 from typing import Any, Collection, MutableMapping
7
8 import jinja2
9 import yaml
10
11 try:
12 from yaml import CSafeLoader as SafeLoader
13 except ImportError: # pragma: no cover
14 from yaml import SafeLoader # type: ignore
15
16 from mkdocs import localization, utils
17 from mkdocs.config.base import ValidationError
18 from mkdocs.utils import templates
19
20 log = logging.getLogger(__name__)
21
22
23 class Theme(MutableMapping[str, Any]):
24 """
25 A Theme object.
26
27 Args:
28 name: The name of the theme as defined by its entrypoint.
29 custom_dir: User defined directory for custom templates.
30 static_templates: A list of templates to render as static pages.
31
32 All other keywords are passed as-is and made available as a key/value mapping.
33 """
34
35 def __init__(
36 self,
37 name: str | None = None,
38 *,
39 custom_dir: str | None = None,
40 static_templates: Collection[str] = (),
41 locale: str | None = None,
42 **user_config,
43 ) -> None:
44 self.name = name
45 self._custom_dir = custom_dir
46 _vars: dict[str, Any] = {'name': name, 'locale': 'en'}
47 self.__vars = _vars
48
49 # MkDocs provided static templates are always included
50 package_dir = os.path.abspath(os.path.dirname(__file__))
51 mkdocs_templates = os.path.join(package_dir, 'templates')
52 self.static_templates = set(os.listdir(mkdocs_templates))
53
54 # Build self.dirs from various sources in order of precedence
55 self.dirs = []
56
57 if custom_dir is not None:
58 self.dirs.append(custom_dir)
59
60 if name:
61 self._load_theme_config(name)
62
63 # Include templates provided directly by MkDocs (outside any theme)
64 self.dirs.append(mkdocs_templates)
65
66 # Handle remaining user configs. Override theme configs (if set)
67 self.static_templates.update(static_templates)
68 _vars.update(user_config)
69
70 # Validate locale and convert to Locale object
71 if locale is None:
72 locale = _vars['locale']
73 _vars['locale'] = localization.parse_locale(locale)
74
75 name: str | None
76
77 @property
78 def locale(self) -> localization.Locale:
79 return self['locale']
80
81 @property
82 def custom_dir(self) -> str | None:
83 return self._custom_dir
84
85 @property
86 def _vars(self) -> dict[str, Any]:
87 warnings.warn(
88 "Do not access Theme._vars, instead access the keys of Theme directly.",
89 DeprecationWarning,
90 )
91 return self.__vars
92
93 dirs: list[str]
94
95 static_templates: set[str]
96
97 def __repr__(self) -> str:
98 return "{}(name={!r}, dirs={!r}, static_templates={!r}, {})".format(
99 self.__class__.__name__,
100 self.name,
101 self.dirs,
102 self.static_templates,
103 ', '.join(f'{k}={v!r}' for k, v in self.items()),
104 )
105
106 def __getitem__(self, key: str) -> Any:
107 return self.__vars[key]
108
109 def __setitem__(self, key: str, value):
110 self.__vars[key] = value
111
112 def __delitem__(self, key: str):
113 del self.__vars[key]
114
115 def __contains__(self, item: object) -> bool:
116 return item in self.__vars
117
118 def __len__(self):
119 return len(self.__vars)
120
121 def __iter__(self):
122 return iter(self.__vars)
123
124 def _load_theme_config(self, name: str) -> None:
125 """Recursively load theme and any parent themes."""
126 theme_dir = utils.get_theme_dir(name)
127 utils.get_themes.cache_clear()
128 self.dirs.append(theme_dir)
129
130 try:
131 file_path = os.path.join(theme_dir, 'mkdocs_theme.yml')
132 with open(file_path, 'rb') as f:
133 theme_config = yaml.load(f, SafeLoader)
134 except OSError as e:
135 log.debug(e)
136 raise ValidationError(
137 f"The theme '{name}' does not appear to have a configuration file. "
138 f"Please upgrade to a current version of the theme."
139 )
140
141 log.debug(f"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}")
142
143 if parent_theme := theme_config.pop('extends', None):
144 themes = utils.get_theme_names()
145 if parent_theme not in themes:
146 raise ValidationError(
147 f"The theme '{name}' inherits from '{parent_theme}', which does not appear to be installed. "
148 f"The available installed themes are: {', '.join(themes)}"
149 )
150 self._load_theme_config(parent_theme)
151
152 self.static_templates.update(theme_config.pop('static_templates', []))
153 self.__vars.update(theme_config)
154
155 def get_env(self) -> jinja2.Environment:
156 """Return a Jinja environment for the theme."""
157 loader = jinja2.FileSystemLoader(self.dirs)
158 # No autoreload because editing a template in the middle of a build is not useful.
159 env = jinja2.Environment(loader=loader, auto_reload=False)
160 env.filters['url'] = templates.url_filter
161 env.filters['script_tag'] = templates.script_tag_filter
162 localization.install_translations(env, self.locale, self.dirs)
163 return env
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/theme.py b/mkdocs/theme.py
--- a/mkdocs/theme.py
+++ b/mkdocs/theme.py
@@ -138,6 +138,9 @@
f"Please upgrade to a current version of the theme."
)
+ if theme_config is None:
+ theme_config = {}
+
log.debug(f"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}")
if parent_theme := theme_config.pop('extends', None):
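An equivalent, slightly terser fix would normalize the value at the load site instead of adding a separate branch; for this file the behaviour works out the same, so it is purely a style alternative (a fragment that assumes the surrounding names in `_load_theme_config`, not a drop-in patch):

```python
with open(file_path, "rb") as f:
    # `or {}` maps an empty file (parsed as None) to an empty mapping in one step
    theme_config = yaml.load(f, SafeLoader) or {}
```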
| {"golden_diff": "diff --git a/mkdocs/theme.py b/mkdocs/theme.py\n--- a/mkdocs/theme.py\n+++ b/mkdocs/theme.py\n@@ -138,6 +138,9 @@\n f\"Please upgrade to a current version of the theme.\"\n )\n \n+ if theme_config is None:\n+ theme_config = {}\n+\n log.debug(f\"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}\")\n \n if parent_theme := theme_config.pop('extends', None):\n", "issue": "Empty mkdocs_theme.yml breaks build\nHello! In the docs its [stated](https://www.mkdocs.org/dev-guide/themes/#theme-configuration) that a theme **can** have an empty `mkdocs_theme.yml` file:\r\n\r\n> However, if the theme offers no configuration options, the file is still required and can be left blank.\r\n\r\nUnfortunately this seems to have changed recently and now themes with empty `mkdocs_theme.yml` files are causing an exception when building:\r\n\r\n```shell\r\n> mkdocs build --verbose\r\nDEBUG - Loading configuration file: ./mkdocs.yml\r\nDEBUG - Loaded theme configuration for 'custom_theme' from\r\n './venv/lib/python3.12/site-packages/custom_theme/mkdocs_theme.yml':\r\n None\r\nTraceback (most recent call last):\r\n [...]\r\n File \"./venv/lib/python3.12/site-packages/mkdocs/config/config_options.py\", line 868, in run_validation\r\n return theme.Theme(**theme_config)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"./venv/lib/python3.12/site-packages/mkdocs/theme.py\", line 61, in __init__\r\n self._load_theme_config(name)\r\n File \"./venv/lib/python3.12/site-packages/mkdocs/theme.py\", line 143, in _load_theme_config\r\n if parent_theme := theme_config.pop('extends', None):\r\n ^^^^^^^^^^^^^^^^\r\nAttributeError: 'NoneType' object has no attribute 'pop'\r\n```\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os\nimport warnings\nfrom typing import Any, Collection, MutableMapping\n\nimport jinja2\nimport yaml\n\ntry:\n from yaml import CSafeLoader as SafeLoader\nexcept ImportError: # pragma: no cover\n from yaml import SafeLoader # type: ignore\n\nfrom mkdocs import localization, utils\nfrom mkdocs.config.base import ValidationError\nfrom mkdocs.utils import templates\n\nlog = logging.getLogger(__name__)\n\n\nclass Theme(MutableMapping[str, Any]):\n \"\"\"\n A Theme object.\n\n Args:\n name: The name of the theme as defined by its entrypoint.\n custom_dir: User defined directory for custom templates.\n static_templates: A list of templates to render as static pages.\n\n All other keywords are passed as-is and made available as a key/value mapping.\n \"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n *,\n custom_dir: str | None = None,\n static_templates: Collection[str] = (),\n locale: str | None = None,\n **user_config,\n ) -> None:\n self.name = name\n self._custom_dir = custom_dir\n _vars: dict[str, Any] = {'name': name, 'locale': 'en'}\n self.__vars = _vars\n\n # MkDocs provided static templates are always included\n package_dir = os.path.abspath(os.path.dirname(__file__))\n mkdocs_templates = os.path.join(package_dir, 'templates')\n self.static_templates = set(os.listdir(mkdocs_templates))\n\n # Build self.dirs from various sources in order of precedence\n self.dirs = []\n\n if custom_dir is not None:\n self.dirs.append(custom_dir)\n\n if name:\n self._load_theme_config(name)\n\n # Include templates provided directly by MkDocs (outside any theme)\n self.dirs.append(mkdocs_templates)\n\n # Handle remaining user configs. 
Override theme configs (if set)\n self.static_templates.update(static_templates)\n _vars.update(user_config)\n\n # Validate locale and convert to Locale object\n if locale is None:\n locale = _vars['locale']\n _vars['locale'] = localization.parse_locale(locale)\n\n name: str | None\n\n @property\n def locale(self) -> localization.Locale:\n return self['locale']\n\n @property\n def custom_dir(self) -> str | None:\n return self._custom_dir\n\n @property\n def _vars(self) -> dict[str, Any]:\n warnings.warn(\n \"Do not access Theme._vars, instead access the keys of Theme directly.\",\n DeprecationWarning,\n )\n return self.__vars\n\n dirs: list[str]\n\n static_templates: set[str]\n\n def __repr__(self) -> str:\n return \"{}(name={!r}, dirs={!r}, static_templates={!r}, {})\".format(\n self.__class__.__name__,\n self.name,\n self.dirs,\n self.static_templates,\n ', '.join(f'{k}={v!r}' for k, v in self.items()),\n )\n\n def __getitem__(self, key: str) -> Any:\n return self.__vars[key]\n\n def __setitem__(self, key: str, value):\n self.__vars[key] = value\n\n def __delitem__(self, key: str):\n del self.__vars[key]\n\n def __contains__(self, item: object) -> bool:\n return item in self.__vars\n\n def __len__(self):\n return len(self.__vars)\n\n def __iter__(self):\n return iter(self.__vars)\n\n def _load_theme_config(self, name: str) -> None:\n \"\"\"Recursively load theme and any parent themes.\"\"\"\n theme_dir = utils.get_theme_dir(name)\n utils.get_themes.cache_clear()\n self.dirs.append(theme_dir)\n\n try:\n file_path = os.path.join(theme_dir, 'mkdocs_theme.yml')\n with open(file_path, 'rb') as f:\n theme_config = yaml.load(f, SafeLoader)\n except OSError as e:\n log.debug(e)\n raise ValidationError(\n f\"The theme '{name}' does not appear to have a configuration file. \"\n f\"Please upgrade to a current version of the theme.\"\n )\n\n log.debug(f\"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}\")\n\n if parent_theme := theme_config.pop('extends', None):\n themes = utils.get_theme_names()\n if parent_theme not in themes:\n raise ValidationError(\n f\"The theme '{name}' inherits from '{parent_theme}', which does not appear to be installed. 
\"\n f\"The available installed themes are: {', '.join(themes)}\"\n )\n self._load_theme_config(parent_theme)\n\n self.static_templates.update(theme_config.pop('static_templates', []))\n self.__vars.update(theme_config)\n\n def get_env(self) -> jinja2.Environment:\n \"\"\"Return a Jinja environment for the theme.\"\"\"\n loader = jinja2.FileSystemLoader(self.dirs)\n # No autoreload because editing a template in the middle of a build is not useful.\n env = jinja2.Environment(loader=loader, auto_reload=False)\n env.filters['url'] = templates.url_filter\n env.filters['script_tag'] = templates.script_tag_filter\n localization.install_translations(env, self.locale, self.dirs)\n return env\n", "path": "mkdocs/theme.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os\nimport warnings\nfrom typing import Any, Collection, MutableMapping\n\nimport jinja2\nimport yaml\n\ntry:\n from yaml import CSafeLoader as SafeLoader\nexcept ImportError: # pragma: no cover\n from yaml import SafeLoader # type: ignore\n\nfrom mkdocs import localization, utils\nfrom mkdocs.config.base import ValidationError\nfrom mkdocs.utils import templates\n\nlog = logging.getLogger(__name__)\n\n\nclass Theme(MutableMapping[str, Any]):\n \"\"\"\n A Theme object.\n\n Args:\n name: The name of the theme as defined by its entrypoint.\n custom_dir: User defined directory for custom templates.\n static_templates: A list of templates to render as static pages.\n\n All other keywords are passed as-is and made available as a key/value mapping.\n \"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n *,\n custom_dir: str | None = None,\n static_templates: Collection[str] = (),\n locale: str | None = None,\n **user_config,\n ) -> None:\n self.name = name\n self._custom_dir = custom_dir\n _vars: dict[str, Any] = {'name': name, 'locale': 'en'}\n self.__vars = _vars\n\n # MkDocs provided static templates are always included\n package_dir = os.path.abspath(os.path.dirname(__file__))\n mkdocs_templates = os.path.join(package_dir, 'templates')\n self.static_templates = set(os.listdir(mkdocs_templates))\n\n # Build self.dirs from various sources in order of precedence\n self.dirs = []\n\n if custom_dir is not None:\n self.dirs.append(custom_dir)\n\n if name:\n self._load_theme_config(name)\n\n # Include templates provided directly by MkDocs (outside any theme)\n self.dirs.append(mkdocs_templates)\n\n # Handle remaining user configs. 
Override theme configs (if set)\n self.static_templates.update(static_templates)\n _vars.update(user_config)\n\n # Validate locale and convert to Locale object\n if locale is None:\n locale = _vars['locale']\n _vars['locale'] = localization.parse_locale(locale)\n\n name: str | None\n\n @property\n def locale(self) -> localization.Locale:\n return self['locale']\n\n @property\n def custom_dir(self) -> str | None:\n return self._custom_dir\n\n @property\n def _vars(self) -> dict[str, Any]:\n warnings.warn(\n \"Do not access Theme._vars, instead access the keys of Theme directly.\",\n DeprecationWarning,\n )\n return self.__vars\n\n dirs: list[str]\n\n static_templates: set[str]\n\n def __repr__(self) -> str:\n return \"{}(name={!r}, dirs={!r}, static_templates={!r}, {})\".format(\n self.__class__.__name__,\n self.name,\n self.dirs,\n self.static_templates,\n ', '.join(f'{k}={v!r}' for k, v in self.items()),\n )\n\n def __getitem__(self, key: str) -> Any:\n return self.__vars[key]\n\n def __setitem__(self, key: str, value):\n self.__vars[key] = value\n\n def __delitem__(self, key: str):\n del self.__vars[key]\n\n def __contains__(self, item: object) -> bool:\n return item in self.__vars\n\n def __len__(self):\n return len(self.__vars)\n\n def __iter__(self):\n return iter(self.__vars)\n\n def _load_theme_config(self, name: str) -> None:\n \"\"\"Recursively load theme and any parent themes.\"\"\"\n theme_dir = utils.get_theme_dir(name)\n utils.get_themes.cache_clear()\n self.dirs.append(theme_dir)\n\n try:\n file_path = os.path.join(theme_dir, 'mkdocs_theme.yml')\n with open(file_path, 'rb') as f:\n theme_config = yaml.load(f, SafeLoader)\n except OSError as e:\n log.debug(e)\n raise ValidationError(\n f\"The theme '{name}' does not appear to have a configuration file. \"\n f\"Please upgrade to a current version of the theme.\"\n )\n\n if theme_config is None:\n theme_config = {}\n\n log.debug(f\"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}\")\n\n if parent_theme := theme_config.pop('extends', None):\n themes = utils.get_theme_names()\n if parent_theme not in themes:\n raise ValidationError(\n f\"The theme '{name}' inherits from '{parent_theme}', which does not appear to be installed. \"\n f\"The available installed themes are: {', '.join(themes)}\"\n )\n self._load_theme_config(parent_theme)\n\n self.static_templates.update(theme_config.pop('static_templates', []))\n self.__vars.update(theme_config)\n\n def get_env(self) -> jinja2.Environment:\n \"\"\"Return a Jinja environment for the theme.\"\"\"\n loader = jinja2.FileSystemLoader(self.dirs)\n # No autoreload because editing a template in the middle of a build is not useful.\n env = jinja2.Environment(loader=loader, auto_reload=False)\n env.filters['url'] = templates.url_filter\n env.filters['script_tag'] = templates.script_tag_filter\n localization.install_translations(env, self.locale, self.dirs)\n return env\n", "path": "mkdocs/theme.py"}]} |
gh_patches_debug_1444 | rasdani/github-patches | git_diff | tensorflow__tensor2tensor-1557 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
distributed training on multiple machines fails
### Description
I am trying to do distributed training on multiple machines with 1 GPU each. It is failing on the workers. Please look into this!
...
### Environment information
```
OS: Linux - 18
On master i run -
t2t-trainer --master=grpc://10.10.1.2:2219 --ps_replicas=3 --worker_replicas=1 --worker_gpu=0 --worker_id=0 --ps_gpu=1 --sync --schedule=train --worker_job='/job:master' --model=transformer --hparams_set=transformer_base --problem=translate_ende_wmt32k --data_dir=/users/kshiteej/varunimagenet/tensor2tensor/t2t_data/ --output_dir=/users/kshiteej/
On PS-
1. t2t-trainer --schedule=run_std_server
2. t2t-trainer --schedule=run_std_server
3. t2t-trainer --schedule=run_std_server
OUTPUT of Master -
..
.
.
.
13] Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
I0331 22:40:02.157696 139967148951360 basic_session_run_hooks.py:527] Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
OUTPUT of Worker -
Traceback (most recent call last):
File "/usr/local/bin/t2t-trainer", line 33, in <module>
tf.app.run()
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/platform/app.py", line 125, in run
_sys.exit(main(argv))
File "/usr/local/bin/t2t-trainer", line 28, in main
t2t_trainer.main(argv)
File "/usr/local/lib/python3.6/dist-packages/tensor2tensor/bin/t2t_trainer.py", line 413, in main
hparams = create_hparams()
File "/usr/local/lib/python3.6/dist-packages/tensor2tensor/bin/t2t_trainer.py", line 176, in create_hparams
return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,hparams_path=hparams_path)
File "/usr/local/lib/python3.6/dist-packages/tensor2tensor/utils/hparams_lib.py", line 48, in create_hparams
hparams = registry.hparams(hparams_set)
File "/usr/local/lib/python3.6/dist-packages/tensor2tensor/utils/registry.py", line 254, in __getitem__
(key, self.name, display_list_by_prefix(sorted(self), 4)))
KeyError: 'None never registered with registry hparams. Available:\n adaptive:\n * adaptive_universal_transformer_base\n * adaptive_universal_tr...
..
..
..
$ pip freeze | grep tensor
# your output here
mesh-tensorflow==0.0.5
tensor2tensor==1.13.1
tensorboard==1.13.0
tensorflow-datasets==1.0.1
tensorflow-estimator==1.13.0
tensorflow-gpu==1.13.1
tensorflow-metadata==0.13.0
tensorflow-probability==0.6.0
tensorflow-tensorboard==0.4.0
$ python -V
# your output here
```
Python 2.7.15rc1
### For bugs: reproduction and error logs
```
# Steps to reproduce:
...
```
```
# Error logs:
...
```
--- END ISSUE ---
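Reading the worker traceback, the parameter-server processes are launched with nothing but `--schedule=run_std_server`, so `--hparams_set` is still `None` when `main()` unconditionally calls `create_hparams()`; the registry lookup for the key `None` is what raises the error. A small reproduction of that lookup (assumes tensor2tensor 1.13.x is installed; the exact message text may differ between versions):

```python
from tensor2tensor import models  # noqa: F401  (populates the hparams registry)
from tensor2tensor.utils import registry

try:
    registry.hparams(None)  # what a PS host ends up doing with no --hparams_set
except KeyError as err:
    print(str(err)[:80])  # "None never registered with registry hparams. Available: ..."
```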
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensor2tensor/bin/t2t_trainer.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2019 The Tensor2Tensor Authors.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """Train and evaluate."""
17 from __future__ import absolute_import
18 from __future__ import division
19 from __future__ import print_function
20
21 import contextlib
22 import os
23 import sys
24 from tensor2tensor import models # pylint: disable=unused-import
25 from tensor2tensor import problems as problems_lib # pylint: disable=unused-import
26 from tensor2tensor.data_generators import problem # pylint: disable=unused-import
27
28 from tensor2tensor.utils import cloud_mlengine
29 from tensor2tensor.utils import decoding
30 from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import
31 from tensor2tensor.utils import hparams_lib
32 from tensor2tensor.utils import mlperf_log
33 from tensor2tensor.utils import registry
34 from tensor2tensor.utils import trainer_lib
35 from tensor2tensor.utils import usr_dir
36 import tensorflow as tf
37
38 from tensorflow.contrib.tpu.python.tpu import tpu_config
39
40
41 flags = tf.flags
42 FLAGS = flags.FLAGS
43
44 # See utils/flags.py for additional command-line flags.
45 flags.DEFINE_string("t2t_usr_dir", None,
46 "Path to a Python module that will be imported. The "
47 "__init__.py file should include the necessary imports. "
48 "The imported files should contain registrations, "
49 "e.g. @registry.register_model calls, that will then be "
50 "available to the t2t-trainer.")
51 flags.DEFINE_integer("random_seed", None, "Random seed.")
52 flags.DEFINE_integer("tpu_num_shards", 8, "Number of tpu shards.")
53 flags.DEFINE_string("tpu_job_name", None,
54 "TPU job name. TPUEstimator can auto-infer this but if the "
55 "configuration is esoteric it should be provided here.")
56 flags.DEFINE_integer("iterations_per_loop", 100,
57 "Number of iterations in a TPU training loop.")
58 flags.DEFINE_bool("use_tpu", False, "Whether to use TPU.")
59 flags.DEFINE_bool("use_tpu_estimator", False, "Whether to use TPUEstimator. "
60 "This is always enabled when use_tpu is True.")
61 flags.DEFINE_bool("xla_compile", False,
62 "Whether to use XLA to compile model_fn.")
63 flags.DEFINE_integer("xla_jit_level", -1,
64 "GlobalJitLevel to use while compiling the full graph.")
65 flags.DEFINE_integer("tpu_infeed_sleep_secs", None,
66 "How long to sleep the infeed thread.")
67 flags.DEFINE_bool("generate_data", False, "Generate data before training?")
68 flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen",
69 "Temporary storage directory, used if --generate_data.")
70 flags.DEFINE_bool("profile", False, "Profile performance?")
71 flags.DEFINE_integer("inter_op_parallelism_threads", 0,
72 "Number of inter_op_parallelism_threads to use for CPU. "
73 "See TensorFlow config.proto for details.")
74 flags.DEFINE_integer("intra_op_parallelism_threads", 0,
75 "Number of intra_op_parallelism_threads to use for CPU. "
76 "See TensorFlow config.proto for details.")
77 # TODO(lukaszkaiser): resolve memory and variable assign issues and set to True.
78 flags.DEFINE_bool(
79 "optionally_use_dist_strat", False,
80 "Whether to use TensorFlow DistributionStrategy instead of explicitly "
81 "replicating the model. DistributionStrategy is used only if the "
82 "model replication configuration is supported by the DistributionStrategy.")
83 # To maintain compatibility with some internal libs, we guard against these flag
84 # definitions possibly erroring. Apologies for the ugliness.
85 try:
86 flags.DEFINE_string("master", "", "Address of TensorFlow master.")
87 flags.DEFINE_string("output_dir", "", "Base output directory for run.")
88 flags.DEFINE_string("schedule", "continuous_train_and_eval",
89 "Method of Experiment to run.")
90 flags.DEFINE_integer("eval_steps", 100,
91 "Number of steps in evaluation. By default, eval will "
92 "stop after eval_steps or when it runs through the eval "
93 "dataset once in full, whichever comes first, so this "
94 "can be a very large number.")
95 except: # pylint: disable=bare-except
96 pass
97
98 flags.DEFINE_string("std_server_protocol", "grpc",
99 "Protocol for tf.train.Server.")
100
101 # Google Cloud TPUs
102 flags.DEFINE_string("cloud_tpu_name", "%s-tpu" % os.getenv("USER"),
103 "Name of Cloud TPU instance to use or create.")
104
105 # Google Cloud ML Engine
106 flags.DEFINE_bool("cloud_mlengine", False,
107 "Whether to launch on Cloud ML Engine.")
108 flags.DEFINE_string("cloud_mlengine_master_type", None,
109 "Machine type for master on Cloud ML Engine. "
110 "If provided, overrides default selections based on "
111 "--worker_gpu. User is responsible for ensuring "
112 "type is valid and that --worker_gpu matches number of "
113 "GPUs on machine type. See documentation: "
114 "https://cloud.google.com/ml-engine/reference/rest/v1/"
115 "projects.jobs#traininginput")
116 # Hyperparameter tuning on Cloud ML Engine
117 # Pass an --hparams_range to enable
118 flags.DEFINE_string("autotune_objective", None,
119 "TensorBoard metric name to optimize.")
120 flags.DEFINE_bool("autotune_maximize", True,
121 "Whether to maximize (vs. minimize) autotune_objective.")
122 flags.DEFINE_integer("autotune_max_trials", 10,
123 "Maximum number of tuning experiments to run.")
124 flags.DEFINE_integer("autotune_parallel_trials", 1,
125 "How many trials to run in parallel (will spin up this "
126 "many jobs.")
127 # Note than in open-source TensorFlow, the dash gets converted to an underscore,
128 # so access is FLAGS.job_dir.
129 flags.DEFINE_string("job-dir", None,
130 "DO NOT USE. Exists only for Cloud ML Engine to pass in "
131 "during hyperparameter tuning. Overrides --output_dir.")
132 flags.DEFINE_integer("log_step_count_steps", 100,
133 "Number of local steps after which progress is printed "
134 "out")
135
136
137
138 def set_hparams_from_args(args):
139 """Set hparams overrides from unparsed args list."""
140 if not args:
141 return
142
143 hp_prefix = "--hp_"
144 tf.logging.info("Found unparsed command-line arguments. Checking if any "
145 "start with %s and interpreting those as hparams "
146 "settings.", hp_prefix)
147
148 pairs = []
149 i = 0
150 while i < len(args):
151 arg = args[i]
152 if arg.startswith(hp_prefix):
153 pairs.append((arg[len(hp_prefix):], args[i+1]))
154 i += 2
155 else:
156 tf.logging.warn("Found unknown flag: %s", arg)
157 i += 1
158
159 as_hparams = ",".join(["%s=%s" % (key, val) for key, val in pairs])
160 if FLAGS.hparams:
161 as_hparams = "," + as_hparams
162 FLAGS.hparams += as_hparams
163
164
165 def create_hparams():
166 """Create hparams."""
167 if FLAGS.use_tpu and "tpu" not in FLAGS.hparams_set:
168 tf.logging.warn("Not all hyperparameter sets work on TPU. "
169 "Prefer hparams_sets with a '_tpu' suffix, "
170 "e.g. transformer_tpu, if available for your model.")
171 hparams_path = os.path.join(FLAGS.output_dir, "hparams.json")
172 return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,
173 hparams_path=hparams_path)
174
175
176 def create_experiment_fn():
177 return trainer_lib.create_experiment_fn(
178 model_name=FLAGS.model,
179 problem_name=FLAGS.problem,
180 data_dir=os.path.expanduser(FLAGS.data_dir),
181 train_steps=FLAGS.train_steps,
182 eval_steps=FLAGS.eval_steps,
183 min_eval_frequency=FLAGS.local_eval_frequency,
184 schedule=FLAGS.schedule,
185 eval_throttle_seconds=FLAGS.eval_throttle_seconds,
186 export=FLAGS.export_saved_model,
187 decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams),
188 use_tfdbg=FLAGS.tfdbg,
189 use_dbgprofile=FLAGS.dbgprofile,
190 eval_early_stopping_steps=FLAGS.eval_early_stopping_steps,
191 eval_early_stopping_metric=FLAGS.eval_early_stopping_metric,
192 eval_early_stopping_metric_delta=FLAGS.eval_early_stopping_metric_delta,
193 eval_early_stopping_metric_minimize=FLAGS
194 .eval_early_stopping_metric_minimize,
195 eval_timeout_mins=FLAGS.eval_timeout_mins,
196 eval_use_test_set=FLAGS.eval_use_test_set,
197 use_tpu=FLAGS.use_tpu,
198 use_tpu_estimator=FLAGS.use_tpu_estimator,
199 use_xla=FLAGS.xla_compile,
200 warm_start_from=FLAGS.warm_start_from,
201 decode_from_file=FLAGS.decode_from_file,
202 decode_to_file=FLAGS.decode_to_file,
203 decode_reference=FLAGS.decode_reference,
204 std_server_protocol=FLAGS.std_server_protocol)
205
206
207 def create_run_config(hp, output_dir=None):
208 """Create a run config.
209
210 Args:
211 hp: model hyperparameters
212 output_dir: model's output directory, defaults to output_dir flag.
213
214 Returns:
215 a run config
216 """
217 save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)
218 save_ckpt_secs = FLAGS.save_checkpoints_secs or None
219 if save_ckpt_secs:
220 save_ckpt_steps = None
221 assert FLAGS.output_dir or FLAGS.checkpoint_path
222 tpu_config_extra_kwargs = {}
223 if FLAGS.tpu_job_name is not None:
224 tpu_config_extra_kwargs["tpu_job_name"] = FLAGS.tpu_job_name
225
226 if getattr(hp, "mtf_mode", False):
227 save_ckpt_steps = None # Disable the default saver
228 save_ckpt_secs = None # Disable the default saver
229 tpu_config_extra_kwargs = {
230 "num_cores_per_replica": 1,
231 "per_host_input_for_training": tpu_config.InputPipelineConfig.BROADCAST,
232 }
233
234 # the various custom getters we have written do not play well together yet.
235 # TODO(noam): ask rsepassi for help here.
236 daisy_chain_variables = (
237 hp.daisy_chain_variables and
238 hp.activation_dtype == "float32" and
239 hp.weight_dtype == "float32")
240 return trainer_lib.create_run_config(
241 model_name=FLAGS.model,
242 model_dir=output_dir or os.path.expanduser(FLAGS.output_dir),
243 master=FLAGS.master,
244 iterations_per_loop=FLAGS.iterations_per_loop,
245 num_shards=FLAGS.tpu_num_shards,
246 log_device_placement=FLAGS.log_device_placement,
247 save_checkpoints_steps=save_ckpt_steps,
248 save_checkpoints_secs=save_ckpt_secs,
249 keep_checkpoint_max=FLAGS.keep_checkpoint_max,
250 keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,
251 num_gpus=FLAGS.worker_gpu,
252 gpu_order=FLAGS.gpu_order,
253 num_async_replicas=FLAGS.worker_replicas,
254 gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,
255 enable_graph_rewriter=FLAGS.enable_graph_rewriter,
256 use_tpu=FLAGS.use_tpu,
257 use_tpu_estimator=FLAGS.use_tpu_estimator,
258 xla_jit_level=FLAGS.xla_jit_level,
259 schedule=FLAGS.schedule,
260 no_data_parallelism=hp.no_data_parallelism,
261 optionally_use_dist_strat=FLAGS.optionally_use_dist_strat,
262 daisy_chain_variables=daisy_chain_variables,
263 ps_replicas=FLAGS.ps_replicas,
264 ps_job=FLAGS.ps_job,
265 ps_gpu=FLAGS.ps_gpu,
266 sync=FLAGS.sync,
267 worker_id=FLAGS.worker_id,
268 worker_job=FLAGS.worker_job,
269 random_seed=FLAGS.random_seed,
270 tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,
271 inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
272 log_step_count_steps=FLAGS.log_step_count_steps,
273 intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
274 tpu_config_extra_kwargs=tpu_config_extra_kwargs,
275 cloud_tpu_name=FLAGS.cloud_tpu_name)
276
277
278 def generate_data():
279 # Generate data if requested.
280 data_dir = os.path.expanduser(FLAGS.data_dir)
281 tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
282 tf.gfile.MakeDirs(data_dir)
283 tf.gfile.MakeDirs(tmp_dir)
284
285 problem_name = FLAGS.problem
286 tf.logging.info("Generating data for %s" % problem_name)
287 registry.problem(problem_name).generate_data(data_dir, tmp_dir)
288
289
290 @contextlib.contextmanager
291 def profile_context():
292 if FLAGS.profile:
293 with tf.contrib.tfprof.ProfileContext(
294 "t2tprof", trace_steps=range(100), dump_steps=range(100)) as pctx:
295 opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
296 pctx.add_auto_profiling("op", opts, range(100))
297 yield
298 else:
299 yield
300
301
302 def maybe_log_registry_and_exit():
303 if FLAGS.registry_help:
304 tf.logging.info(registry.help_string())
305 sys.exit(0)
306
307
308 def is_chief():
309 schedules = ["train", "train_and_evaluate", "continuous_train_and_eval"]
310 return FLAGS.worker_id == 0 and FLAGS.schedule in schedules
311
312
313 def save_metadata(hparams):
314 """Saves FLAGS and hparams to output_dir."""
315 output_dir = os.path.expanduser(FLAGS.output_dir)
316 if not tf.gfile.Exists(output_dir):
317 tf.gfile.MakeDirs(output_dir)
318
319 # Save FLAGS in txt file
320 if hasattr(FLAGS, "flags_into_string"):
321 flags_str = FLAGS.flags_into_string()
322 t2t_flags_str = "\n".join([
323 "--%s=%s" % (f.name, f.value)
324 for f in FLAGS.flags_by_module_dict()["tensor2tensor.utils.flags"]
325 ])
326 else:
327 flags_dict = FLAGS.__dict__["__flags"]
328 flags_str = "\n".join(
329 ["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()])
330 t2t_flags_str = None
331
332 flags_txt = os.path.join(output_dir, "flags.txt")
333 with tf.gfile.Open(flags_txt, "w") as f:
334 f.write(flags_str)
335
336 if t2t_flags_str:
337 t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt")
338 with tf.gfile.Open(t2t_flags_txt, "w") as f:
339 f.write(t2t_flags_str)
340
341 # Save hparams as hparams.json
342 new_hparams = hparams_lib.copy_hparams(hparams)
343 # Modality class is not JSON serializable so remove.
344 new_hparams.del_hparam("modality")
345
346 hparams_fname = os.path.join(output_dir, "hparams.json")
347 with tf.gfile.Open(hparams_fname, "w") as f:
348 f.write(new_hparams.to_json(indent=0, sort_keys=True))
349
350
351 def execute_schedule(exp):
352 if not hasattr(exp, FLAGS.schedule):
353 raise ValueError(
354 "Experiment has no method %s, from --schedule" % FLAGS.schedule)
355 with profile_context():
356 getattr(exp, FLAGS.schedule)()
357
358
359 def run_std_server():
360 exp = trainer_lib.T2TExperiment(*([None] * 5))
361 exp.run_std_server()
362
363
364 def main(argv):
365 tf.logging.set_verbosity(tf.logging.INFO)
366
367 usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
368
369 # If we just have to print the registry, do that and exit early.
370 maybe_log_registry_and_exit()
371
372 # Create HParams.
373 if argv:
374 set_hparams_from_args(argv[1:])
375 hparams = create_hparams()
376
377 if FLAGS.schedule == "train" or FLAGS.schedule == "train_eval_and_decode":
378 mlperf_log.transformer_print(key=mlperf_log.RUN_START, hparams=hparams)
379 if FLAGS.schedule == "run_std_server":
380 run_std_server()
381 mlperf_log.transformer_print(
382 key=mlperf_log.RUN_SET_RANDOM_SEED, value=FLAGS.random_seed,
383 hparams=hparams)
384 trainer_lib.set_random_seed(FLAGS.random_seed)
385
386 if FLAGS.cloud_mlengine:
387 cloud_mlengine.launch()
388 return
389
390 if FLAGS.generate_data:
391 generate_data()
392
393 if cloud_mlengine.job_dir():
394 FLAGS.output_dir = cloud_mlengine.job_dir()
395
396 exp_fn = create_experiment_fn()
397 exp = exp_fn(create_run_config(hparams), hparams)
398 if is_chief():
399 save_metadata(hparams)
400 execute_schedule(exp)
401 if FLAGS.schedule != "train":
402 mlperf_log.transformer_print(key=mlperf_log.RUN_FINAL,
403 hparams=hparams)
404
405
406 if __name__ == "__main__":
407 tf.logging.set_verbosity(tf.logging.INFO)
408 tf.app.run()
409
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensor2tensor/bin/t2t_trainer.py b/tensor2tensor/bin/t2t_trainer.py
--- a/tensor2tensor/bin/t2t_trainer.py
+++ b/tensor2tensor/bin/t2t_trainer.py
@@ -372,7 +372,8 @@
# Create HParams.
if argv:
set_hparams_from_args(argv[1:])
- hparams = create_hparams()
+ if FLAGS.schedule != "run_std_server":
+ hparams = create_hparams()
if FLAGS.schedule == "train" or FLAGS.schedule == "train_eval_and_decode":
mlperf_log.transformer_print(key=mlperf_log.RUN_START, hparams=hparams)
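One subtlety of the guard above: when the schedule is `run_std_server`, `hparams` is never bound, which is only safe because that branch is expected to hand control to `run_std_server()` and block serving the cluster rather than fall through to the later `hparams=hparams` uses. A condensed, illustrative view of the patched control flow (names as in `t2t_trainer.py` above, not a drop-in copy of `main()`):

```python
# Condensed sketch of main() after the patch.
if FLAGS.schedule != "run_std_server":
    hparams = create_hparams()  # needs --hparams_set; only the trainer/master passes it
if FLAGS.schedule == "run_std_server":
    run_std_server()  # expected to block serving gRPC for PS/worker hosts
# Code below still assumes `hparams` exists, which holds as long as the
# run_std_server branch never returns to this point.
```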
| {"golden_diff": "diff --git a/tensor2tensor/bin/t2t_trainer.py b/tensor2tensor/bin/t2t_trainer.py\n--- a/tensor2tensor/bin/t2t_trainer.py\n+++ b/tensor2tensor/bin/t2t_trainer.py\n@@ -372,7 +372,8 @@\n # Create HParams.\n if argv:\n set_hparams_from_args(argv[1:])\n- hparams = create_hparams()\n+ if FLAGS.schedule != \"run_std_server\":\n+ hparams = create_hparams()\n \n if FLAGS.schedule == \"train\" or FLAGS.schedule == \"train_eval_and_decode\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_START, hparams=hparams)\n", "issue": "distributed training on multiple machine fails\n### Description\r\nI am trying to do distributed training on multiple machines with 1 GPU each. It is failing on the workers. Please look into this!\r\n...\r\n\r\n### Environment information\r\n```\r\nOS: Linux - 18\r\n\r\nOn master i run -\r\nt2t-trainer --master=grpc://10.10.1.2:2219 --ps_replicas=3 --worker_replicas=1 --worker_gpu=0 --worker_id=0 --ps_gpu=1 --sync --schedule=train --worker_job='/job:master' --model=transformer --hparams_set=transformer_base --problem=translate_ende_wmt32k --data_dir=/users/kshiteej/varunimagenet/tensor2tensor/t2t_data/ --output_dir=/users/kshiteej/\r\n\r\nOn PS-\r\n1. t2t-trainer --schedule=run_std_server \r\n2. t2t-trainer --schedule=run_std_server \r\n3. t2t-trainer --schedule=run_std_server \r\n\r\nOUTPUT of Master - \r\n..\r\n.\r\n.\r\n.\r\n13] Done calling model_fn.\r\nINFO:tensorflow:Create CheckpointSaverHook.\r\nI0331 22:40:02.157696 139967148951360 basic_session_run_hooks.py:527] Create CheckpointSaverHook.\r\nINFO:tensorflow:Graph was finalized.\r\n\r\nOUTPUT of Worker - \r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/t2t-trainer\", line 33, in <module>\r\n tf.app.run()\r\n File \"/usr/local/lib/python3.6/dist-packages/tensorflow/python/platform/app.py\", line 125, in run\r\n _sys.exit(main(argv))\r\n File \"/usr/local/bin/t2t-trainer\", line 28, in main\r\n t2t_trainer.main(argv)\r\n File \"/usr/local/lib/python3.6/dist-packages/tensor2tensor/bin/t2t_trainer.py\", line 413, in main\r\n hparams = create_hparams()\r\n File \"/usr/local/lib/python3.6/dist-packages/tensor2tensor/bin/t2t_trainer.py\", line 176, in create_hparams\r\n return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,hparams_path=hparams_path)\r\n File \"/usr/local/lib/python3.6/dist-packages/tensor2tensor/utils/hparams_lib.py\", line 48, in create_hparams\r\n hparams = registry.hparams(hparams_set)\r\n File \"/usr/local/lib/python3.6/dist-packages/tensor2tensor/utils/registry.py\", line 254, in __getitem__\r\n (key, self.name, display_list_by_prefix(sorted(self), 4)))\r\nKeyError: 'None never registered with registry hparams. 
Available:\\n adaptive:\\n * adaptive_universal_transformer_base\\n * adaptive_universal_tr...\r\n..\r\n..\r\n..\r\n\r\n\r\n\r\n$ pip freeze | grep tensor\r\n# your output here\r\nmesh-tensorflow==0.0.5\r\ntensor2tensor==1.13.1\r\ntensorboard==1.13.0\r\ntensorflow-datasets==1.0.1\r\ntensorflow-estimator==1.13.0\r\ntensorflow-gpu==1.13.1\r\ntensorflow-metadata==0.13.0\r\ntensorflow-probability==0.6.0\r\ntensorflow-tensorboard==0.4.0\r\n\r\n$ python -V\r\n# your output here\r\n```\r\nPython 2.7.15rc1\r\n\r\n### For bugs: reproduction and error logs\r\n\r\n```\r\n# Steps to reproduce:\r\n...\r\n```\r\n\r\n```\r\n# Error logs:\r\n...\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Train and evaluate.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport sys\nfrom tensor2tensor import models # pylint: disable=unused-import\nfrom tensor2tensor import problems as problems_lib # pylint: disable=unused-import\nfrom tensor2tensor.data_generators import problem # pylint: disable=unused-import\n\nfrom tensor2tensor.utils import cloud_mlengine\nfrom tensor2tensor.utils import decoding\nfrom tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import\nfrom tensor2tensor.utils import hparams_lib\nfrom tensor2tensor.utils import mlperf_log\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\nfrom tensor2tensor.utils import usr_dir\nimport tensorflow as tf\n\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\n\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\n# See utils/flags.py for additional command-line flags.\nflags.DEFINE_string(\"t2t_usr_dir\", None,\n \"Path to a Python module that will be imported. The \"\n \"__init__.py file should include the necessary imports. \"\n \"The imported files should contain registrations, \"\n \"e.g. @registry.register_model calls, that will then be \"\n \"available to the t2t-trainer.\")\nflags.DEFINE_integer(\"random_seed\", None, \"Random seed.\")\nflags.DEFINE_integer(\"tpu_num_shards\", 8, \"Number of tpu shards.\")\nflags.DEFINE_string(\"tpu_job_name\", None,\n \"TPU job name. TPUEstimator can auto-infer this but if the \"\n \"configuration is esoteric it should be provided here.\")\nflags.DEFINE_integer(\"iterations_per_loop\", 100,\n \"Number of iterations in a TPU training loop.\")\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU.\")\nflags.DEFINE_bool(\"use_tpu_estimator\", False, \"Whether to use TPUEstimator. 
\"\n \"This is always enabled when use_tpu is True.\")\nflags.DEFINE_bool(\"xla_compile\", False,\n \"Whether to use XLA to compile model_fn.\")\nflags.DEFINE_integer(\"xla_jit_level\", -1,\n \"GlobalJitLevel to use while compiling the full graph.\")\nflags.DEFINE_integer(\"tpu_infeed_sleep_secs\", None,\n \"How long to sleep the infeed thread.\")\nflags.DEFINE_bool(\"generate_data\", False, \"Generate data before training?\")\nflags.DEFINE_string(\"tmp_dir\", \"/tmp/t2t_datagen\",\n \"Temporary storage directory, used if --generate_data.\")\nflags.DEFINE_bool(\"profile\", False, \"Profile performance?\")\nflags.DEFINE_integer(\"inter_op_parallelism_threads\", 0,\n \"Number of inter_op_parallelism_threads to use for CPU. \"\n \"See TensorFlow config.proto for details.\")\nflags.DEFINE_integer(\"intra_op_parallelism_threads\", 0,\n \"Number of intra_op_parallelism_threads to use for CPU. \"\n \"See TensorFlow config.proto for details.\")\n# TODO(lukaszkaiser): resolve memory and variable assign issues and set to True.\nflags.DEFINE_bool(\n \"optionally_use_dist_strat\", False,\n \"Whether to use TensorFlow DistributionStrategy instead of explicitly \"\n \"replicating the model. DistributionStrategy is used only if the \"\n \"model replication configuration is supported by the DistributionStrategy.\")\n# To maintain compatibility with some internal libs, we guard against these flag\n# definitions possibly erroring. Apologies for the ugliness.\ntry:\n flags.DEFINE_string(\"master\", \"\", \"Address of TensorFlow master.\")\n flags.DEFINE_string(\"output_dir\", \"\", \"Base output directory for run.\")\n flags.DEFINE_string(\"schedule\", \"continuous_train_and_eval\",\n \"Method of Experiment to run.\")\n flags.DEFINE_integer(\"eval_steps\", 100,\n \"Number of steps in evaluation. By default, eval will \"\n \"stop after eval_steps or when it runs through the eval \"\n \"dataset once in full, whichever comes first, so this \"\n \"can be a very large number.\")\nexcept: # pylint: disable=bare-except\n pass\n\nflags.DEFINE_string(\"std_server_protocol\", \"grpc\",\n \"Protocol for tf.train.Server.\")\n\n# Google Cloud TPUs\nflags.DEFINE_string(\"cloud_tpu_name\", \"%s-tpu\" % os.getenv(\"USER\"),\n \"Name of Cloud TPU instance to use or create.\")\n\n# Google Cloud ML Engine\nflags.DEFINE_bool(\"cloud_mlengine\", False,\n \"Whether to launch on Cloud ML Engine.\")\nflags.DEFINE_string(\"cloud_mlengine_master_type\", None,\n \"Machine type for master on Cloud ML Engine. \"\n \"If provided, overrides default selections based on \"\n \"--worker_gpu. User is responsible for ensuring \"\n \"type is valid and that --worker_gpu matches number of \"\n \"GPUs on machine type. See documentation: \"\n \"https://cloud.google.com/ml-engine/reference/rest/v1/\"\n \"projects.jobs#traininginput\")\n# Hyperparameter tuning on Cloud ML Engine\n# Pass an --hparams_range to enable\nflags.DEFINE_string(\"autotune_objective\", None,\n \"TensorBoard metric name to optimize.\")\nflags.DEFINE_bool(\"autotune_maximize\", True,\n \"Whether to maximize (vs. minimize) autotune_objective.\")\nflags.DEFINE_integer(\"autotune_max_trials\", 10,\n \"Maximum number of tuning experiments to run.\")\nflags.DEFINE_integer(\"autotune_parallel_trials\", 1,\n \"How many trials to run in parallel (will spin up this \"\n \"many jobs.\")\n# Note than in open-source TensorFlow, the dash gets converted to an underscore,\n# so access is FLAGS.job_dir.\nflags.DEFINE_string(\"job-dir\", None,\n \"DO NOT USE. 
Exists only for Cloud ML Engine to pass in \"\n \"during hyperparameter tuning. Overrides --output_dir.\")\nflags.DEFINE_integer(\"log_step_count_steps\", 100,\n \"Number of local steps after which progress is printed \"\n \"out\")\n\n\n\ndef set_hparams_from_args(args):\n \"\"\"Set hparams overrides from unparsed args list.\"\"\"\n if not args:\n return\n\n hp_prefix = \"--hp_\"\n tf.logging.info(\"Found unparsed command-line arguments. Checking if any \"\n \"start with %s and interpreting those as hparams \"\n \"settings.\", hp_prefix)\n\n pairs = []\n i = 0\n while i < len(args):\n arg = args[i]\n if arg.startswith(hp_prefix):\n pairs.append((arg[len(hp_prefix):], args[i+1]))\n i += 2\n else:\n tf.logging.warn(\"Found unknown flag: %s\", arg)\n i += 1\n\n as_hparams = \",\".join([\"%s=%s\" % (key, val) for key, val in pairs])\n if FLAGS.hparams:\n as_hparams = \",\" + as_hparams\n FLAGS.hparams += as_hparams\n\n\ndef create_hparams():\n \"\"\"Create hparams.\"\"\"\n if FLAGS.use_tpu and \"tpu\" not in FLAGS.hparams_set:\n tf.logging.warn(\"Not all hyperparameter sets work on TPU. \"\n \"Prefer hparams_sets with a '_tpu' suffix, \"\n \"e.g. transformer_tpu, if available for your model.\")\n hparams_path = os.path.join(FLAGS.output_dir, \"hparams.json\")\n return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,\n hparams_path=hparams_path)\n\n\ndef create_experiment_fn():\n return trainer_lib.create_experiment_fn(\n model_name=FLAGS.model,\n problem_name=FLAGS.problem,\n data_dir=os.path.expanduser(FLAGS.data_dir),\n train_steps=FLAGS.train_steps,\n eval_steps=FLAGS.eval_steps,\n min_eval_frequency=FLAGS.local_eval_frequency,\n schedule=FLAGS.schedule,\n eval_throttle_seconds=FLAGS.eval_throttle_seconds,\n export=FLAGS.export_saved_model,\n decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams),\n use_tfdbg=FLAGS.tfdbg,\n use_dbgprofile=FLAGS.dbgprofile,\n eval_early_stopping_steps=FLAGS.eval_early_stopping_steps,\n eval_early_stopping_metric=FLAGS.eval_early_stopping_metric,\n eval_early_stopping_metric_delta=FLAGS.eval_early_stopping_metric_delta,\n eval_early_stopping_metric_minimize=FLAGS\n .eval_early_stopping_metric_minimize,\n eval_timeout_mins=FLAGS.eval_timeout_mins,\n eval_use_test_set=FLAGS.eval_use_test_set,\n use_tpu=FLAGS.use_tpu,\n use_tpu_estimator=FLAGS.use_tpu_estimator,\n use_xla=FLAGS.xla_compile,\n warm_start_from=FLAGS.warm_start_from,\n decode_from_file=FLAGS.decode_from_file,\n decode_to_file=FLAGS.decode_to_file,\n decode_reference=FLAGS.decode_reference,\n std_server_protocol=FLAGS.std_server_protocol)\n\n\ndef create_run_config(hp, output_dir=None):\n \"\"\"Create a run config.\n\n Args:\n hp: model hyperparameters\n output_dir: model's output directory, defaults to output_dir flag.\n\n Returns:\n a run config\n \"\"\"\n save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)\n save_ckpt_secs = FLAGS.save_checkpoints_secs or None\n if save_ckpt_secs:\n save_ckpt_steps = None\n assert FLAGS.output_dir or FLAGS.checkpoint_path\n tpu_config_extra_kwargs = {}\n if FLAGS.tpu_job_name is not None:\n tpu_config_extra_kwargs[\"tpu_job_name\"] = FLAGS.tpu_job_name\n\n if getattr(hp, \"mtf_mode\", False):\n save_ckpt_steps = None # Disable the default saver\n save_ckpt_secs = None # Disable the default saver\n tpu_config_extra_kwargs = {\n \"num_cores_per_replica\": 1,\n \"per_host_input_for_training\": tpu_config.InputPipelineConfig.BROADCAST,\n }\n\n # the various custom getters we have written do not play well together yet.\n # 
TODO(noam): ask rsepassi for help here.\n daisy_chain_variables = (\n hp.daisy_chain_variables and\n hp.activation_dtype == \"float32\" and\n hp.weight_dtype == \"float32\")\n return trainer_lib.create_run_config(\n model_name=FLAGS.model,\n model_dir=output_dir or os.path.expanduser(FLAGS.output_dir),\n master=FLAGS.master,\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.tpu_num_shards,\n log_device_placement=FLAGS.log_device_placement,\n save_checkpoints_steps=save_ckpt_steps,\n save_checkpoints_secs=save_ckpt_secs,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n num_gpus=FLAGS.worker_gpu,\n gpu_order=FLAGS.gpu_order,\n num_async_replicas=FLAGS.worker_replicas,\n gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,\n enable_graph_rewriter=FLAGS.enable_graph_rewriter,\n use_tpu=FLAGS.use_tpu,\n use_tpu_estimator=FLAGS.use_tpu_estimator,\n xla_jit_level=FLAGS.xla_jit_level,\n schedule=FLAGS.schedule,\n no_data_parallelism=hp.no_data_parallelism,\n optionally_use_dist_strat=FLAGS.optionally_use_dist_strat,\n daisy_chain_variables=daisy_chain_variables,\n ps_replicas=FLAGS.ps_replicas,\n ps_job=FLAGS.ps_job,\n ps_gpu=FLAGS.ps_gpu,\n sync=FLAGS.sync,\n worker_id=FLAGS.worker_id,\n worker_job=FLAGS.worker_job,\n random_seed=FLAGS.random_seed,\n tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,\n inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n log_step_count_steps=FLAGS.log_step_count_steps,\n intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,\n tpu_config_extra_kwargs=tpu_config_extra_kwargs,\n cloud_tpu_name=FLAGS.cloud_tpu_name)\n\n\ndef generate_data():\n # Generate data if requested.\n data_dir = os.path.expanduser(FLAGS.data_dir)\n tmp_dir = os.path.expanduser(FLAGS.tmp_dir)\n tf.gfile.MakeDirs(data_dir)\n tf.gfile.MakeDirs(tmp_dir)\n\n problem_name = FLAGS.problem\n tf.logging.info(\"Generating data for %s\" % problem_name)\n registry.problem(problem_name).generate_data(data_dir, tmp_dir)\n\n\[email protected]\ndef profile_context():\n if FLAGS.profile:\n with tf.contrib.tfprof.ProfileContext(\n \"t2tprof\", trace_steps=range(100), dump_steps=range(100)) as pctx:\n opts = tf.profiler.ProfileOptionBuilder.time_and_memory()\n pctx.add_auto_profiling(\"op\", opts, range(100))\n yield\n else:\n yield\n\n\ndef maybe_log_registry_and_exit():\n if FLAGS.registry_help:\n tf.logging.info(registry.help_string())\n sys.exit(0)\n\n\ndef is_chief():\n schedules = [\"train\", \"train_and_evaluate\", \"continuous_train_and_eval\"]\n return FLAGS.worker_id == 0 and FLAGS.schedule in schedules\n\n\ndef save_metadata(hparams):\n \"\"\"Saves FLAGS and hparams to output_dir.\"\"\"\n output_dir = os.path.expanduser(FLAGS.output_dir)\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n # Save FLAGS in txt file\n if hasattr(FLAGS, \"flags_into_string\"):\n flags_str = FLAGS.flags_into_string()\n t2t_flags_str = \"\\n\".join([\n \"--%s=%s\" % (f.name, f.value)\n for f in FLAGS.flags_by_module_dict()[\"tensor2tensor.utils.flags\"]\n ])\n else:\n flags_dict = FLAGS.__dict__[\"__flags\"]\n flags_str = \"\\n\".join(\n [\"--%s=%s\" % (name, str(f)) for (name, f) in flags_dict.items()])\n t2t_flags_str = None\n\n flags_txt = os.path.join(output_dir, \"flags.txt\")\n with tf.gfile.Open(flags_txt, \"w\") as f:\n f.write(flags_str)\n\n if t2t_flags_str:\n t2t_flags_txt = os.path.join(output_dir, \"flags_t2t.txt\")\n with tf.gfile.Open(t2t_flags_txt, \"w\") as f:\n 
f.write(t2t_flags_str)\n\n # Save hparams as hparams.json\n new_hparams = hparams_lib.copy_hparams(hparams)\n # Modality class is not JSON serializable so remove.\n new_hparams.del_hparam(\"modality\")\n\n hparams_fname = os.path.join(output_dir, \"hparams.json\")\n with tf.gfile.Open(hparams_fname, \"w\") as f:\n f.write(new_hparams.to_json(indent=0, sort_keys=True))\n\n\ndef execute_schedule(exp):\n if not hasattr(exp, FLAGS.schedule):\n raise ValueError(\n \"Experiment has no method %s, from --schedule\" % FLAGS.schedule)\n with profile_context():\n getattr(exp, FLAGS.schedule)()\n\n\ndef run_std_server():\n exp = trainer_lib.T2TExperiment(*([None] * 5))\n exp.run_std_server()\n\n\ndef main(argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)\n\n # If we just have to print the registry, do that and exit early.\n maybe_log_registry_and_exit()\n\n # Create HParams.\n if argv:\n set_hparams_from_args(argv[1:])\n hparams = create_hparams()\n\n if FLAGS.schedule == \"train\" or FLAGS.schedule == \"train_eval_and_decode\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_START, hparams=hparams)\n if FLAGS.schedule == \"run_std_server\":\n run_std_server()\n mlperf_log.transformer_print(\n key=mlperf_log.RUN_SET_RANDOM_SEED, value=FLAGS.random_seed,\n hparams=hparams)\n trainer_lib.set_random_seed(FLAGS.random_seed)\n\n if FLAGS.cloud_mlengine:\n cloud_mlengine.launch()\n return\n\n if FLAGS.generate_data:\n generate_data()\n\n if cloud_mlengine.job_dir():\n FLAGS.output_dir = cloud_mlengine.job_dir()\n\n exp_fn = create_experiment_fn()\n exp = exp_fn(create_run_config(hparams), hparams)\n if is_chief():\n save_metadata(hparams)\n execute_schedule(exp)\n if FLAGS.schedule != \"train\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_FINAL,\n hparams=hparams)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n", "path": "tensor2tensor/bin/t2t_trainer.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Train and evaluate.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport sys\nfrom tensor2tensor import models # pylint: disable=unused-import\nfrom tensor2tensor import problems as problems_lib # pylint: disable=unused-import\nfrom tensor2tensor.data_generators import problem # pylint: disable=unused-import\n\nfrom tensor2tensor.utils import cloud_mlengine\nfrom tensor2tensor.utils import decoding\nfrom tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import\nfrom tensor2tensor.utils import hparams_lib\nfrom tensor2tensor.utils import mlperf_log\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\nfrom tensor2tensor.utils import usr_dir\nimport tensorflow as tf\n\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\n\n\nflags = tf.flags\nFLAGS = 
flags.FLAGS\n\n# See utils/flags.py for additional command-line flags.\nflags.DEFINE_string(\"t2t_usr_dir\", None,\n \"Path to a Python module that will be imported. The \"\n \"__init__.py file should include the necessary imports. \"\n \"The imported files should contain registrations, \"\n \"e.g. @registry.register_model calls, that will then be \"\n \"available to the t2t-trainer.\")\nflags.DEFINE_integer(\"random_seed\", None, \"Random seed.\")\nflags.DEFINE_integer(\"tpu_num_shards\", 8, \"Number of tpu shards.\")\nflags.DEFINE_string(\"tpu_job_name\", None,\n \"TPU job name. TPUEstimator can auto-infer this but if the \"\n \"configuration is esoteric it should be provided here.\")\nflags.DEFINE_integer(\"iterations_per_loop\", 100,\n \"Number of iterations in a TPU training loop.\")\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU.\")\nflags.DEFINE_bool(\"use_tpu_estimator\", False, \"Whether to use TPUEstimator. \"\n \"This is always enabled when use_tpu is True.\")\nflags.DEFINE_bool(\"xla_compile\", False,\n \"Whether to use XLA to compile model_fn.\")\nflags.DEFINE_integer(\"xla_jit_level\", -1,\n \"GlobalJitLevel to use while compiling the full graph.\")\nflags.DEFINE_integer(\"tpu_infeed_sleep_secs\", None,\n \"How long to sleep the infeed thread.\")\nflags.DEFINE_bool(\"generate_data\", False, \"Generate data before training?\")\nflags.DEFINE_string(\"tmp_dir\", \"/tmp/t2t_datagen\",\n \"Temporary storage directory, used if --generate_data.\")\nflags.DEFINE_bool(\"profile\", False, \"Profile performance?\")\nflags.DEFINE_integer(\"inter_op_parallelism_threads\", 0,\n \"Number of inter_op_parallelism_threads to use for CPU. \"\n \"See TensorFlow config.proto for details.\")\nflags.DEFINE_integer(\"intra_op_parallelism_threads\", 0,\n \"Number of intra_op_parallelism_threads to use for CPU. \"\n \"See TensorFlow config.proto for details.\")\n# TODO(lukaszkaiser): resolve memory and variable assign issues and set to True.\nflags.DEFINE_bool(\n \"optionally_use_dist_strat\", False,\n \"Whether to use TensorFlow DistributionStrategy instead of explicitly \"\n \"replicating the model. DistributionStrategy is used only if the \"\n \"model replication configuration is supported by the DistributionStrategy.\")\n# To maintain compatibility with some internal libs, we guard against these flag\n# definitions possibly erroring. Apologies for the ugliness.\ntry:\n flags.DEFINE_string(\"master\", \"\", \"Address of TensorFlow master.\")\n flags.DEFINE_string(\"output_dir\", \"\", \"Base output directory for run.\")\n flags.DEFINE_string(\"schedule\", \"continuous_train_and_eval\",\n \"Method of Experiment to run.\")\n flags.DEFINE_integer(\"eval_steps\", 100,\n \"Number of steps in evaluation. By default, eval will \"\n \"stop after eval_steps or when it runs through the eval \"\n \"dataset once in full, whichever comes first, so this \"\n \"can be a very large number.\")\nexcept: # pylint: disable=bare-except\n pass\n\nflags.DEFINE_string(\"std_server_protocol\", \"grpc\",\n \"Protocol for tf.train.Server.\")\n\n# Google Cloud TPUs\nflags.DEFINE_string(\"cloud_tpu_name\", \"%s-tpu\" % os.getenv(\"USER\"),\n \"Name of Cloud TPU instance to use or create.\")\n\n# Google Cloud ML Engine\nflags.DEFINE_bool(\"cloud_mlengine\", False,\n \"Whether to launch on Cloud ML Engine.\")\nflags.DEFINE_string(\"cloud_mlengine_master_type\", None,\n \"Machine type for master on Cloud ML Engine. \"\n \"If provided, overrides default selections based on \"\n \"--worker_gpu. 
User is responsible for ensuring \"\n \"type is valid and that --worker_gpu matches number of \"\n \"GPUs on machine type. See documentation: \"\n \"https://cloud.google.com/ml-engine/reference/rest/v1/\"\n \"projects.jobs#traininginput\")\n# Hyperparameter tuning on Cloud ML Engine\n# Pass an --hparams_range to enable\nflags.DEFINE_string(\"autotune_objective\", None,\n \"TensorBoard metric name to optimize.\")\nflags.DEFINE_bool(\"autotune_maximize\", True,\n \"Whether to maximize (vs. minimize) autotune_objective.\")\nflags.DEFINE_integer(\"autotune_max_trials\", 10,\n \"Maximum number of tuning experiments to run.\")\nflags.DEFINE_integer(\"autotune_parallel_trials\", 1,\n \"How many trials to run in parallel (will spin up this \"\n \"many jobs.\")\n# Note than in open-source TensorFlow, the dash gets converted to an underscore,\n# so access is FLAGS.job_dir.\nflags.DEFINE_string(\"job-dir\", None,\n \"DO NOT USE. Exists only for Cloud ML Engine to pass in \"\n \"during hyperparameter tuning. Overrides --output_dir.\")\nflags.DEFINE_integer(\"log_step_count_steps\", 100,\n \"Number of local steps after which progress is printed \"\n \"out\")\n\n\n\ndef set_hparams_from_args(args):\n \"\"\"Set hparams overrides from unparsed args list.\"\"\"\n if not args:\n return\n\n hp_prefix = \"--hp_\"\n tf.logging.info(\"Found unparsed command-line arguments. Checking if any \"\n \"start with %s and interpreting those as hparams \"\n \"settings.\", hp_prefix)\n\n pairs = []\n i = 0\n while i < len(args):\n arg = args[i]\n if arg.startswith(hp_prefix):\n pairs.append((arg[len(hp_prefix):], args[i+1]))\n i += 2\n else:\n tf.logging.warn(\"Found unknown flag: %s\", arg)\n i += 1\n\n as_hparams = \",\".join([\"%s=%s\" % (key, val) for key, val in pairs])\n if FLAGS.hparams:\n as_hparams = \",\" + as_hparams\n FLAGS.hparams += as_hparams\n\n\ndef create_hparams():\n \"\"\"Create hparams.\"\"\"\n if FLAGS.use_tpu and \"tpu\" not in FLAGS.hparams_set:\n tf.logging.warn(\"Not all hyperparameter sets work on TPU. \"\n \"Prefer hparams_sets with a '_tpu' suffix, \"\n \"e.g. 
transformer_tpu, if available for your model.\")\n hparams_path = os.path.join(FLAGS.output_dir, \"hparams.json\")\n return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,\n hparams_path=hparams_path)\n\n\ndef create_experiment_fn():\n return trainer_lib.create_experiment_fn(\n model_name=FLAGS.model,\n problem_name=FLAGS.problem,\n data_dir=os.path.expanduser(FLAGS.data_dir),\n train_steps=FLAGS.train_steps,\n eval_steps=FLAGS.eval_steps,\n min_eval_frequency=FLAGS.local_eval_frequency,\n schedule=FLAGS.schedule,\n eval_throttle_seconds=FLAGS.eval_throttle_seconds,\n export=FLAGS.export_saved_model,\n decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams),\n use_tfdbg=FLAGS.tfdbg,\n use_dbgprofile=FLAGS.dbgprofile,\n eval_early_stopping_steps=FLAGS.eval_early_stopping_steps,\n eval_early_stopping_metric=FLAGS.eval_early_stopping_metric,\n eval_early_stopping_metric_delta=FLAGS.eval_early_stopping_metric_delta,\n eval_early_stopping_metric_minimize=FLAGS\n .eval_early_stopping_metric_minimize,\n eval_timeout_mins=FLAGS.eval_timeout_mins,\n eval_use_test_set=FLAGS.eval_use_test_set,\n use_tpu=FLAGS.use_tpu,\n use_tpu_estimator=FLAGS.use_tpu_estimator,\n use_xla=FLAGS.xla_compile,\n warm_start_from=FLAGS.warm_start_from,\n decode_from_file=FLAGS.decode_from_file,\n decode_to_file=FLAGS.decode_to_file,\n decode_reference=FLAGS.decode_reference,\n std_server_protocol=FLAGS.std_server_protocol)\n\n\ndef create_run_config(hp, output_dir=None):\n \"\"\"Create a run config.\n\n Args:\n hp: model hyperparameters\n output_dir: model's output directory, defaults to output_dir flag.\n\n Returns:\n a run config\n \"\"\"\n save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)\n save_ckpt_secs = FLAGS.save_checkpoints_secs or None\n if save_ckpt_secs:\n save_ckpt_steps = None\n assert FLAGS.output_dir or FLAGS.checkpoint_path\n tpu_config_extra_kwargs = {}\n if FLAGS.tpu_job_name is not None:\n tpu_config_extra_kwargs[\"tpu_job_name\"] = FLAGS.tpu_job_name\n\n if getattr(hp, \"mtf_mode\", False):\n save_ckpt_steps = None # Disable the default saver\n save_ckpt_secs = None # Disable the default saver\n tpu_config_extra_kwargs = {\n \"num_cores_per_replica\": 1,\n \"per_host_input_for_training\": tpu_config.InputPipelineConfig.BROADCAST,\n }\n\n # the various custom getters we have written do not play well together yet.\n # TODO(noam): ask rsepassi for help here.\n daisy_chain_variables = (\n hp.daisy_chain_variables and\n hp.activation_dtype == \"float32\" and\n hp.weight_dtype == \"float32\")\n return trainer_lib.create_run_config(\n model_name=FLAGS.model,\n model_dir=output_dir or os.path.expanduser(FLAGS.output_dir),\n master=FLAGS.master,\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.tpu_num_shards,\n log_device_placement=FLAGS.log_device_placement,\n save_checkpoints_steps=save_ckpt_steps,\n save_checkpoints_secs=save_ckpt_secs,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n num_gpus=FLAGS.worker_gpu,\n gpu_order=FLAGS.gpu_order,\n num_async_replicas=FLAGS.worker_replicas,\n gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,\n enable_graph_rewriter=FLAGS.enable_graph_rewriter,\n use_tpu=FLAGS.use_tpu,\n use_tpu_estimator=FLAGS.use_tpu_estimator,\n xla_jit_level=FLAGS.xla_jit_level,\n schedule=FLAGS.schedule,\n no_data_parallelism=hp.no_data_parallelism,\n optionally_use_dist_strat=FLAGS.optionally_use_dist_strat,\n daisy_chain_variables=daisy_chain_variables,\n 
ps_replicas=FLAGS.ps_replicas,\n ps_job=FLAGS.ps_job,\n ps_gpu=FLAGS.ps_gpu,\n sync=FLAGS.sync,\n worker_id=FLAGS.worker_id,\n worker_job=FLAGS.worker_job,\n random_seed=FLAGS.random_seed,\n tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,\n inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n log_step_count_steps=FLAGS.log_step_count_steps,\n intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,\n tpu_config_extra_kwargs=tpu_config_extra_kwargs,\n cloud_tpu_name=FLAGS.cloud_tpu_name)\n\n\ndef generate_data():\n # Generate data if requested.\n data_dir = os.path.expanduser(FLAGS.data_dir)\n tmp_dir = os.path.expanduser(FLAGS.tmp_dir)\n tf.gfile.MakeDirs(data_dir)\n tf.gfile.MakeDirs(tmp_dir)\n\n problem_name = FLAGS.problem\n tf.logging.info(\"Generating data for %s\" % problem_name)\n registry.problem(problem_name).generate_data(data_dir, tmp_dir)\n\n\[email protected]\ndef profile_context():\n if FLAGS.profile:\n with tf.contrib.tfprof.ProfileContext(\n \"t2tprof\", trace_steps=range(100), dump_steps=range(100)) as pctx:\n opts = tf.profiler.ProfileOptionBuilder.time_and_memory()\n pctx.add_auto_profiling(\"op\", opts, range(100))\n yield\n else:\n yield\n\n\ndef maybe_log_registry_and_exit():\n if FLAGS.registry_help:\n tf.logging.info(registry.help_string())\n sys.exit(0)\n\n\ndef is_chief():\n schedules = [\"train\", \"train_and_evaluate\", \"continuous_train_and_eval\"]\n return FLAGS.worker_id == 0 and FLAGS.schedule in schedules\n\n\ndef save_metadata(hparams):\n \"\"\"Saves FLAGS and hparams to output_dir.\"\"\"\n output_dir = os.path.expanduser(FLAGS.output_dir)\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n # Save FLAGS in txt file\n if hasattr(FLAGS, \"flags_into_string\"):\n flags_str = FLAGS.flags_into_string()\n t2t_flags_str = \"\\n\".join([\n \"--%s=%s\" % (f.name, f.value)\n for f in FLAGS.flags_by_module_dict()[\"tensor2tensor.utils.flags\"]\n ])\n else:\n flags_dict = FLAGS.__dict__[\"__flags\"]\n flags_str = \"\\n\".join(\n [\"--%s=%s\" % (name, str(f)) for (name, f) in flags_dict.items()])\n t2t_flags_str = None\n\n flags_txt = os.path.join(output_dir, \"flags.txt\")\n with tf.gfile.Open(flags_txt, \"w\") as f:\n f.write(flags_str)\n\n if t2t_flags_str:\n t2t_flags_txt = os.path.join(output_dir, \"flags_t2t.txt\")\n with tf.gfile.Open(t2t_flags_txt, \"w\") as f:\n f.write(t2t_flags_str)\n\n # Save hparams as hparams.json\n new_hparams = hparams_lib.copy_hparams(hparams)\n # Modality class is not JSON serializable so remove.\n new_hparams.del_hparam(\"modality\")\n\n hparams_fname = os.path.join(output_dir, \"hparams.json\")\n with tf.gfile.Open(hparams_fname, \"w\") as f:\n f.write(new_hparams.to_json(indent=0, sort_keys=True))\n\n\ndef execute_schedule(exp):\n if not hasattr(exp, FLAGS.schedule):\n raise ValueError(\n \"Experiment has no method %s, from --schedule\" % FLAGS.schedule)\n with profile_context():\n getattr(exp, FLAGS.schedule)()\n\n\ndef run_std_server():\n exp = trainer_lib.T2TExperiment(*([None] * 5))\n exp.run_std_server()\n\n\ndef main(argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)\n\n # If we just have to print the registry, do that and exit early.\n maybe_log_registry_and_exit()\n\n # Create HParams.\n if argv:\n set_hparams_from_args(argv[1:])\n if FLAGS.schedule != \"run_std_server\":\n hparams = create_hparams()\n\n if FLAGS.schedule == \"train\" or FLAGS.schedule == \"train_eval_and_decode\":\n 
mlperf_log.transformer_print(key=mlperf_log.RUN_START, hparams=hparams)\n if FLAGS.schedule == \"run_std_server\":\n run_std_server()\n mlperf_log.transformer_print(\n key=mlperf_log.RUN_SET_RANDOM_SEED, value=FLAGS.random_seed,\n hparams=hparams)\n trainer_lib.set_random_seed(FLAGS.random_seed)\n\n if FLAGS.cloud_mlengine:\n cloud_mlengine.launch()\n return\n\n if FLAGS.generate_data:\n generate_data()\n\n if cloud_mlengine.job_dir():\n FLAGS.output_dir = cloud_mlengine.job_dir()\n\n exp_fn = create_experiment_fn()\n exp = exp_fn(create_run_config(hparams), hparams)\n if is_chief():\n save_metadata(hparams)\n execute_schedule(exp)\n if FLAGS.schedule != \"train\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_FINAL,\n hparams=hparams)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n", "path": "tensor2tensor/bin/t2t_trainer.py"}]} |
gh_patches_debug_1445 | rasdani/github-patches | git_diff | mindsdb__lightwood-868 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[TS] error if 'group by' column contains a single value
If 'group by' column has single value for all cells, then error appears. Can be replicated with any ts-dataset if add filter to data select query:
```
create predictor p_name from int_name (select * from test_data.ts_dataset where location='good') predict rental_price order by date group by location window 5 horizon 3;
```
error is
```
Traceback (most recent call last):
File "./mindsdb/interfaces/model/learn_process.py", line 175, in run_learn
run_fit(predictor_id, df)
File "./mindsdb/utilities/functions.py", line 56, in wrapper
return func(*args, **kwargs)
File "./mindsdb/interfaces/model/learn_process.py", line 148, in run_fit
raise e
File "./mindsdb/interfaces/model/learn_process.py", line 119, in run_fit
predictor.learn(df)
File "/home/maxs/dev/mdb/venv38/sources/lightwood/lightwood/helpers/log.py", line 30, in wrap
result = f(predictor, *args, **kw)
File "/tmp/52931846b2322b65fafeb5782f9a3e9e76650c2aac7cecf516512142146485069.py", line 450, in learn
self.analyze_data(data)
File "/home/maxs/dev/mdb/venv38/sources/lightwood/lightwood/helpers/log.py", line 30, in wrap
result = f(predictor, *args, **kw)
File "/tmp/52931846b2322b65fafeb5782f9a3e9e76650c2aac7cecf516512142146485069.py", line 137, in analyze_data
self.statistical_analysis = lightwood.data.statistical_analysis(
File "/home/maxs/dev/mdb/venv38/sources/lightwood/lightwood/data/statistical_analysis.py", line 120, in statistical_analysis
if dtypes[col] in (dtype.categorical, dtype.binary, dtype.tags):
KeyError: 'location'
```
--- END ISSUE ---
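
The traceback points to a likely mechanism: when the group-by column holds only one value, type inference appears to flag it as an identifier, so it is dropped from `dtype_dict` even though it is still forced in as a group-by/input column, and the subsequent statistical analysis then fails on the dtype lookup. The sketch below only illustrates this suspected flow; the dictionary contents, the `"No Information"` reason string, and the guarded-deletion idea at the end are assumptions for illustration, not the confirmed behaviour or fix.

```python
# Minimal sketch of the suspected failure mode (illustrative values only).
dtype_dict = {"date": "datetime", "location": "categorical", "rental_price": "float"}
identifiers = {"location": "No Information"}  # constant column flagged as an identifier (assumed)
group_by = ["location"]

# generate_json_ai unconditionally drops identifier columns from dtype_dict ...
for k in identifiers:
    del dtype_dict[k]

# ... yet the group-by column is still kept as an input column downstream,
# so a later dtype lookup reproduces the reported error:
try:
    for col in ["date", "location", "rental_price"]:
        _ = dtype_dict[col]
except KeyError as e:
    print(f"KeyError: {e}")  # -> KeyError: 'location'

# A guarded deletion that keeps group-by columns would avoid the lookup failure
# (hypothetical sketch, not necessarily the actual fix):
# for k in list(identifiers):
#     if k not in group_by:
#         del dtype_dict[k]
```
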
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightwood/api/json_ai.py`
Content:
```
1 # TODO: _add_implicit_values unit test ensures NO changes for a fully specified file.
2 from copy import deepcopy
3 from lightwood.helpers.templating import call, inline_dict, align
4 from lightwood.api import dtype
5 from lightwood.api.types import (
6 JsonAI,
7 TypeInformation,
8 StatisticalAnalysis,
9 ProblemDefinition,
10 )
11 import inspect
12 from lightwood.helpers.log import log
13
14
15 # For custom modules, we create a module loader with necessary imports below
16 IMPORT_EXTERNAL_DIRS = """
17 for import_dir in [os.path.join(os.path.expanduser('~/lightwood_modules'), lightwood_version.replace('.', '_')), os.path.join('/etc/lightwood_modules', lightwood_version.replace('.', '_'))]:
18 if os.path.exists(import_dir) and os.access(import_dir, os.R_OK):
19 for file_name in list(os.walk(import_dir))[0][2]:
20 if file_name[-3:] != '.py':
21 continue
22 mod_name = file_name[:-3]
23 loader = importlib.machinery.SourceFileLoader(mod_name,
24 os.path.join(import_dir, file_name))
25 module = ModuleType(loader.name)
26 loader.exec_module(module)
27 sys.modules[mod_name] = module
28 exec(f'import {mod_name}')
29 """ # noqa
30
31 IMPORTS = """
32 import lightwood
33 from lightwood import __version__ as lightwood_version
34 from lightwood.analysis import *
35 from lightwood.api import *
36 from lightwood.data import *
37 from lightwood.encoder import *
38 from lightwood.ensemble import *
39 from lightwood.helpers.device import *
40 from lightwood.helpers.general import *
41 from lightwood.helpers.log import *
42 from lightwood.helpers.numeric import *
43 from lightwood.helpers.imputers import *
44 from lightwood.helpers.parallelism import *
45 from lightwood.helpers.seed import *
46 from lightwood.helpers.text import *
47 from lightwood.helpers.torch import *
48 from lightwood.mixer import *
49 import pandas as pd
50 from typing import Dict, List, Union
51 import os
52 from types import ModuleType
53 import importlib.machinery
54 import sys
55 import time
56 """
57
58
59 def lookup_encoder(
60 col_dtype: str,
61 col_name: str,
62 is_target: bool,
63 problem_defintion: ProblemDefinition,
64 is_target_predicting_encoder: bool,
65 statistical_analysis: StatisticalAnalysis,
66 ):
67 """
68     Assign a default encoder for a given column based on its data type, and whether it is a target. Encoders take in raw (but cleaned) data and return a feature representation. This function assigns, per data type, what the featurizer should be. This function runs on each column within the dataset available for model building to assign how it should be featurized.
69
70     Users may override this to create a custom encoder that enables their own featurization process. However, in order to generate template JSON-AI, this code runs automatically. Users may edit the generated syntax and use custom approaches while building the model.
71
72     For each encoder, "args" may be passed. These args depend on what an encoder requires during its preparation call.
73
74 :param col_dtype: A data-type of a column specified
75 :param col_name: The name of the column
76 :param is_target: Whether the column is the target for prediction. If true, only certain possible feature representations are allowed, particularly for complex data types.
77 :param problem_definition: The ``ProblemDefinition`` criteria; this populates specifics on how models and encoders may be trained.
78     :param is_target_predicting_encoder: Whether the target is predicted directly by its encoder (e.g. single text-column classification); if so, the encoder is not used in embedding mode.
79 """ # noqa
80
81 tss = problem_defintion.timeseries_settings
82 encoder_lookup = {
83 dtype.integer: "NumericEncoder",
84 dtype.float: "NumericEncoder",
85 dtype.binary: "BinaryEncoder",
86 dtype.categorical: "CategoricalAutoEncoder"
87 if statistical_analysis is None
88 or len(statistical_analysis.histograms[col_name]) > 100
89 else "OneHotEncoder",
90 dtype.tags: "MultiHotEncoder",
91 dtype.date: "DatetimeEncoder",
92 dtype.datetime: "DatetimeEncoder",
93 dtype.image: "Img2VecEncoder",
94 dtype.rich_text: "PretrainedLangEncoder",
95 dtype.short_text: "CategoricalAutoEncoder",
96 dtype.quantity: "NumericEncoder",
97 dtype.audio: "MFCCEncoder",
98 dtype.num_array: "NumArrayEncoder",
99 dtype.cat_array: "CatArrayEncoder",
100 dtype.num_tsarray: "TimeSeriesEncoder",
101 dtype.cat_tsarray: "TimeSeriesEncoder",
102 }
103
104 # If column is a target, only specific feature representations are allowed that enable supervised tasks
105 target_encoder_lookup_override = {
106 dtype.rich_text: "VocabularyEncoder",
107 dtype.categorical: "OneHotEncoder",
108 }
109
110 # Assign a default encoder to each column.
111 encoder_dict = {"module": encoder_lookup[col_dtype], "args": {}}
112
113 # If the column is a target, ensure that the feature representation can enable supervised tasks
114 if is_target:
115 encoder_dict["args"] = {"is_target": "True"}
116
117 if col_dtype in target_encoder_lookup_override:
118 encoder_dict["module"] = target_encoder_lookup_override[col_dtype]
119
120 if col_dtype in (dtype.categorical, dtype.binary):
121 if problem_defintion.unbias_target:
122 encoder_dict["args"][
123 "target_weights"
124 ] = "$statistical_analysis.target_weights"
125 if problem_defintion.target_weights is not None:
126 encoder_dict["args"][
127 "target_weights"
128 ] = problem_defintion.target_weights
129
130 if col_dtype in (dtype.integer, dtype.float, dtype.num_array, dtype.num_tsarray):
131 encoder_dict["args"][
132 "positive_domain"
133 ] = "$statistical_analysis.positive_domain"
134
135 # Time-series representations require more advanced flags
136 if tss.is_timeseries:
137 gby = tss.group_by if tss.group_by is not None else []
138 if col_name in tss.order_by:
139 encoder_dict["module"] = "ArrayEncoder"
140 encoder_dict["args"]["original_type"] = f'"{tss.target_type}"'
141 encoder_dict["args"]["window"] = f"{tss.window}"
142
143 if is_target:
144 if col_dtype in [dtype.integer]:
145 encoder_dict["args"]["grouped_by"] = f"{gby}"
146 encoder_dict["module"] = "TsNumericEncoder"
147 if col_dtype in [dtype.float]:
148 encoder_dict["args"]["grouped_by"] = f"{gby}"
149 encoder_dict["module"] = "TsNumericEncoder"
150 if tss.horizon > 1:
151 encoder_dict["args"]["grouped_by"] = f"{gby}"
152 encoder_dict["args"]["timesteps"] = f"{tss.horizon}"
153 if col_dtype in [dtype.num_tsarray]:
154 encoder_dict["module"] = "TsArrayNumericEncoder"
155 elif col_dtype in [dtype.cat_tsarray]:
156 encoder_dict["module"] = "TsCatArrayEncoder"
157
158 if "__mdb_ts_previous" in col_name or col_name in tss.historical_columns:
159 encoder_dict["module"] = "TimeSeriesEncoder"
160 encoder_dict["args"]["original_type"] = f'"{tss.target_type}"'
161 encoder_dict["args"]["window"] = f"{tss.window}"
162
163 # Set arguments for the encoder
164 if encoder_dict["module"] == "PretrainedLangEncoder" and not is_target:
165 encoder_dict["args"]["output_type"] = "$dtype_dict[$target]"
166
167 if eval(encoder_dict["module"]).is_trainable_encoder:
168 encoder_dict["args"]["stop_after"] = "$problem_definition.seconds_per_encoder"
169
170 if is_target_predicting_encoder:
171 encoder_dict["args"]["embed_mode"] = "False"
172 return encoder_dict
173
174
175 def generate_json_ai(
176 type_information: TypeInformation,
177 statistical_analysis: StatisticalAnalysis,
178 problem_definition: ProblemDefinition,
179 ) -> JsonAI:
180 """
181 Given ``TypeInformation``, ``StatisticalAnalysis``, and the ``ProblemDefinition``, generate a JSON config file with the necessary elements of the ML pipeline populated.
182
183     :param type_information: Specifies the data type of each column within the dataset
184     :param statistical_analysis: Statistical analysis of the dataset, used to inform defaults such as encoder choice
185 :param problem_definition: Specifies details of the model training/building procedure, as defined by ``ProblemDefinition``
186
187 :returns: JSON-AI object with fully populated details of the ML pipeline
188     """ # noqa
189 exec(IMPORTS, globals())
190 exec(IMPORT_EXTERNAL_DIRS, globals())
191 target = problem_definition.target
192 input_cols = []
193 tss = problem_definition.timeseries_settings
194 dtype_dict = type_information.dtypes
195 for k in type_information.identifiers:
196 del dtype_dict[k]
197 dependency_dict = {}
198
199 for col_name, col_dtype in dtype_dict.items():
200 if (
201 (col_name not in type_information.identifiers
202 and col_dtype not in (dtype.invalid, dtype.empty)
203 and col_name != target)
204 or
205 (tss.group_by is not None and col_name in tss.group_by)
206 ):
207 if col_name != problem_definition.target:
208 input_cols.append(col_name)
209
210 is_target_predicting_encoder = False
211 is_ts = problem_definition.timeseries_settings.is_timeseries
212
213 # Single text column classification
214 if (
215 len(input_cols) == 1
216 and type_information.dtypes[input_cols[0]] in (dtype.rich_text)
217 and type_information.dtypes[target] in (dtype.categorical, dtype.binary)
218 ):
219 is_target_predicting_encoder = True
220
221 if is_target_predicting_encoder:
222 submodels = [
223 {
224 "module": "Unit",
225 "args": {
226 "target_encoder": "$encoders[self.target]",
227 "stop_after": "$problem_definition.seconds_per_mixer",
228 },
229 }
230 ]
231 else:
232 submodels = [
233 {
234 "module": "Neural",
235 "args": {
236 "fit_on_dev": True,
237 "stop_after": "$problem_definition.seconds_per_mixer",
238 "search_hyperparameters": True,
239 },
240 }
241 ]
242
243 if (not tss.is_timeseries or tss.horizon == 1) and dtype_dict[target] not in (dtype.num_array, dtype.cat_array):
244 submodels.extend(
245 [
246 {
247 "module": "LightGBM",
248 "args": {
249 "stop_after": "$problem_definition.seconds_per_mixer",
250 "fit_on_dev": True,
251 },
252 },
253 {
254 "module": "Regression",
255 "args": {
256 "stop_after": "$problem_definition.seconds_per_mixer",
257 },
258 },
259 ]
260 )
261 elif tss.is_timeseries and tss.horizon > 1:
262 submodels.extend(
263 [
264 {
265 "module": "LightGBMArray",
266 "args": {
267 "fit_on_dev": True,
268 "stop_after": "$problem_definition.seconds_per_mixer",
269 "n_ts_predictions": "$problem_definition.timeseries_settings.horizon",
270 },
271 }
272 ]
273 )
274
275 if tss.use_previous_target and dtype_dict[target] in (dtype.integer, dtype.float, dtype.quantity):
276 submodels.extend(
277 [
278 {
279 "module": "SkTime",
280 "args": {
281 "stop_after": "$problem_definition.seconds_per_mixer",
282 "n_ts_predictions": "$problem_definition.timeseries_settings.horizon",
283 },
284 }
285 ]
286 )
287
288 model = {
289 "module": "BestOf",
290 "args": {
291 "submodels": submodels,
292 "args": "$pred_args",
293 "accuracy_functions": "$accuracy_functions",
294 "ts_analysis": "self.ts_analysis" if is_ts else None,
295 }
296 }
297
298 if tss.is_timeseries and tss.horizon > 1:
299 if dtype_dict[target] in (dtype.integer, dtype.float, dtype.quantity):
300 dtype_dict[target] = dtype.num_tsarray
301 else:
302 dtype_dict[target] = dtype.cat_tsarray
303
304 encoders = {
305 target: lookup_encoder(
306 dtype_dict[target],
307 target,
308 True,
309 problem_definition,
310 False,
311 statistical_analysis,
312 )
313 }
314
315 for col in input_cols:
316 encoders[col] = lookup_encoder(
317 dtype_dict[col],
318 col,
319 False,
320 problem_definition,
321 is_target_predicting_encoder,
322 statistical_analysis,
323 )
324
325 # Decide on the accuracy functions to use
326 output_dtype = dtype_dict[target]
327 if output_dtype in [
328 dtype.integer,
329 dtype.float,
330 dtype.date,
331 dtype.datetime,
332 dtype.quantity,
333 ]:
334 accuracy_functions = ["r2_score"]
335 elif output_dtype in [dtype.categorical, dtype.tags, dtype.binary]:
336 accuracy_functions = ["balanced_accuracy_score"]
337 elif output_dtype in (dtype.num_array, dtype.num_tsarray):
338 accuracy_functions = ["evaluate_num_array_accuracy"]
339 elif output_dtype in (dtype.cat_array, dtype.cat_tsarray):
340 accuracy_functions = ["evaluate_cat_array_accuracy"]
341 else:
342 raise Exception(
343 f"Please specify a custom accuracy function for output type {output_dtype}"
344 )
345
346 # special dispatch for t+1 time series forecasters
347 if is_ts:
348 if output_dtype in [dtype.integer, dtype.float]:
349 accuracy_functions = ["evaluate_num_array_accuracy"]
350
351 if problem_definition.time_aim is None:
352         # 3 days
353 problem_definition.time_aim = 3 * 24 * 3600
354
355 # Encoders are assigned 1/3 of the time unless a user overrides this (equal time per encoder)
356 if problem_definition.seconds_per_encoder is None:
357 nr_trainable_encoders = len(
358 [
359 x
360 for x in encoders.values()
361 if eval(x["module"]).is_trainable_encoder
362 ]
363 )
364 if nr_trainable_encoders > 0:
365 problem_definition.seconds_per_encoder = 0.33 * problem_definition.time_aim / nr_trainable_encoders
366
367     # Mixers are assigned 1/3 of the time aim (or 2/3 if there are no trainable encoders)
368 # unless a user overrides this (equal time per mixer)
369 if problem_definition.seconds_per_mixer is None:
370 if problem_definition.seconds_per_encoder is None:
371 problem_definition.seconds_per_mixer = 0.66 * problem_definition.time_aim / len(model['args']['submodels'])
372 else:
373 problem_definition.seconds_per_mixer = 0.33 * problem_definition.time_aim / len(model['args']['submodels'])
374
375 return JsonAI(
376 cleaner=None,
377 splitter=None,
378 analyzer=None,
379 explainer=None,
380 encoders=encoders,
381 dtype_dict=dtype_dict,
382 dependency_dict=dependency_dict,
383 model=model,
384 problem_definition=problem_definition,
385 identifiers=type_information.identifiers,
386 timeseries_transformer=None,
387 timeseries_analyzer=None,
388 accuracy_functions=accuracy_functions,
389 )
390
391
392 def _merge_implicit_values(field: dict, implicit_value: dict) -> dict:
393 """
394 Helper function for `_populate_implicit_field`.
395 Takes a user-defined field along with its implicit value, and merges them together.
396
397 :param field: JsonAI field with user-defined parameters.
398 :param implicit_value: implicit values for the field.
399 :return: original field with implicit values merged into it.
400 """
401 exec(IMPORTS, globals())
402 exec(IMPORT_EXTERNAL_DIRS, globals())
403 module = eval(field["module"])
404
405 if inspect.isclass(module):
406 args = list(inspect.signature(module.__init__).parameters.keys())[1:]
407 else:
408 args = module.__code__.co_varnames
409
410 for arg in args:
411 if "args" not in field:
412 field["args"] = implicit_value["args"]
413 else:
414 if arg not in field["args"]:
415 if arg in implicit_value["args"]:
416 field["args"][arg] = implicit_value["args"][arg]
417
418 return field
419
420
421 def _populate_implicit_field(
422 json_ai: JsonAI, field_name: str, implicit_value: dict, is_timeseries: bool
423 ) -> None:
424 """
425     Populate the implicit field of the JsonAI, either by filling it in entirely if missing, or by introspecting the class or function and assigning default values to the args in its signature that are in the implicit default but haven't been populated by the user
426
427 :params: json_ai: ``JsonAI`` object that describes the ML pipeline that may not have every detail fully specified.
428 :params: field_name: Name of the field the implicit field in ``JsonAI``
429 :params: implicit_value: The dictionary containing implicit values for the module and arg in the field
430 :params: is_timeseries: Whether or not this is a timeseries problem
431
432 :returns: nothing, this method mutates the respective field of the ``JsonAI`` object it receives
433 """ # noqa
434     # These imports might be slow, in which case the only <easy> solution is to inline this code
435 field = json_ai.__getattribute__(field_name)
436 if field is None:
437         # This if is to only populate timeseries-specific implicit fields for timeseries problems
438 if is_timeseries or field_name not in (
439 "timeseries_analyzer",
440 "timeseries_transformer",
441 ):
442 field = implicit_value
443
444 # If the user specified one or more subfields in a field that's a list
445     # Populate them with implicit arguments from the implicit values for that subfield
446 elif isinstance(field, list) and isinstance(implicit_value, list):
447 for i in range(len(field)):
448 sub_field_implicit = [
449 x for x in implicit_value if x["module"] == field[i]["module"]
450 ]
451 if len(sub_field_implicit) == 1:
452 field[i] = _merge_implicit_values(field[i], sub_field_implicit[0])
453 for sub_field_implicit in implicit_value:
454 if (
455 len([x for x in field if x["module"] == sub_field_implicit["module"]])
456 == 0
457 ):
458 field.append(sub_field_implicit)
459 # If the user specified the field, add implicit arguments which we didn't specify
460 else:
461 field = _merge_implicit_values(field, implicit_value)
462 json_ai.__setattr__(field_name, field)
463
464
465 def _add_implicit_values(json_ai: JsonAI) -> JsonAI:
466 """
467 To enable brevity in writing, auto-generate the "unspecified/missing" details required in the ML pipeline.
468
469 :params: json_ai: ``JsonAI`` object that describes the ML pipeline that may not have every detail fully specified.
470
471 :returns: ``JSONAI`` object with all necessary parameters that were previously left unmentioned filled in.
472 """
473 problem_definition = json_ai.problem_definition
474 tss = problem_definition.timeseries_settings
475 is_ts = tss.is_timeseries
476
477 # Add implicit arguments
478 # @TODO: Consider removing once we have a proper editor in studio
479 mixers = json_ai.model['args']['submodels']
480 for i in range(len(mixers)):
481 if mixers[i]["module"] == "Unit":
482 pass
483 elif mixers[i]["module"] == "Neural":
484 mixers[i]["args"]["target_encoder"] = mixers[i]["args"].get(
485 "target_encoder", "$encoders[self.target]"
486 )
487 mixers[i]["args"]["target"] = mixers[i]["args"].get("target", "$target")
488 mixers[i]["args"]["dtype_dict"] = mixers[i]["args"].get(
489 "dtype_dict", "$dtype_dict"
490 )
491 mixers[i]["args"]["timeseries_settings"] = mixers[i]["args"].get(
492 "timeseries_settings", "$problem_definition.timeseries_settings"
493 )
494 mixers[i]["args"]["net"] = mixers[i]["args"].get(
495 "net",
496 '"DefaultNet"'
497 if not tss.is_timeseries or not tss.use_previous_target
498 else '"ArNet"',
499 )
500
501 elif mixers[i]["module"] == "LightGBM":
502 mixers[i]["args"]["target"] = mixers[i]["args"].get("target", "$target")
503 mixers[i]["args"]["dtype_dict"] = mixers[i]["args"].get(
504 "dtype_dict", "$dtype_dict"
505 )
506 mixers[i]["args"]["input_cols"] = mixers[i]["args"].get(
507 "input_cols", "$input_cols"
508 )
509 mixers[i]["args"]["target_encoder"] = mixers[i]["args"].get(
510 "target_encoder", "$encoders[self.target]"
511 )
512 mixers[i]["args"]["use_optuna"] = True
513 elif mixers[i]["module"] == "Regression":
514 mixers[i]["args"]["target"] = mixers[i]["args"].get("target", "$target")
515 mixers[i]["args"]["dtype_dict"] = mixers[i]["args"].get(
516 "dtype_dict", "$dtype_dict"
517 )
518 mixers[i]["args"]["target_encoder"] = mixers[i]["args"].get(
519 "target_encoder", "$encoders[self.target]"
520 )
521 elif mixers[i]["module"] == "LightGBMArray":
522 mixers[i]["args"]["target"] = mixers[i]["args"].get("target", "$target")
523 mixers[i]["args"]["dtype_dict"] = mixers[i]["args"].get(
524 "dtype_dict", "$dtype_dict"
525 )
526 mixers[i]["args"]["input_cols"] = mixers[i]["args"].get(
527 "input_cols", "$input_cols"
528 )
529 mixers[i]["args"]["target_encoder"] = mixers[i]["args"].get(
530 "target_encoder", "$encoders[self.target]"
531 )
532 elif mixers[i]["module"] == "SkTime":
533 mixers[i]["args"]["target"] = mixers[i]["args"].get("target", "$target")
534 mixers[i]["args"]["dtype_dict"] = mixers[i]["args"].get(
535 "dtype_dict", "$dtype_dict"
536 )
537 mixers[i]["args"]["ts_analysis"] = mixers[i]["args"].get(
538 "ts_analysis", "$ts_analysis"
539 )
540 # enforce fit_on_all if this mixer is specified
541 problem_definition.fit_on_all = True
542
543 json_ai.model["args"]["target"] = json_ai.model["args"].get("target", "$target")
544 json_ai.model["args"]["data"] = json_ai.model["args"].get("data", "encoded_test_data")
545 json_ai.model["args"]["mixers"] = json_ai.model["args"].get("mixers", "$mixers")
546
547 for name in json_ai.encoders:
548 if name not in json_ai.dependency_dict:
549 json_ai.dependency_dict[name] = []
550
551 # Add "hidden" fields
552 hidden_fields = {
553 "cleaner": {
554 "module": "cleaner",
555 "args": {
556 "pct_invalid": "$problem_definition.pct_invalid",
557 "identifiers": "$identifiers",
558 "data": "data",
559 "dtype_dict": "$dtype_dict",
560 "target": "$target",
561 "mode": "$mode",
562 "imputers": "$imputers",
563 "timeseries_settings": "$problem_definition.timeseries_settings",
564 "anomaly_detection": "$problem_definition.anomaly_detection",
565 },
566 },
567 "splitter": {
568 "module": "splitter",
569 "args": {
570 "tss": "$problem_definition.timeseries_settings",
571 "data": "data",
572 "seed": 1,
573 "target": "$target",
574 "dtype_dict": "$dtype_dict",
575 "pct_train": 0.8,
576 "pct_dev": 0.1,
577 "pct_test": 0.1,
578 },
579 },
580 "analyzer": {
581 "module": "model_analyzer",
582 "args": {
583 "stats_info": "$statistical_analysis",
584 "tss": "$problem_definition.timeseries_settings",
585 "accuracy_functions": "$accuracy_functions",
586 "predictor": "$ensemble",
587 "data": "encoded_test_data",
588 "train_data": "encoded_train_data",
589 "target": "$target",
590 "dtype_dict": "$dtype_dict",
591 "analysis_blocks": "$analysis_blocks",
592 "ts_analysis": "$ts_analysis" if is_ts else None,
593 },
594 },
595 "explainer": {
596 "module": "explain",
597 "args": {
598 "timeseries_settings": "$problem_definition.timeseries_settings",
599 "positive_domain": "$statistical_analysis.positive_domain",
600 "anomaly_detection": "$problem_definition.anomaly_detection",
601 "data": "data",
602 "encoded_data": "encoded_data",
603 "predictions": "df",
604 "analysis": "$runtime_analyzer",
605 "ts_analysis": "$ts_analysis" if is_ts else None,
606 "target_name": "$target",
607 "target_dtype": "$dtype_dict[self.target]",
608 "explainer_blocks": "$analysis_blocks",
609 "pred_args": "$pred_args",
610 },
611 },
612 "analysis_blocks": [
613 {
614 "module": "ICP",
615 "args": {
616 "fixed_significance": None,
617 "confidence_normalizer": False,
618 "positive_domain": "$statistical_analysis.positive_domain",
619 },
620 },
621 {
622 "module": "AccStats",
623 "args": {"deps": ["ICP"]},
624 },
625 {
626 "module": "ConfStats",
627 "args": {"deps": ["ICP"]},
628 },
629 ] if problem_definition.use_default_analysis else [],
630 "timeseries_transformer": {
631 "module": "transform_timeseries",
632 "args": {
633 "timeseries_settings": "$problem_definition.timeseries_settings",
634 "data": "data",
635 "dtype_dict": "$dtype_dict",
636 "target": "$target",
637 "mode": "$mode",
638 },
639 },
640 "timeseries_analyzer": {
641 "module": "timeseries_analyzer",
642 "args": {
643 "timeseries_settings": "$problem_definition.timeseries_settings",
644 "data": "data",
645 "dtype_dict": "$dtype_dict",
646 "target": "$target",
647 },
648 },
649 }
650
651 for field_name, implicit_value in hidden_fields.items():
652 _populate_implicit_field(json_ai, field_name, implicit_value, tss.is_timeseries)
653
654 return json_ai
655
656
657 def code_from_json_ai(json_ai: JsonAI) -> str:
658 """
659 Generates a custom ``PredictorInterface`` given the specifications from ``JsonAI`` object.
660
661 :param json_ai: ``JsonAI`` object with fully specified parameters
662
663 :returns: Automated syntax of the ``PredictorInterface`` object.
664 """
665 json_ai = deepcopy(json_ai)
666 # ----------------- #
667 # Fill in any missing values
668 json_ai = _add_implicit_values(json_ai)
669
670 # ----------------- #
671
672 # Instantiate data types
673 dtype_dict = {}
674
675 for k in json_ai.dtype_dict:
676 if json_ai.dtype_dict[k] not in (dtype.invalid, dtype.empty):
677 dtype_dict[k] = json_ai.dtype_dict[k]
678
679 # Populate imputers
680 imputer_dict = {}
681 if json_ai.imputers:
682 for imputer in json_ai.imputers:
683 imputer_dict[imputer['args']['target'].replace('\'', '').replace('\"', '')] = call(imputer)
684 json_ai.imputers = imputer_dict
685 imputers = inline_dict(json_ai.imputers)
686
687 # Populate encoders
688 encoder_dict = {}
689 for col_name, encoder in json_ai.encoders.items():
690 encoder_dict[col_name] = call(encoder)
691
692 # Populate time-series specific details
693 tss = json_ai.problem_definition.timeseries_settings
694 if tss.is_timeseries and tss.use_previous_target:
695 col_name = f"__mdb_ts_previous_{json_ai.problem_definition.target}"
696 target_type = json_ai.dtype_dict[json_ai.problem_definition.target]
697 json_ai.problem_definition.timeseries_settings.target_type = target_type
698 encoder_dict[col_name] = call(
699 lookup_encoder(
700 target_type,
701 col_name,
702 False,
703 json_ai.problem_definition,
704 False,
705 None,
706 )
707 )
708
709 dtype_dict[col_name] = target_type
710 # @TODO: Is populating the json_ai at this stage even necessary?
711 json_ai.encoders[col_name] = encoder_dict[col_name]
712 json_ai.dtype_dict[col_name] = target_type
713 json_ai.dependency_dict[col_name] = []
714
715 # ----------------- #
716
717 input_cols = [x.replace("'", "\\'").replace('"', '\\"') for x in json_ai.encoders
718 if x != json_ai.problem_definition.target]
719 input_cols = ",".join([f"""'{name}'""" for name in input_cols])
720
721 # ----------------- #
722 # Time-series specific code blocks
723 # ----------------- #
724
725 ts_transform_code = ""
726 ts_analyze_code = None
727 ts_encoder_code = ""
728 if json_ai.timeseries_transformer is not None:
729 ts_transform_code = f"""
730 log.info('Transforming timeseries data')
731 data = {call(json_ai.timeseries_transformer)}
732 """
733 ts_analyze_code = f"""
734 self.ts_analysis = {call(json_ai.timeseries_analyzer)}
735 """
736 # @TODO: set these kwargs/properties in the json ai construction (if possible)
737 if json_ai.timeseries_analyzer is not None:
738 ts_encoder_code = """
739 if encoder.is_timeseries_encoder:
740 kwargs['ts_analysis'] = self.ts_analysis
741 """
742
743 if json_ai.problem_definition.timeseries_settings.is_timeseries:
744 ts_target_code = """
745 if encoder.is_target:
746 encoder.normalizers = self.ts_analysis['target_normalizers']
747 encoder.group_combinations = self.ts_analysis['group_combinations']
748 """
749 else:
750 ts_target_code = ""
751
752 # ----------------- #
753 # Statistical Analysis Body
754 # ----------------- #
755
756 analyze_data_body = f"""
757 log.info("Performing statistical analysis on data")
758 self.statistical_analysis = lightwood.data.statistical_analysis(data,
759 self.dtype_dict,
760 {json_ai.identifiers},
761 self.problem_definition)
762
763 # Instantiate post-training evaluation
764 self.analysis_blocks = [{', '.join([call(block) for block in json_ai.analysis_blocks])}]
765 """
766
767 analyze_data_body = align(analyze_data_body, 2)
768
769 # ----------------- #
770 # Pre-processing Body
771 # ----------------- #
772
773 clean_body = f"""
774 log.info('Cleaning the data')
775 self.imputers = {imputers}
776 data = {call(json_ai.cleaner)}
777
778 # Time-series blocks
779 {ts_transform_code}
780 """
781 if ts_analyze_code is not None:
782 clean_body += f"""
783 if self.mode != 'predict':
784 {align(ts_analyze_code,1)}
785 """
786
787 clean_body += '\nreturn data'
788
789 clean_body = align(clean_body, 2)
790
791 # ----------------- #
792 # Train-Test Splitter Body
793 # ----------------- #
794
795 split_body = f"""
796 log.info("Splitting the data into train/test")
797 train_test_data = {call(json_ai.splitter)}
798
799 return train_test_data
800 """
801
802 split_body = align(split_body, 2)
803
804 # ----------------- #
805 # Prepare features Body
806 # ----------------- #
807
808 prepare_body = f"""
809 self.mode = 'train'
810
811 if self.statistical_analysis is None:
812 raise Exception("Please run analyze_data first")
813
814 # Column to encoder mapping
815 self.encoders = {inline_dict(encoder_dict)}
816
817 # Prepare the training + dev data
818 concatenated_train_dev = pd.concat([data['train'], data['dev']])
819
820 log.info('Preparing the encoders')
821
822 encoder_prepping_dict = {{}}
823
824 # Prepare encoders that do not require learned strategies
825 for col_name, encoder in self.encoders.items():
826 if col_name != self.target and not encoder.is_trainable_encoder:
827 encoder_prepping_dict[col_name] = [encoder, concatenated_train_dev[col_name], 'prepare']
828 log.info(f'Encoder prepping dict length of: {{len(encoder_prepping_dict)}}')
829
830 # Setup parallelization
831 parallel_prepped_encoders = mut_method_call(encoder_prepping_dict)
832 for col_name, encoder in parallel_prepped_encoders.items():
833 self.encoders[col_name] = encoder
834
835 # Prepare the target
836 if self.target not in parallel_prepped_encoders:
837 if self.encoders[self.target].is_trainable_encoder:
838 self.encoders[self.target].prepare(data['train'][self.target], data['dev'][self.target])
839 else:
840 self.encoders[self.target].prepare(pd.concat([data['train'], data['dev']])[self.target])
841
842 # Prepare any non-target encoders that are learned
843 for col_name, encoder in self.encoders.items():
844 if col_name != self.target and encoder.is_trainable_encoder:
845 priming_data = pd.concat([data['train'], data['dev']])
846 kwargs = {{}}
847 if self.dependencies[col_name]:
848 kwargs['dependency_data'] = {{}}
849 for col in self.dependencies[col_name]:
850 kwargs['dependency_data'][col] = {{
851 'original_type': self.dtype_dict[col],
852 'data': priming_data[col]
853 }}
854 {align(ts_encoder_code, 3)}
855
856 # If an encoder representation requires the target, provide priming data
857 if hasattr(encoder, 'uses_target'):
858 kwargs['encoded_target_values'] = self.encoders[self.target].encode(priming_data[self.target])
859
860 encoder.prepare(data['train'][col_name], data['dev'][col_name], **kwargs)
861
862 {align(ts_target_code, 1)}
863 """
864 prepare_body = align(prepare_body, 2)
865
866 # ----------------- #
867 # Featurize Data Body
868 # ----------------- #
869
870 feature_body = f"""
871 log.info('Featurizing the data')
872
873 feature_data = {{ key: EncodedDs(self.encoders, data, self.target) for key, data in split_data.items() if key != "stratified_on"}}
874
875 return feature_data
876
877 """ # noqa
878
879 feature_body = align(feature_body, 2)
880
881 # ----------------- #
882 # Fit Mixer Body
883 # ----------------- #
884
885 fit_body = f"""
886 self.mode = 'train'
887
888 # --------------- #
889 # Extract data
890 # --------------- #
891 # Extract the featurized data into train/dev/test
892 encoded_train_data = enc_data['train']
893 encoded_dev_data = enc_data['dev']
894 encoded_test_data = enc_data['test']
895
896 log.info('Training the mixers')
897
898 # --------------- #
899 # Fit Models
900 # --------------- #
901 # Assign list of mixers
902 self.mixers = [{', '.join([call(x) for x in json_ai.model["args"]["submodels"]])}]
903
904 # Train mixers
905 trained_mixers = []
906 for mixer in self.mixers:
907 try:
908 self.fit_mixer(mixer, encoded_train_data, encoded_dev_data)
909 trained_mixers.append(mixer)
910 except Exception as e:
911 log.warning(f'Exception: {{e}} when training mixer: {{mixer}}')
912 if {json_ai.problem_definition.strict_mode} and mixer.stable:
913 raise e
914
915 # Update mixers to trained versions
916 self.mixers = trained_mixers
917
918 # --------------- #
919 # Create Ensembles
920 # --------------- #
921 log.info('Ensembling the mixer')
922 # Create an ensemble of mixers to identify best performing model
923 self.pred_args = PredictionArguments()
924 # Dirty hack
925 self.ensemble = {call(json_ai.model)}
926 self.supports_proba = self.ensemble.supports_proba
927 """
928 fit_body = align(fit_body, 2)
929
930 # ----------------- #
931 # Analyze Ensemble Body
932 # ----------------- #
933
934 analyze_ensemble = f"""
935
936 # --------------- #
937 # Extract data
938 # --------------- #
939 # Extract the featurized data into train/dev/test
940 encoded_train_data = enc_data['train']
941 encoded_dev_data = enc_data['dev']
942 encoded_test_data = enc_data['test']
943
944 # --------------- #
945 # Analyze Ensembles
946 # --------------- #
947 log.info('Analyzing the ensemble of mixers')
948 self.model_analysis, self.runtime_analyzer = {call(json_ai.analyzer)}
949 """
950 analyze_ensemble = align(analyze_ensemble, 2)
951
952 # ----------------- #
953 # Adjust Ensemble Body
954 # ----------------- #
955
956 adjust_body = f"""
957 self.mode = 'train'
958
959 # --------------- #
960 # Prepare data
961 # --------------- #
962 if old_data is None:
963 old_data = pd.DataFrame()
964
965 if isinstance(old_data, pd.DataFrame):
966 old_data = EncodedDs(self.encoders, old_data, self.target)
967
968 if isinstance(new_data, pd.DataFrame):
969 new_data = EncodedDs(self.encoders, new_data, self.target)
970
971 # --------------- #
972 # Update/Adjust Mixers
973 # --------------- #
974 log.info('Updating the mixers')
975
976 for mixer in self.mixers:
977 mixer.partial_fit(new_data, old_data)
978 """ # noqa
979
980 adjust_body = align(adjust_body, 2)
981
982 # ----------------- #
983 # Learn Body
984 # ----------------- #
985
986 learn_body = """
987 self.mode = 'train'
988
989 # Perform stats analysis
990 self.analyze_data(data)
991
992 # Pre-process the data
993 data = self.preprocess(data)
994
995 # Create train/test (dev) split
996 train_dev_test = self.split(data)
997
998 # Prepare encoders
999 self.prepare(train_dev_test)
1000
1001 # Create feature vectors from data
1002 enc_train_test = self.featurize(train_dev_test)
1003
1004 # Prepare mixers
1005 self.fit(enc_train_test)
1006
1007 # Analyze the ensemble
1008 self.analyze_ensemble(enc_train_test)
1009
1010 # ------------------------ #
1011 # Enable model partial fit AFTER it is trained and evaluated for performance with the appropriate train/dev/test splits.
1012 # This assumes the predictor could continuously evolve, hence including reserved testing data may improve predictions.
1013 # SET `json_ai.problem_definition.fit_on_all=False` TO TURN THIS BLOCK OFF.
1014
1015 # Update the mixers with partial fit
1016 if self.problem_definition.fit_on_all:
1017
1018 log.info("Adjustment on validation requested.")
1019 self.adjust(enc_train_test["test"], ConcatedEncodedDs([enc_train_test["train"], enc_train_test["dev"]]))
1020
1021 """
1022 learn_body = align(learn_body, 2)
1023 # ----------------- #
1024 # Predict Body
1025 # ----------------- #
1026
1027 predict_body = f"""
1028 self.mode = 'predict'
1029
1030 if len(data) == 0:
1031 raise Exception("Empty input, aborting prediction. Please try again with some input data.")
1032
1033 # Remove columns that user specifies to ignore
1034 log.info(f'Dropping features: {{self.problem_definition.ignore_features}}')
1035 data = data.drop(columns=self.problem_definition.ignore_features, errors='ignore')
1036 for col in self.input_cols:
1037 if col not in data.columns:
1038 data[col] = [None] * len(data)
1039
1040 # Pre-process the data
1041 data = self.preprocess(data)
1042
1043 # Featurize the data
1044 encoded_ds = self.featurize({{"predict_data": data}})["predict_data"]
1045 encoded_data = encoded_ds.get_encoded_data(include_target=False)
1046
1047 self.pred_args = PredictionArguments.from_dict(args)
1048 df = self.ensemble(encoded_ds, args=self.pred_args)
1049
1050 if self.pred_args.all_mixers:
1051 return df
1052 else:
1053 insights, global_insights = {call(json_ai.explainer)}
1054 return insights
1055 """
1056
1057 predict_body = align(predict_body, 2)
1058
1059 predictor_code = f"""
1060 {IMPORTS}
1061 {IMPORT_EXTERNAL_DIRS}
1062
1063 class Predictor(PredictorInterface):
1064 target: str
1065 mixers: List[BaseMixer]
1066 encoders: Dict[str, BaseEncoder]
1067 ensemble: BaseEnsemble
1068 mode: str
1069
1070 def __init__(self):
1071 seed({json_ai.problem_definition.seed_nr})
1072 self.target = '{json_ai.problem_definition.target}'
1073 self.mode = 'inactive'
1074 self.problem_definition = ProblemDefinition.from_dict({json_ai.problem_definition.to_dict()})
1075 self.accuracy_functions = {json_ai.accuracy_functions}
1076 self.identifiers = {json_ai.identifiers}
1077 self.dtype_dict = {inline_dict(dtype_dict)}
1078
1079 # Any feature-column dependencies
1080 self.dependencies = {inline_dict(json_ai.dependency_dict)}
1081
1082 self.input_cols = [{input_cols}]
1083
1084 # Initial stats analysis
1085 self.statistical_analysis = None
1086 self.runtime_log = dict()
1087
1088 @timed
1089 def analyze_data(self, data: pd.DataFrame) -> None:
1090 # Perform a statistical analysis on the unprocessed data
1091 {analyze_data_body}
1092
1093 @timed
1094 def preprocess(self, data: pd.DataFrame) -> pd.DataFrame:
1095 # Preprocess and clean data
1096 {clean_body}
1097
1098 @timed
1099 def split(self, data: pd.DataFrame) -> Dict[str, pd.DataFrame]:
1100 # Split the data into training/testing splits
1101 {split_body}
1102
1103 @timed
1104 def prepare(self, data: Dict[str, pd.DataFrame]) -> None:
1105 # Prepare encoders to featurize data
1106 {prepare_body}
1107
1108 @timed
1109 def featurize(self, split_data: Dict[str, pd.DataFrame]):
1110 # Featurize data into numerical representations for models
1111 {feature_body}
1112
1113 @timed
1114 def fit(self, enc_data: Dict[str, pd.DataFrame]) -> None:
1115 # Fit predictors to estimate target
1116 {fit_body}
1117
1118 @timed
1119 def fit_mixer(self, mixer, encoded_train_data, encoded_dev_data) -> None:
1120 mixer.fit(encoded_train_data, encoded_dev_data)
1121
1122 @timed
1123 def analyze_ensemble(self, enc_data: Dict[str, pd.DataFrame]) -> None:
1124 # Evaluate quality of fit for the ensemble of mixers
1125 {analyze_ensemble}
1126
1127 @timed
1128 def learn(self, data: pd.DataFrame) -> None:
1129 log.info(f'Dropping features: {{self.problem_definition.ignore_features}}')
1130 data = data.drop(columns=self.problem_definition.ignore_features, errors='ignore')
1131 {learn_body}
1132
1133 @timed
1134 def adjust(self, new_data: Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame],
1135 old_data: Optional[Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame]] = None) -> None:
1136 # Update mixers with new information
1137 {adjust_body}
1138
1139 @timed
1140 def predict(self, data: pd.DataFrame, args: Dict = {{}}) -> pd.DataFrame:
1141 {predict_body}
1142 """
1143
1144 try:
1145 import black
1146 except Exception:
1147 black = None
1148
1149     if black is not None:
1150         predictor_code = black.format_str(predictor_code, mode=black.FileMode())
1151     else:
1152         log.info('Unable to import black formatter, predictor code might be a bit ugly.')
1153 return predictor_code
1154
1155
1156 def validate_json_ai(json_ai: JsonAI) -> bool:
1157 """
1158 Checks the validity of a ``JsonAI`` object
1159
1160 :param json_ai: A ``JsonAI`` object
1161
1162 :returns: Whether the JsonAI is valid, i.e. doesn't contain prohibited values, unknown values and can be turned into code.
1163 """ # noqa
1164 from lightwood.api.high_level import predictor_from_code, code_from_json_ai
1165
1166 try:
1167 predictor_from_code(code_from_json_ai(json_ai))
1168 return True
1169 except Exception:
1170 return False
1171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
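Editorial note: the reference patch for this record (shown next) touches `generate_json_ai`. Per the issue text in the verification payload below, a time-series `group by` column whose cells all hold one value ends up treated as an identifier, its entry is deleted from `dtype_dict`, and `statistical_analysis` later fails with `KeyError` (the `'location'` traceback). A minimal sketch of the guarded deletion loop follows; the names come from the file above, and the "single value gets flagged as an identifier" step is inferred from the issue report rather than stated explicitly:

```python
# Sketch of the guarded loop (assumes problem_definition and type_information
# as defined in generate_json_ai above): keep the dtype entry for any
# identifier column that is also a time-series group-by key.
tss = problem_definition.timeseries_settings
dtype_dict = type_information.dtypes
for k in type_information.identifiers:
    if not (tss.is_timeseries and tss.group_by and k in tss.group_by):
        del dtype_dict[k]
```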
| diff --git a/lightwood/api/json_ai.py b/lightwood/api/json_ai.py
--- a/lightwood/api/json_ai.py
+++ b/lightwood/api/json_ai.py
@@ -193,7 +193,8 @@
tss = problem_definition.timeseries_settings
dtype_dict = type_information.dtypes
for k in type_information.identifiers:
- del dtype_dict[k]
+ if not (tss.is_timeseries and tss.group_by and k in tss.group_by):
+ del dtype_dict[k]
dependency_dict = {}
for col_name, col_dtype in dtype_dict.items():
| {"golden_diff": "diff --git a/lightwood/api/json_ai.py b/lightwood/api/json_ai.py\n--- a/lightwood/api/json_ai.py\n+++ b/lightwood/api/json_ai.py\n@@ -193,7 +193,8 @@\n tss = problem_definition.timeseries_settings\n dtype_dict = type_information.dtypes\n for k in type_information.identifiers:\n- del dtype_dict[k]\n+ if not (tss.is_timeseries and tss.group_by and k in tss.group_by):\n+ del dtype_dict[k]\n dependency_dict = {}\n \n for col_name, col_dtype in dtype_dict.items():\n", "issue": "[TS] error if 'group by' column contains a single value\nIf 'group by' column has single value for all cells, then error appears. Can be replicated with any ts-dataset if add filter to data select query:\r\n```\r\ncreate predictor p_name from int_name (select * from test_data.ts_dataset where location='good') predict rental_price order by date group by location window 5 horizon 3;\r\n```\r\nerror is\r\n```\r\nTraceback (most recent call last):\r\n File \"./mindsdb/interfaces/model/learn_process.py\", line 175, in run_learn\r\n run_fit(predictor_id, df)\r\n File \"./mindsdb/utilities/functions.py\", line 56, in wrapper\r\n return func(*args, **kwargs)\r\n File \"./mindsdb/interfaces/model/learn_process.py\", line 148, in run_fit\r\n raise e\r\n File \"./mindsdb/interfaces/model/learn_process.py\", line 119, in run_fit\r\n predictor.learn(df)\r\n File \"/home/maxs/dev/mdb/venv38/sources/lightwood/lightwood/helpers/log.py\", line 30, in wrap\r\n result = f(predictor, *args, **kw)\r\n File \"/tmp/52931846b2322b65fafeb5782f9a3e9e76650c2aac7cecf516512142146485069.py\", line 450, in learn\r\n self.analyze_data(data)\r\n File \"/home/maxs/dev/mdb/venv38/sources/lightwood/lightwood/helpers/log.py\", line 30, in wrap\r\n result = f(predictor, *args, **kw)\r\n File \"/tmp/52931846b2322b65fafeb5782f9a3e9e76650c2aac7cecf516512142146485069.py\", line 137, in analyze_data\r\n self.statistical_analysis = lightwood.data.statistical_analysis(\r\n File \"/home/maxs/dev/mdb/venv38/sources/lightwood/lightwood/data/statistical_analysis.py\", line 120, in statistical_analysis\r\n if dtypes[col] in (dtype.categorical, dtype.binary, dtype.tags):\r\nKeyError: 'location'\r\n```\n", "before_files": [{"content": "# TODO: _add_implicit_values unit test ensures NO changes for a fully specified file.\nfrom copy import deepcopy\nfrom lightwood.helpers.templating import call, inline_dict, align\nfrom lightwood.api import dtype\nfrom lightwood.api.types import (\n JsonAI,\n TypeInformation,\n StatisticalAnalysis,\n ProblemDefinition,\n)\nimport inspect\nfrom lightwood.helpers.log import log\n\n\n# For custom modules, we create a module loader with necessary imports below\nIMPORT_EXTERNAL_DIRS = \"\"\"\nfor import_dir in [os.path.join(os.path.expanduser('~/lightwood_modules'), lightwood_version.replace('.', '_')), os.path.join('/etc/lightwood_modules', lightwood_version.replace('.', '_'))]:\n if os.path.exists(import_dir) and os.access(import_dir, os.R_OK):\n for file_name in list(os.walk(import_dir))[0][2]:\n if file_name[-3:] != '.py':\n continue\n mod_name = file_name[:-3]\n loader = importlib.machinery.SourceFileLoader(mod_name,\n os.path.join(import_dir, file_name))\n module = ModuleType(loader.name)\n loader.exec_module(module)\n sys.modules[mod_name] = module\n exec(f'import {mod_name}')\n\"\"\" # noqa\n\nIMPORTS = \"\"\"\nimport lightwood\nfrom lightwood import __version__ as lightwood_version\nfrom lightwood.analysis import *\nfrom lightwood.api import *\nfrom lightwood.data import *\nfrom lightwood.encoder import *\nfrom 
lightwood.ensemble import *\nfrom lightwood.helpers.device import *\nfrom lightwood.helpers.general import *\nfrom lightwood.helpers.log import *\nfrom lightwood.helpers.numeric import *\nfrom lightwood.helpers.imputers import *\nfrom lightwood.helpers.parallelism import *\nfrom lightwood.helpers.seed import *\nfrom lightwood.helpers.text import *\nfrom lightwood.helpers.torch import *\nfrom lightwood.mixer import *\nimport pandas as pd\nfrom typing import Dict, List, Union\nimport os\nfrom types import ModuleType\nimport importlib.machinery\nimport sys\nimport time\n\"\"\"\n\n\ndef lookup_encoder(\n col_dtype: str,\n col_name: str,\n is_target: bool,\n problem_defintion: ProblemDefinition,\n is_target_predicting_encoder: bool,\n statistical_analysis: StatisticalAnalysis,\n):\n \"\"\"\n Assign a default encoder for a given column based on its data type, and whether it is a target. Encoders intake raw (but cleaned) data and return an feature representation. This function assigns, per data type, what the featurizer should be. This function runs on each column within the dataset available for model building to assign how it should be featurized.\n\n Users may override to create a custom encoder to enable their own featurization process. However, in order to generate template JSON-AI, this code runs automatically. Users may edit the generated syntax and use custom approaches while model building.\n\n For each encoder, \"args\" may be passed. These args depend an encoder requires during its preparation call.\n\n :param col_dtype: A data-type of a column specified\n :param col_name: The name of the column\n :param is_target: Whether the column is the target for prediction. If true, only certain possible feature representations are allowed, particularly for complex data types.\n :param problem_definition: The ``ProblemDefinition`` criteria; this populates specifics on how models and encoders may be trained.\n :param is_target_predicting_encoder:\n \"\"\" # noqa\n\n tss = problem_defintion.timeseries_settings\n encoder_lookup = {\n dtype.integer: \"NumericEncoder\",\n dtype.float: \"NumericEncoder\",\n dtype.binary: \"BinaryEncoder\",\n dtype.categorical: \"CategoricalAutoEncoder\"\n if statistical_analysis is None\n or len(statistical_analysis.histograms[col_name]) > 100\n else \"OneHotEncoder\",\n dtype.tags: \"MultiHotEncoder\",\n dtype.date: \"DatetimeEncoder\",\n dtype.datetime: \"DatetimeEncoder\",\n dtype.image: \"Img2VecEncoder\",\n dtype.rich_text: \"PretrainedLangEncoder\",\n dtype.short_text: \"CategoricalAutoEncoder\",\n dtype.quantity: \"NumericEncoder\",\n dtype.audio: \"MFCCEncoder\",\n dtype.num_array: \"NumArrayEncoder\",\n dtype.cat_array: \"CatArrayEncoder\",\n dtype.num_tsarray: \"TimeSeriesEncoder\",\n dtype.cat_tsarray: \"TimeSeriesEncoder\",\n }\n\n # If column is a target, only specific feature representations are allowed that enable supervised tasks\n target_encoder_lookup_override = {\n dtype.rich_text: \"VocabularyEncoder\",\n dtype.categorical: \"OneHotEncoder\",\n }\n\n # Assign a default encoder to each column.\n encoder_dict = {\"module\": encoder_lookup[col_dtype], \"args\": {}}\n\n # If the column is a target, ensure that the feature representation can enable supervised tasks\n if is_target:\n encoder_dict[\"args\"] = {\"is_target\": \"True\"}\n\n if col_dtype in target_encoder_lookup_override:\n encoder_dict[\"module\"] = target_encoder_lookup_override[col_dtype]\n\n if col_dtype in (dtype.categorical, dtype.binary):\n if problem_defintion.unbias_target:\n 
encoder_dict[\"args\"][\n \"target_weights\"\n ] = \"$statistical_analysis.target_weights\"\n if problem_defintion.target_weights is not None:\n encoder_dict[\"args\"][\n \"target_weights\"\n ] = problem_defintion.target_weights\n\n if col_dtype in (dtype.integer, dtype.float, dtype.num_array, dtype.num_tsarray):\n encoder_dict[\"args\"][\n \"positive_domain\"\n ] = \"$statistical_analysis.positive_domain\"\n\n # Time-series representations require more advanced flags\n if tss.is_timeseries:\n gby = tss.group_by if tss.group_by is not None else []\n if col_name in tss.order_by:\n encoder_dict[\"module\"] = \"ArrayEncoder\"\n encoder_dict[\"args\"][\"original_type\"] = f'\"{tss.target_type}\"'\n encoder_dict[\"args\"][\"window\"] = f\"{tss.window}\"\n\n if is_target:\n if col_dtype in [dtype.integer]:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"module\"] = \"TsNumericEncoder\"\n if col_dtype in [dtype.float]:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"module\"] = \"TsNumericEncoder\"\n if tss.horizon > 1:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"args\"][\"timesteps\"] = f\"{tss.horizon}\"\n if col_dtype in [dtype.num_tsarray]:\n encoder_dict[\"module\"] = \"TsArrayNumericEncoder\"\n elif col_dtype in [dtype.cat_tsarray]:\n encoder_dict[\"module\"] = \"TsCatArrayEncoder\"\n\n if \"__mdb_ts_previous\" in col_name or col_name in tss.historical_columns:\n encoder_dict[\"module\"] = \"TimeSeriesEncoder\"\n encoder_dict[\"args\"][\"original_type\"] = f'\"{tss.target_type}\"'\n encoder_dict[\"args\"][\"window\"] = f\"{tss.window}\"\n\n # Set arguments for the encoder\n if encoder_dict[\"module\"] == \"PretrainedLangEncoder\" and not is_target:\n encoder_dict[\"args\"][\"output_type\"] = \"$dtype_dict[$target]\"\n\n if eval(encoder_dict[\"module\"]).is_trainable_encoder:\n encoder_dict[\"args\"][\"stop_after\"] = \"$problem_definition.seconds_per_encoder\"\n\n if is_target_predicting_encoder:\n encoder_dict[\"args\"][\"embed_mode\"] = \"False\"\n return encoder_dict\n\n\ndef generate_json_ai(\n type_information: TypeInformation,\n statistical_analysis: StatisticalAnalysis,\n problem_definition: ProblemDefinition,\n) -> JsonAI:\n \"\"\"\n Given ``TypeInformation``, ``StatisticalAnalysis``, and the ``ProblemDefinition``, generate a JSON config file with the necessary elements of the ML pipeline populated.\n\n :param TypeInformation: Specifies what data types each column within the dataset are\n :param statistical_analysis:\n :param problem_definition: Specifies details of the model training/building procedure, as defined by ``ProblemDefinition``\n\n :returns: JSON-AI object with fully populated details of the ML pipeline\n \"\"\" # noqaexec\n exec(IMPORTS, globals())\n exec(IMPORT_EXTERNAL_DIRS, globals())\n target = problem_definition.target\n input_cols = []\n tss = problem_definition.timeseries_settings\n dtype_dict = type_information.dtypes\n for k in type_information.identifiers:\n del dtype_dict[k]\n dependency_dict = {}\n\n for col_name, col_dtype in dtype_dict.items():\n if (\n (col_name not in type_information.identifiers\n and col_dtype not in (dtype.invalid, dtype.empty)\n and col_name != target)\n or\n (tss.group_by is not None and col_name in tss.group_by)\n ):\n if col_name != problem_definition.target:\n input_cols.append(col_name)\n\n is_target_predicting_encoder = False\n is_ts = problem_definition.timeseries_settings.is_timeseries\n\n # Single text column classification\n if (\n len(input_cols) == 
1\n and type_information.dtypes[input_cols[0]] in (dtype.rich_text)\n and type_information.dtypes[target] in (dtype.categorical, dtype.binary)\n ):\n is_target_predicting_encoder = True\n\n if is_target_predicting_encoder:\n submodels = [\n {\n \"module\": \"Unit\",\n \"args\": {\n \"target_encoder\": \"$encoders[self.target]\",\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n },\n }\n ]\n else:\n submodels = [\n {\n \"module\": \"Neural\",\n \"args\": {\n \"fit_on_dev\": True,\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"search_hyperparameters\": True,\n },\n }\n ]\n\n if (not tss.is_timeseries or tss.horizon == 1) and dtype_dict[target] not in (dtype.num_array, dtype.cat_array):\n submodels.extend(\n [\n {\n \"module\": \"LightGBM\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"fit_on_dev\": True,\n },\n },\n {\n \"module\": \"Regression\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n },\n },\n ]\n )\n elif tss.is_timeseries and tss.horizon > 1:\n submodels.extend(\n [\n {\n \"module\": \"LightGBMArray\",\n \"args\": {\n \"fit_on_dev\": True,\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"n_ts_predictions\": \"$problem_definition.timeseries_settings.horizon\",\n },\n }\n ]\n )\n\n if tss.use_previous_target and dtype_dict[target] in (dtype.integer, dtype.float, dtype.quantity):\n submodels.extend(\n [\n {\n \"module\": \"SkTime\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"n_ts_predictions\": \"$problem_definition.timeseries_settings.horizon\",\n },\n }\n ]\n )\n\n model = {\n \"module\": \"BestOf\",\n \"args\": {\n \"submodels\": submodels,\n \"args\": \"$pred_args\",\n \"accuracy_functions\": \"$accuracy_functions\",\n \"ts_analysis\": \"self.ts_analysis\" if is_ts else None,\n }\n }\n\n if tss.is_timeseries and tss.horizon > 1:\n if dtype_dict[target] in (dtype.integer, dtype.float, dtype.quantity):\n dtype_dict[target] = dtype.num_tsarray\n else:\n dtype_dict[target] = dtype.cat_tsarray\n\n encoders = {\n target: lookup_encoder(\n dtype_dict[target],\n target,\n True,\n problem_definition,\n False,\n statistical_analysis,\n )\n }\n\n for col in input_cols:\n encoders[col] = lookup_encoder(\n dtype_dict[col],\n col,\n False,\n problem_definition,\n is_target_predicting_encoder,\n statistical_analysis,\n )\n\n # Decide on the accuracy functions to use\n output_dtype = dtype_dict[target]\n if output_dtype in [\n dtype.integer,\n dtype.float,\n dtype.date,\n dtype.datetime,\n dtype.quantity,\n ]:\n accuracy_functions = [\"r2_score\"]\n elif output_dtype in [dtype.categorical, dtype.tags, dtype.binary]:\n accuracy_functions = [\"balanced_accuracy_score\"]\n elif output_dtype in (dtype.num_array, dtype.num_tsarray):\n accuracy_functions = [\"evaluate_num_array_accuracy\"]\n elif output_dtype in (dtype.cat_array, dtype.cat_tsarray):\n accuracy_functions = [\"evaluate_cat_array_accuracy\"]\n else:\n raise Exception(\n f\"Please specify a custom accuracy function for output type {output_dtype}\"\n )\n\n # special dispatch for t+1 time series forecasters\n if is_ts:\n if output_dtype in [dtype.integer, dtype.float]:\n accuracy_functions = [\"evaluate_num_array_accuracy\"]\n\n if problem_definition.time_aim is None:\n # 5 days\n problem_definition.time_aim = 3 * 24 * 3600\n\n # Encoders are assigned 1/3 of the time unless a user overrides this (equal time per encoder)\n if problem_definition.seconds_per_encoder is None:\n nr_trainable_encoders 
= len(\n [\n x\n for x in encoders.values()\n if eval(x[\"module\"]).is_trainable_encoder\n ]\n )\n if nr_trainable_encoders > 0:\n problem_definition.seconds_per_encoder = 0.33 * problem_definition.time_aim / nr_trainable_encoders\n\n # Mixers are assigned 1/3 of the time aim (or 2/3 if there are no trainable encoders )\\\n # unless a user overrides this (equal time per mixer)\n if problem_definition.seconds_per_mixer is None:\n if problem_definition.seconds_per_encoder is None:\n problem_definition.seconds_per_mixer = 0.66 * problem_definition.time_aim / len(model['args']['submodels'])\n else:\n problem_definition.seconds_per_mixer = 0.33 * problem_definition.time_aim / len(model['args']['submodels'])\n\n return JsonAI(\n cleaner=None,\n splitter=None,\n analyzer=None,\n explainer=None,\n encoders=encoders,\n dtype_dict=dtype_dict,\n dependency_dict=dependency_dict,\n model=model,\n problem_definition=problem_definition,\n identifiers=type_information.identifiers,\n timeseries_transformer=None,\n timeseries_analyzer=None,\n accuracy_functions=accuracy_functions,\n )\n\n\ndef _merge_implicit_values(field: dict, implicit_value: dict) -> dict:\n \"\"\"\n Helper function for `_populate_implicit_field`.\n Takes a user-defined field along with its implicit value, and merges them together.\n\n :param field: JsonAI field with user-defined parameters.\n :param implicit_value: implicit values for the field.\n :return: original field with implicit values merged into it.\n \"\"\"\n exec(IMPORTS, globals())\n exec(IMPORT_EXTERNAL_DIRS, globals())\n module = eval(field[\"module\"])\n\n if inspect.isclass(module):\n args = list(inspect.signature(module.__init__).parameters.keys())[1:]\n else:\n args = module.__code__.co_varnames\n\n for arg in args:\n if \"args\" not in field:\n field[\"args\"] = implicit_value[\"args\"]\n else:\n if arg not in field[\"args\"]:\n if arg in implicit_value[\"args\"]:\n field[\"args\"][arg] = implicit_value[\"args\"][arg]\n\n return field\n\n\ndef _populate_implicit_field(\n json_ai: JsonAI, field_name: str, implicit_value: dict, is_timeseries: bool\n) -> None:\n \"\"\"\n Populate the implicit field of the JsonAI, either by filling it in entirely if missing, or by introspecting the class or function and assigning default values to the args in it's signature that are in the implicit default but haven't been populated by the user\n\n :params: json_ai: ``JsonAI`` object that describes the ML pipeline that may not have every detail fully specified.\n :params: field_name: Name of the field the implicit field in ``JsonAI``\n :params: implicit_value: The dictionary containing implicit values for the module and arg in the field\n :params: is_timeseries: Whether or not this is a timeseries problem\n\n :returns: nothing, this method mutates the respective field of the ``JsonAI`` object it receives\n \"\"\" # noqa\n # These imports might be slow, in which case the only <easy> solution is to line this code\n field = json_ai.__getattribute__(field_name)\n if field is None:\n # This if is to only populated timeseries-specific implicit fields for implicit problems\n if is_timeseries or field_name not in (\n \"timeseries_analyzer\",\n \"timeseries_transformer\",\n ):\n field = implicit_value\n\n # If the user specified one or more subfields in a field that's a list\n # Populate them with implicit arguments form the implicit values from that subfield\n elif isinstance(field, list) and isinstance(implicit_value, list):\n for i in range(len(field)):\n sub_field_implicit = [\n x for x in 
implicit_value if x[\"module\"] == field[i][\"module\"]\n ]\n if len(sub_field_implicit) == 1:\n field[i] = _merge_implicit_values(field[i], sub_field_implicit[0])\n for sub_field_implicit in implicit_value:\n if (\n len([x for x in field if x[\"module\"] == sub_field_implicit[\"module\"]])\n == 0\n ):\n field.append(sub_field_implicit)\n # If the user specified the field, add implicit arguments which we didn't specify\n else:\n field = _merge_implicit_values(field, implicit_value)\n json_ai.__setattr__(field_name, field)\n\n\ndef _add_implicit_values(json_ai: JsonAI) -> JsonAI:\n \"\"\"\n To enable brevity in writing, auto-generate the \"unspecified/missing\" details required in the ML pipeline.\n\n :params: json_ai: ``JsonAI`` object that describes the ML pipeline that may not have every detail fully specified.\n\n :returns: ``JSONAI`` object with all necessary parameters that were previously left unmentioned filled in.\n \"\"\"\n problem_definition = json_ai.problem_definition\n tss = problem_definition.timeseries_settings\n is_ts = tss.is_timeseries\n\n # Add implicit arguments\n # @TODO: Consider removing once we have a proper editor in studio\n mixers = json_ai.model['args']['submodels']\n for i in range(len(mixers)):\n if mixers[i][\"module\"] == \"Unit\":\n pass\n elif mixers[i][\"module\"] == \"Neural\":\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"timeseries_settings\"] = mixers[i][\"args\"].get(\n \"timeseries_settings\", \"$problem_definition.timeseries_settings\"\n )\n mixers[i][\"args\"][\"net\"] = mixers[i][\"args\"].get(\n \"net\",\n '\"DefaultNet\"'\n if not tss.is_timeseries or not tss.use_previous_target\n else '\"ArNet\"',\n )\n\n elif mixers[i][\"module\"] == \"LightGBM\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"input_cols\"] = mixers[i][\"args\"].get(\n \"input_cols\", \"$input_cols\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n mixers[i][\"args\"][\"use_optuna\"] = True\n elif mixers[i][\"module\"] == \"Regression\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n elif mixers[i][\"module\"] == \"LightGBMArray\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"input_cols\"] = mixers[i][\"args\"].get(\n \"input_cols\", \"$input_cols\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n elif mixers[i][\"module\"] == \"SkTime\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n 
mixers[i][\"args\"][\"ts_analysis\"] = mixers[i][\"args\"].get(\n \"ts_analysis\", \"$ts_analysis\"\n )\n # enforce fit_on_all if this mixer is specified\n problem_definition.fit_on_all = True\n\n json_ai.model[\"args\"][\"target\"] = json_ai.model[\"args\"].get(\"target\", \"$target\")\n json_ai.model[\"args\"][\"data\"] = json_ai.model[\"args\"].get(\"data\", \"encoded_test_data\")\n json_ai.model[\"args\"][\"mixers\"] = json_ai.model[\"args\"].get(\"mixers\", \"$mixers\")\n\n for name in json_ai.encoders:\n if name not in json_ai.dependency_dict:\n json_ai.dependency_dict[name] = []\n\n # Add \"hidden\" fields\n hidden_fields = {\n \"cleaner\": {\n \"module\": \"cleaner\",\n \"args\": {\n \"pct_invalid\": \"$problem_definition.pct_invalid\",\n \"identifiers\": \"$identifiers\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n \"mode\": \"$mode\",\n \"imputers\": \"$imputers\",\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"anomaly_detection\": \"$problem_definition.anomaly_detection\",\n },\n },\n \"splitter\": {\n \"module\": \"splitter\",\n \"args\": {\n \"tss\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"seed\": 1,\n \"target\": \"$target\",\n \"dtype_dict\": \"$dtype_dict\",\n \"pct_train\": 0.8,\n \"pct_dev\": 0.1,\n \"pct_test\": 0.1,\n },\n },\n \"analyzer\": {\n \"module\": \"model_analyzer\",\n \"args\": {\n \"stats_info\": \"$statistical_analysis\",\n \"tss\": \"$problem_definition.timeseries_settings\",\n \"accuracy_functions\": \"$accuracy_functions\",\n \"predictor\": \"$ensemble\",\n \"data\": \"encoded_test_data\",\n \"train_data\": \"encoded_train_data\",\n \"target\": \"$target\",\n \"dtype_dict\": \"$dtype_dict\",\n \"analysis_blocks\": \"$analysis_blocks\",\n \"ts_analysis\": \"$ts_analysis\" if is_ts else None,\n },\n },\n \"explainer\": {\n \"module\": \"explain\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"positive_domain\": \"$statistical_analysis.positive_domain\",\n \"anomaly_detection\": \"$problem_definition.anomaly_detection\",\n \"data\": \"data\",\n \"encoded_data\": \"encoded_data\",\n \"predictions\": \"df\",\n \"analysis\": \"$runtime_analyzer\",\n \"ts_analysis\": \"$ts_analysis\" if is_ts else None,\n \"target_name\": \"$target\",\n \"target_dtype\": \"$dtype_dict[self.target]\",\n \"explainer_blocks\": \"$analysis_blocks\",\n \"pred_args\": \"$pred_args\",\n },\n },\n \"analysis_blocks\": [\n {\n \"module\": \"ICP\",\n \"args\": {\n \"fixed_significance\": None,\n \"confidence_normalizer\": False,\n \"positive_domain\": \"$statistical_analysis.positive_domain\",\n },\n },\n {\n \"module\": \"AccStats\",\n \"args\": {\"deps\": [\"ICP\"]},\n },\n {\n \"module\": \"ConfStats\",\n \"args\": {\"deps\": [\"ICP\"]},\n },\n ] if problem_definition.use_default_analysis else [],\n \"timeseries_transformer\": {\n \"module\": \"transform_timeseries\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n \"mode\": \"$mode\",\n },\n },\n \"timeseries_analyzer\": {\n \"module\": \"timeseries_analyzer\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n },\n },\n }\n\n for field_name, implicit_value in hidden_fields.items():\n _populate_implicit_field(json_ai, field_name, implicit_value, 
tss.is_timeseries)\n\n return json_ai\n\n\ndef code_from_json_ai(json_ai: JsonAI) -> str:\n \"\"\"\n Generates a custom ``PredictorInterface`` given the specifications from ``JsonAI`` object.\n\n :param json_ai: ``JsonAI`` object with fully specified parameters\n\n :returns: Automated syntax of the ``PredictorInterface`` object.\n \"\"\"\n json_ai = deepcopy(json_ai)\n # ----------------- #\n # Fill in any missing values\n json_ai = _add_implicit_values(json_ai)\n\n # ----------------- #\n\n # Instantiate data types\n dtype_dict = {}\n\n for k in json_ai.dtype_dict:\n if json_ai.dtype_dict[k] not in (dtype.invalid, dtype.empty):\n dtype_dict[k] = json_ai.dtype_dict[k]\n\n # Populate imputers\n imputer_dict = {}\n if json_ai.imputers:\n for imputer in json_ai.imputers:\n imputer_dict[imputer['args']['target'].replace('\\'', '').replace('\\\"', '')] = call(imputer)\n json_ai.imputers = imputer_dict\n imputers = inline_dict(json_ai.imputers)\n\n # Populate encoders\n encoder_dict = {}\n for col_name, encoder in json_ai.encoders.items():\n encoder_dict[col_name] = call(encoder)\n\n # Populate time-series specific details\n tss = json_ai.problem_definition.timeseries_settings\n if tss.is_timeseries and tss.use_previous_target:\n col_name = f\"__mdb_ts_previous_{json_ai.problem_definition.target}\"\n target_type = json_ai.dtype_dict[json_ai.problem_definition.target]\n json_ai.problem_definition.timeseries_settings.target_type = target_type\n encoder_dict[col_name] = call(\n lookup_encoder(\n target_type,\n col_name,\n False,\n json_ai.problem_definition,\n False,\n None,\n )\n )\n\n dtype_dict[col_name] = target_type\n # @TODO: Is populating the json_ai at this stage even necessary?\n json_ai.encoders[col_name] = encoder_dict[col_name]\n json_ai.dtype_dict[col_name] = target_type\n json_ai.dependency_dict[col_name] = []\n\n # ----------------- #\n\n input_cols = [x.replace(\"'\", \"\\\\'\").replace('\"', '\\\\\"') for x in json_ai.encoders\n if x != json_ai.problem_definition.target]\n input_cols = \",\".join([f\"\"\"'{name}'\"\"\" for name in input_cols])\n\n # ----------------- #\n # Time-series specific code blocks\n # ----------------- #\n\n ts_transform_code = \"\"\n ts_analyze_code = None\n ts_encoder_code = \"\"\n if json_ai.timeseries_transformer is not None:\n ts_transform_code = f\"\"\"\nlog.info('Transforming timeseries data')\ndata = {call(json_ai.timeseries_transformer)}\n\"\"\"\n ts_analyze_code = f\"\"\"\nself.ts_analysis = {call(json_ai.timeseries_analyzer)}\n\"\"\"\n # @TODO: set these kwargs/properties in the json ai construction (if possible)\n if json_ai.timeseries_analyzer is not None:\n ts_encoder_code = \"\"\"\nif encoder.is_timeseries_encoder:\n kwargs['ts_analysis'] = self.ts_analysis\n\"\"\"\n\n if json_ai.problem_definition.timeseries_settings.is_timeseries:\n ts_target_code = \"\"\"\nif encoder.is_target:\n encoder.normalizers = self.ts_analysis['target_normalizers']\n encoder.group_combinations = self.ts_analysis['group_combinations']\n\"\"\"\n else:\n ts_target_code = \"\"\n\n # ----------------- #\n # Statistical Analysis Body\n # ----------------- #\n\n analyze_data_body = f\"\"\"\nlog.info(\"Performing statistical analysis on data\")\nself.statistical_analysis = lightwood.data.statistical_analysis(data,\n self.dtype_dict,\n {json_ai.identifiers},\n self.problem_definition)\n\n# Instantiate post-training evaluation\nself.analysis_blocks = [{', '.join([call(block) for block in json_ai.analysis_blocks])}]\n \"\"\"\n\n analyze_data_body = align(analyze_data_body, 
2)\n\n # ----------------- #\n # Pre-processing Body\n # ----------------- #\n\n clean_body = f\"\"\"\nlog.info('Cleaning the data')\nself.imputers = {imputers}\ndata = {call(json_ai.cleaner)}\n\n# Time-series blocks\n{ts_transform_code}\n\"\"\"\n if ts_analyze_code is not None:\n clean_body += f\"\"\"\nif self.mode != 'predict':\n{align(ts_analyze_code,1)}\n\"\"\"\n\n clean_body += '\\nreturn data'\n\n clean_body = align(clean_body, 2)\n\n # ----------------- #\n # Train-Test Splitter Body\n # ----------------- #\n\n split_body = f\"\"\"\nlog.info(\"Splitting the data into train/test\")\ntrain_test_data = {call(json_ai.splitter)}\n\nreturn train_test_data\n \"\"\"\n\n split_body = align(split_body, 2)\n\n # ----------------- #\n # Prepare features Body\n # ----------------- #\n\n prepare_body = f\"\"\"\nself.mode = 'train'\n\nif self.statistical_analysis is None:\n raise Exception(\"Please run analyze_data first\")\n\n# Column to encoder mapping\nself.encoders = {inline_dict(encoder_dict)}\n\n# Prepare the training + dev data\nconcatenated_train_dev = pd.concat([data['train'], data['dev']])\n\nlog.info('Preparing the encoders')\n\nencoder_prepping_dict = {{}}\n\n# Prepare encoders that do not require learned strategies\nfor col_name, encoder in self.encoders.items():\n if col_name != self.target and not encoder.is_trainable_encoder:\n encoder_prepping_dict[col_name] = [encoder, concatenated_train_dev[col_name], 'prepare']\n log.info(f'Encoder prepping dict length of: {{len(encoder_prepping_dict)}}')\n\n# Setup parallelization\nparallel_prepped_encoders = mut_method_call(encoder_prepping_dict)\nfor col_name, encoder in parallel_prepped_encoders.items():\n self.encoders[col_name] = encoder\n\n# Prepare the target\nif self.target not in parallel_prepped_encoders:\n if self.encoders[self.target].is_trainable_encoder:\n self.encoders[self.target].prepare(data['train'][self.target], data['dev'][self.target])\n else:\n self.encoders[self.target].prepare(pd.concat([data['train'], data['dev']])[self.target])\n\n# Prepare any non-target encoders that are learned\nfor col_name, encoder in self.encoders.items():\n if col_name != self.target and encoder.is_trainable_encoder:\n priming_data = pd.concat([data['train'], data['dev']])\n kwargs = {{}}\n if self.dependencies[col_name]:\n kwargs['dependency_data'] = {{}}\n for col in self.dependencies[col_name]:\n kwargs['dependency_data'][col] = {{\n 'original_type': self.dtype_dict[col],\n 'data': priming_data[col]\n }}\n {align(ts_encoder_code, 3)}\n\n # If an encoder representation requires the target, provide priming data\n if hasattr(encoder, 'uses_target'):\n kwargs['encoded_target_values'] = self.encoders[self.target].encode(priming_data[self.target])\n\n encoder.prepare(data['train'][col_name], data['dev'][col_name], **kwargs)\n\n {align(ts_target_code, 1)}\n\"\"\"\n prepare_body = align(prepare_body, 2)\n\n # ----------------- #\n # Featurize Data Body\n # ----------------- #\n\n feature_body = f\"\"\"\nlog.info('Featurizing the data')\n\nfeature_data = {{ key: EncodedDs(self.encoders, data, self.target) for key, data in split_data.items() if key != \"stratified_on\"}}\n\nreturn feature_data\n\n\"\"\" # noqa\n\n feature_body = align(feature_body, 2)\n\n # ----------------- #\n # Fit Mixer Body\n # ----------------- #\n\n fit_body = f\"\"\"\nself.mode = 'train'\n\n# --------------- #\n# Extract data\n# --------------- #\n# Extract the featurized data into train/dev/test\nencoded_train_data = enc_data['train']\nencoded_dev_data = 
enc_data['dev']\nencoded_test_data = enc_data['test']\n\nlog.info('Training the mixers')\n\n# --------------- #\n# Fit Models\n# --------------- #\n# Assign list of mixers\nself.mixers = [{', '.join([call(x) for x in json_ai.model[\"args\"][\"submodels\"]])}]\n\n# Train mixers\ntrained_mixers = []\nfor mixer in self.mixers:\n try:\n self.fit_mixer(mixer, encoded_train_data, encoded_dev_data)\n trained_mixers.append(mixer)\n except Exception as e:\n log.warning(f'Exception: {{e}} when training mixer: {{mixer}}')\n if {json_ai.problem_definition.strict_mode} and mixer.stable:\n raise e\n\n# Update mixers to trained versions\nself.mixers = trained_mixers\n\n# --------------- #\n# Create Ensembles\n# --------------- #\nlog.info('Ensembling the mixer')\n# Create an ensemble of mixers to identify best performing model\nself.pred_args = PredictionArguments()\n# Dirty hack\nself.ensemble = {call(json_ai.model)}\nself.supports_proba = self.ensemble.supports_proba\n\"\"\"\n fit_body = align(fit_body, 2)\n\n # ----------------- #\n # Analyze Ensemble Body\n # ----------------- #\n\n analyze_ensemble = f\"\"\"\n\n# --------------- #\n# Extract data\n# --------------- #\n# Extract the featurized data into train/dev/test\nencoded_train_data = enc_data['train']\nencoded_dev_data = enc_data['dev']\nencoded_test_data = enc_data['test']\n\n# --------------- #\n# Analyze Ensembles\n# --------------- #\nlog.info('Analyzing the ensemble of mixers')\nself.model_analysis, self.runtime_analyzer = {call(json_ai.analyzer)}\n\"\"\"\n analyze_ensemble = align(analyze_ensemble, 2)\n\n # ----------------- #\n # Adjust Ensemble Body\n # ----------------- #\n\n adjust_body = f\"\"\"\nself.mode = 'train'\n\n# --------------- #\n# Prepare data\n# --------------- #\nif old_data is None:\n old_data = pd.DataFrame()\n\nif isinstance(old_data, pd.DataFrame):\n old_data = EncodedDs(self.encoders, old_data, self.target)\n\nif isinstance(new_data, pd.DataFrame):\n new_data = EncodedDs(self.encoders, new_data, self.target)\n\n# --------------- #\n# Update/Adjust Mixers\n# --------------- #\nlog.info('Updating the mixers')\n\nfor mixer in self.mixers:\n mixer.partial_fit(new_data, old_data)\n\"\"\" # noqa\n\n adjust_body = align(adjust_body, 2)\n\n # ----------------- #\n # Learn Body\n # ----------------- #\n\n learn_body = \"\"\"\nself.mode = 'train'\n\n# Perform stats analysis\nself.analyze_data(data)\n\n# Pre-process the data\ndata = self.preprocess(data)\n\n# Create train/test (dev) split\ntrain_dev_test = self.split(data)\n\n# Prepare encoders\nself.prepare(train_dev_test)\n\n# Create feature vectors from data\nenc_train_test = self.featurize(train_dev_test)\n\n# Prepare mixers\nself.fit(enc_train_test)\n\n# Analyze the ensemble\nself.analyze_ensemble(enc_train_test)\n\n# ------------------------ #\n# Enable model partial fit AFTER it is trained and evaluated for performance with the appropriate train/dev/test splits.\n# This assumes the predictor could continuously evolve, hence including reserved testing data may improve predictions.\n# SET `json_ai.problem_definition.fit_on_all=False` TO TURN THIS BLOCK OFF.\n\n# Update the mixers with partial fit\nif self.problem_definition.fit_on_all:\n\n log.info(\"Adjustment on validation requested.\")\n self.adjust(enc_train_test[\"test\"], ConcatedEncodedDs([enc_train_test[\"train\"], enc_train_test[\"dev\"]]))\n\n\"\"\"\n learn_body = align(learn_body, 2)\n # ----------------- #\n # Predict Body\n # ----------------- #\n\n predict_body = f\"\"\"\nself.mode = 'predict'\n\nif 
len(data) == 0:\n raise Exception(\"Empty input, aborting prediction. Please try again with some input data.\")\n\n# Remove columns that user specifies to ignore\nlog.info(f'Dropping features: {{self.problem_definition.ignore_features}}')\ndata = data.drop(columns=self.problem_definition.ignore_features, errors='ignore')\nfor col in self.input_cols:\n if col not in data.columns:\n data[col] = [None] * len(data)\n\n# Pre-process the data\ndata = self.preprocess(data)\n\n# Featurize the data\nencoded_ds = self.featurize({{\"predict_data\": data}})[\"predict_data\"]\nencoded_data = encoded_ds.get_encoded_data(include_target=False)\n\nself.pred_args = PredictionArguments.from_dict(args)\ndf = self.ensemble(encoded_ds, args=self.pred_args)\n\nif self.pred_args.all_mixers:\n return df\nelse:\n insights, global_insights = {call(json_ai.explainer)}\n return insights\n\"\"\"\n\n predict_body = align(predict_body, 2)\n\n predictor_code = f\"\"\"\n{IMPORTS}\n{IMPORT_EXTERNAL_DIRS}\n\nclass Predictor(PredictorInterface):\n target: str\n mixers: List[BaseMixer]\n encoders: Dict[str, BaseEncoder]\n ensemble: BaseEnsemble\n mode: str\n\n def __init__(self):\n seed({json_ai.problem_definition.seed_nr})\n self.target = '{json_ai.problem_definition.target}'\n self.mode = 'inactive'\n self.problem_definition = ProblemDefinition.from_dict({json_ai.problem_definition.to_dict()})\n self.accuracy_functions = {json_ai.accuracy_functions}\n self.identifiers = {json_ai.identifiers}\n self.dtype_dict = {inline_dict(dtype_dict)}\n\n # Any feature-column dependencies\n self.dependencies = {inline_dict(json_ai.dependency_dict)}\n\n self.input_cols = [{input_cols}]\n\n # Initial stats analysis\n self.statistical_analysis = None\n self.runtime_log = dict()\n\n @timed\n def analyze_data(self, data: pd.DataFrame) -> None:\n # Perform a statistical analysis on the unprocessed data\n{analyze_data_body}\n\n @timed\n def preprocess(self, data: pd.DataFrame) -> pd.DataFrame:\n # Preprocess and clean data\n{clean_body}\n\n @timed\n def split(self, data: pd.DataFrame) -> Dict[str, pd.DataFrame]:\n # Split the data into training/testing splits\n{split_body}\n\n @timed\n def prepare(self, data: Dict[str, pd.DataFrame]) -> None:\n # Prepare encoders to featurize data\n{prepare_body}\n\n @timed\n def featurize(self, split_data: Dict[str, pd.DataFrame]):\n # Featurize data into numerical representations for models\n{feature_body}\n\n @timed\n def fit(self, enc_data: Dict[str, pd.DataFrame]) -> None:\n # Fit predictors to estimate target\n{fit_body}\n\n @timed\n def fit_mixer(self, mixer, encoded_train_data, encoded_dev_data) -> None:\n mixer.fit(encoded_train_data, encoded_dev_data)\n\n @timed\n def analyze_ensemble(self, enc_data: Dict[str, pd.DataFrame]) -> None:\n # Evaluate quality of fit for the ensemble of mixers\n{analyze_ensemble}\n\n @timed\n def learn(self, data: pd.DataFrame) -> None:\n log.info(f'Dropping features: {{self.problem_definition.ignore_features}}')\n data = data.drop(columns=self.problem_definition.ignore_features, errors='ignore')\n{learn_body}\n\n @timed\n def adjust(self, new_data: Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame],\n old_data: Optional[Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame]] = None) -> None:\n # Update mixers with new information\n{adjust_body}\n\n @timed\n def predict(self, data: pd.DataFrame, args: Dict = {{}}) -> pd.DataFrame:\n{predict_body}\n\"\"\"\n\n try:\n import black\n except Exception:\n black = None\n\n if black is not None:\n log.info('Unable to import black 
formatter, predictor code might be a bit ugly.')\n predictor_code = black.format_str(predictor_code, mode=black.FileMode())\n\n return predictor_code\n\n\ndef validate_json_ai(json_ai: JsonAI) -> bool:\n \"\"\"\n Checks the validity of a ``JsonAI`` object\n\n :param json_ai: A ``JsonAI`` object\n\n :returns: Whether the JsonAI is valid, i.e. doesn't contain prohibited values, unknown values and can be turned into code.\n \"\"\" # noqa\n from lightwood.api.high_level import predictor_from_code, code_from_json_ai\n\n try:\n predictor_from_code(code_from_json_ai(json_ai))\n return True\n except Exception:\n return False\n", "path": "lightwood/api/json_ai.py"}], "after_files": [{"content": "# TODO: _add_implicit_values unit test ensures NO changes for a fully specified file.\nfrom copy import deepcopy\nfrom lightwood.helpers.templating import call, inline_dict, align\nfrom lightwood.api import dtype\nfrom lightwood.api.types import (\n JsonAI,\n TypeInformation,\n StatisticalAnalysis,\n ProblemDefinition,\n)\nimport inspect\nfrom lightwood.helpers.log import log\n\n\n# For custom modules, we create a module loader with necessary imports below\nIMPORT_EXTERNAL_DIRS = \"\"\"\nfor import_dir in [os.path.join(os.path.expanduser('~/lightwood_modules'), lightwood_version.replace('.', '_')), os.path.join('/etc/lightwood_modules', lightwood_version.replace('.', '_'))]:\n if os.path.exists(import_dir) and os.access(import_dir, os.R_OK):\n for file_name in list(os.walk(import_dir))[0][2]:\n if file_name[-3:] != '.py':\n continue\n mod_name = file_name[:-3]\n loader = importlib.machinery.SourceFileLoader(mod_name,\n os.path.join(import_dir, file_name))\n module = ModuleType(loader.name)\n loader.exec_module(module)\n sys.modules[mod_name] = module\n exec(f'import {mod_name}')\n\"\"\" # noqa\n\nIMPORTS = \"\"\"\nimport lightwood\nfrom lightwood import __version__ as lightwood_version\nfrom lightwood.analysis import *\nfrom lightwood.api import *\nfrom lightwood.data import *\nfrom lightwood.encoder import *\nfrom lightwood.ensemble import *\nfrom lightwood.helpers.device import *\nfrom lightwood.helpers.general import *\nfrom lightwood.helpers.log import *\nfrom lightwood.helpers.numeric import *\nfrom lightwood.helpers.imputers import *\nfrom lightwood.helpers.parallelism import *\nfrom lightwood.helpers.seed import *\nfrom lightwood.helpers.text import *\nfrom lightwood.helpers.torch import *\nfrom lightwood.mixer import *\nimport pandas as pd\nfrom typing import Dict, List, Union\nimport os\nfrom types import ModuleType\nimport importlib.machinery\nimport sys\nimport time\n\"\"\"\n\n\ndef lookup_encoder(\n col_dtype: str,\n col_name: str,\n is_target: bool,\n problem_defintion: ProblemDefinition,\n is_target_predicting_encoder: bool,\n statistical_analysis: StatisticalAnalysis,\n):\n \"\"\"\n Assign a default encoder for a given column based on its data type, and whether it is a target. Encoders intake raw (but cleaned) data and return an feature representation. This function assigns, per data type, what the featurizer should be. This function runs on each column within the dataset available for model building to assign how it should be featurized.\n\n Users may override to create a custom encoder to enable their own featurization process. However, in order to generate template JSON-AI, this code runs automatically. Users may edit the generated syntax and use custom approaches while model building.\n\n For each encoder, \"args\" may be passed. 
These args depend an encoder requires during its preparation call.\n\n :param col_dtype: A data-type of a column specified\n :param col_name: The name of the column\n :param is_target: Whether the column is the target for prediction. If true, only certain possible feature representations are allowed, particularly for complex data types.\n :param problem_definition: The ``ProblemDefinition`` criteria; this populates specifics on how models and encoders may be trained.\n :param is_target_predicting_encoder:\n \"\"\" # noqa\n\n tss = problem_defintion.timeseries_settings\n encoder_lookup = {\n dtype.integer: \"NumericEncoder\",\n dtype.float: \"NumericEncoder\",\n dtype.binary: \"BinaryEncoder\",\n dtype.categorical: \"CategoricalAutoEncoder\"\n if statistical_analysis is None\n or len(statistical_analysis.histograms[col_name]) > 100\n else \"OneHotEncoder\",\n dtype.tags: \"MultiHotEncoder\",\n dtype.date: \"DatetimeEncoder\",\n dtype.datetime: \"DatetimeEncoder\",\n dtype.image: \"Img2VecEncoder\",\n dtype.rich_text: \"PretrainedLangEncoder\",\n dtype.short_text: \"CategoricalAutoEncoder\",\n dtype.quantity: \"NumericEncoder\",\n dtype.audio: \"MFCCEncoder\",\n dtype.num_array: \"NumArrayEncoder\",\n dtype.cat_array: \"CatArrayEncoder\",\n dtype.num_tsarray: \"TimeSeriesEncoder\",\n dtype.cat_tsarray: \"TimeSeriesEncoder\",\n }\n\n # If column is a target, only specific feature representations are allowed that enable supervised tasks\n target_encoder_lookup_override = {\n dtype.rich_text: \"VocabularyEncoder\",\n dtype.categorical: \"OneHotEncoder\",\n }\n\n # Assign a default encoder to each column.\n encoder_dict = {\"module\": encoder_lookup[col_dtype], \"args\": {}}\n\n # If the column is a target, ensure that the feature representation can enable supervised tasks\n if is_target:\n encoder_dict[\"args\"] = {\"is_target\": \"True\"}\n\n if col_dtype in target_encoder_lookup_override:\n encoder_dict[\"module\"] = target_encoder_lookup_override[col_dtype]\n\n if col_dtype in (dtype.categorical, dtype.binary):\n if problem_defintion.unbias_target:\n encoder_dict[\"args\"][\n \"target_weights\"\n ] = \"$statistical_analysis.target_weights\"\n if problem_defintion.target_weights is not None:\n encoder_dict[\"args\"][\n \"target_weights\"\n ] = problem_defintion.target_weights\n\n if col_dtype in (dtype.integer, dtype.float, dtype.num_array, dtype.num_tsarray):\n encoder_dict[\"args\"][\n \"positive_domain\"\n ] = \"$statistical_analysis.positive_domain\"\n\n # Time-series representations require more advanced flags\n if tss.is_timeseries:\n gby = tss.group_by if tss.group_by is not None else []\n if col_name in tss.order_by:\n encoder_dict[\"module\"] = \"ArrayEncoder\"\n encoder_dict[\"args\"][\"original_type\"] = f'\"{tss.target_type}\"'\n encoder_dict[\"args\"][\"window\"] = f\"{tss.window}\"\n\n if is_target:\n if col_dtype in [dtype.integer]:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"module\"] = \"TsNumericEncoder\"\n if col_dtype in [dtype.float]:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"module\"] = \"TsNumericEncoder\"\n if tss.horizon > 1:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"args\"][\"timesteps\"] = f\"{tss.horizon}\"\n if col_dtype in [dtype.num_tsarray]:\n encoder_dict[\"module\"] = \"TsArrayNumericEncoder\"\n elif col_dtype in [dtype.cat_tsarray]:\n encoder_dict[\"module\"] = \"TsCatArrayEncoder\"\n\n if \"__mdb_ts_previous\" in col_name or col_name in tss.historical_columns:\n 
encoder_dict[\"module\"] = \"TimeSeriesEncoder\"\n encoder_dict[\"args\"][\"original_type\"] = f'\"{tss.target_type}\"'\n encoder_dict[\"args\"][\"window\"] = f\"{tss.window}\"\n\n # Set arguments for the encoder\n if encoder_dict[\"module\"] == \"PretrainedLangEncoder\" and not is_target:\n encoder_dict[\"args\"][\"output_type\"] = \"$dtype_dict[$target]\"\n\n if eval(encoder_dict[\"module\"]).is_trainable_encoder:\n encoder_dict[\"args\"][\"stop_after\"] = \"$problem_definition.seconds_per_encoder\"\n\n if is_target_predicting_encoder:\n encoder_dict[\"args\"][\"embed_mode\"] = \"False\"\n return encoder_dict\n\n\ndef generate_json_ai(\n type_information: TypeInformation,\n statistical_analysis: StatisticalAnalysis,\n problem_definition: ProblemDefinition,\n) -> JsonAI:\n \"\"\"\n Given ``TypeInformation``, ``StatisticalAnalysis``, and the ``ProblemDefinition``, generate a JSON config file with the necessary elements of the ML pipeline populated.\n\n :param TypeInformation: Specifies what data types each column within the dataset are\n :param statistical_analysis:\n :param problem_definition: Specifies details of the model training/building procedure, as defined by ``ProblemDefinition``\n\n :returns: JSON-AI object with fully populated details of the ML pipeline\n \"\"\" # noqaexec\n exec(IMPORTS, globals())\n exec(IMPORT_EXTERNAL_DIRS, globals())\n target = problem_definition.target\n input_cols = []\n tss = problem_definition.timeseries_settings\n dtype_dict = type_information.dtypes\n for k in type_information.identifiers:\n if not (tss.is_timeseries and tss.group_by and k in tss.group_by):\n del dtype_dict[k]\n dependency_dict = {}\n\n for col_name, col_dtype in dtype_dict.items():\n if (\n (col_name not in type_information.identifiers\n and col_dtype not in (dtype.invalid, dtype.empty)\n and col_name != target)\n or\n (tss.group_by is not None and col_name in tss.group_by)\n ):\n if col_name != problem_definition.target:\n input_cols.append(col_name)\n\n is_target_predicting_encoder = False\n is_ts = problem_definition.timeseries_settings.is_timeseries\n\n # Single text column classification\n if (\n len(input_cols) == 1\n and type_information.dtypes[input_cols[0]] in (dtype.rich_text)\n and type_information.dtypes[target] in (dtype.categorical, dtype.binary)\n ):\n is_target_predicting_encoder = True\n\n if is_target_predicting_encoder:\n submodels = [\n {\n \"module\": \"Unit\",\n \"args\": {\n \"target_encoder\": \"$encoders[self.target]\",\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n },\n }\n ]\n else:\n submodels = [\n {\n \"module\": \"Neural\",\n \"args\": {\n \"fit_on_dev\": True,\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"search_hyperparameters\": True,\n },\n }\n ]\n\n if (not tss.is_timeseries or tss.horizon == 1) and dtype_dict[target] not in (dtype.num_array, dtype.cat_array):\n submodels.extend(\n [\n {\n \"module\": \"LightGBM\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"fit_on_dev\": True,\n },\n },\n {\n \"module\": \"Regression\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n },\n },\n ]\n )\n elif tss.is_timeseries and tss.horizon > 1:\n submodels.extend(\n [\n {\n \"module\": \"LightGBMArray\",\n \"args\": {\n \"fit_on_dev\": True,\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"n_ts_predictions\": \"$problem_definition.timeseries_settings.horizon\",\n },\n }\n ]\n )\n\n if tss.use_previous_target and dtype_dict[target] in (dtype.integer, 
dtype.float, dtype.quantity):\n submodels.extend(\n [\n {\n \"module\": \"SkTime\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"n_ts_predictions\": \"$problem_definition.timeseries_settings.horizon\",\n },\n }\n ]\n )\n\n model = {\n \"module\": \"BestOf\",\n \"args\": {\n \"submodels\": submodels,\n \"args\": \"$pred_args\",\n \"accuracy_functions\": \"$accuracy_functions\",\n \"ts_analysis\": \"self.ts_analysis\" if is_ts else None,\n }\n }\n\n if tss.is_timeseries and tss.horizon > 1:\n if dtype_dict[target] in (dtype.integer, dtype.float, dtype.quantity):\n dtype_dict[target] = dtype.num_tsarray\n else:\n dtype_dict[target] = dtype.cat_tsarray\n\n encoders = {\n target: lookup_encoder(\n dtype_dict[target],\n target,\n True,\n problem_definition,\n False,\n statistical_analysis,\n )\n }\n\n for col in input_cols:\n encoders[col] = lookup_encoder(\n dtype_dict[col],\n col,\n False,\n problem_definition,\n is_target_predicting_encoder,\n statistical_analysis,\n )\n\n # Decide on the accuracy functions to use\n output_dtype = dtype_dict[target]\n if output_dtype in [\n dtype.integer,\n dtype.float,\n dtype.date,\n dtype.datetime,\n dtype.quantity,\n ]:\n accuracy_functions = [\"r2_score\"]\n elif output_dtype in [dtype.categorical, dtype.tags, dtype.binary]:\n accuracy_functions = [\"balanced_accuracy_score\"]\n elif output_dtype in (dtype.num_array, dtype.num_tsarray):\n accuracy_functions = [\"evaluate_num_array_accuracy\"]\n elif output_dtype in (dtype.cat_array, dtype.cat_tsarray):\n accuracy_functions = [\"evaluate_cat_array_accuracy\"]\n else:\n raise Exception(\n f\"Please specify a custom accuracy function for output type {output_dtype}\"\n )\n\n # special dispatch for t+1 time series forecasters\n if is_ts:\n if output_dtype in [dtype.integer, dtype.float]:\n accuracy_functions = [\"evaluate_num_array_accuracy\"]\n\n if problem_definition.time_aim is None:\n # 5 days\n problem_definition.time_aim = 3 * 24 * 3600\n\n # Encoders are assigned 1/3 of the time unless a user overrides this (equal time per encoder)\n if problem_definition.seconds_per_encoder is None:\n nr_trainable_encoders = len(\n [\n x\n for x in encoders.values()\n if eval(x[\"module\"]).is_trainable_encoder\n ]\n )\n if nr_trainable_encoders > 0:\n problem_definition.seconds_per_encoder = 0.33 * problem_definition.time_aim / nr_trainable_encoders\n\n # Mixers are assigned 1/3 of the time aim (or 2/3 if there are no trainable encoders )\\\n # unless a user overrides this (equal time per mixer)\n if problem_definition.seconds_per_mixer is None:\n if problem_definition.seconds_per_encoder is None:\n problem_definition.seconds_per_mixer = 0.66 * problem_definition.time_aim / len(model['args']['submodels'])\n else:\n problem_definition.seconds_per_mixer = 0.33 * problem_definition.time_aim / len(model['args']['submodels'])\n\n return JsonAI(\n cleaner=None,\n splitter=None,\n analyzer=None,\n explainer=None,\n encoders=encoders,\n dtype_dict=dtype_dict,\n dependency_dict=dependency_dict,\n model=model,\n problem_definition=problem_definition,\n identifiers=type_information.identifiers,\n timeseries_transformer=None,\n timeseries_analyzer=None,\n accuracy_functions=accuracy_functions,\n )\n\n\ndef _merge_implicit_values(field: dict, implicit_value: dict) -> dict:\n \"\"\"\n Helper function for `_populate_implicit_field`.\n Takes a user-defined field along with its implicit value, and merges them together.\n\n :param field: JsonAI field with user-defined parameters.\n :param 
implicit_value: implicit values for the field.\n :return: original field with implicit values merged into it.\n \"\"\"\n exec(IMPORTS, globals())\n exec(IMPORT_EXTERNAL_DIRS, globals())\n module = eval(field[\"module\"])\n\n if inspect.isclass(module):\n args = list(inspect.signature(module.__init__).parameters.keys())[1:]\n else:\n args = module.__code__.co_varnames\n\n for arg in args:\n if \"args\" not in field:\n field[\"args\"] = implicit_value[\"args\"]\n else:\n if arg not in field[\"args\"]:\n if arg in implicit_value[\"args\"]:\n field[\"args\"][arg] = implicit_value[\"args\"][arg]\n\n return field\n\n\ndef _populate_implicit_field(\n json_ai: JsonAI, field_name: str, implicit_value: dict, is_timeseries: bool\n) -> None:\n \"\"\"\n Populate the implicit field of the JsonAI, either by filling it in entirely if missing, or by introspecting the class or function and assigning default values to the args in it's signature that are in the implicit default but haven't been populated by the user\n\n :params: json_ai: ``JsonAI`` object that describes the ML pipeline that may not have every detail fully specified.\n :params: field_name: Name of the field the implicit field in ``JsonAI``\n :params: implicit_value: The dictionary containing implicit values for the module and arg in the field\n :params: is_timeseries: Whether or not this is a timeseries problem\n\n :returns: nothing, this method mutates the respective field of the ``JsonAI`` object it receives\n \"\"\" # noqa\n # These imports might be slow, in which case the only <easy> solution is to line this code\n field = json_ai.__getattribute__(field_name)\n if field is None:\n # This if is to only populated timeseries-specific implicit fields for implicit problems\n if is_timeseries or field_name not in (\n \"timeseries_analyzer\",\n \"timeseries_transformer\",\n ):\n field = implicit_value\n\n # If the user specified one or more subfields in a field that's a list\n # Populate them with implicit arguments form the implicit values from that subfield\n elif isinstance(field, list) and isinstance(implicit_value, list):\n for i in range(len(field)):\n sub_field_implicit = [\n x for x in implicit_value if x[\"module\"] == field[i][\"module\"]\n ]\n if len(sub_field_implicit) == 1:\n field[i] = _merge_implicit_values(field[i], sub_field_implicit[0])\n for sub_field_implicit in implicit_value:\n if (\n len([x for x in field if x[\"module\"] == sub_field_implicit[\"module\"]])\n == 0\n ):\n field.append(sub_field_implicit)\n # If the user specified the field, add implicit arguments which we didn't specify\n else:\n field = _merge_implicit_values(field, implicit_value)\n json_ai.__setattr__(field_name, field)\n\n\ndef _add_implicit_values(json_ai: JsonAI) -> JsonAI:\n \"\"\"\n To enable brevity in writing, auto-generate the \"unspecified/missing\" details required in the ML pipeline.\n\n :params: json_ai: ``JsonAI`` object that describes the ML pipeline that may not have every detail fully specified.\n\n :returns: ``JSONAI`` object with all necessary parameters that were previously left unmentioned filled in.\n \"\"\"\n problem_definition = json_ai.problem_definition\n tss = problem_definition.timeseries_settings\n is_ts = tss.is_timeseries\n\n # Add implicit arguments\n # @TODO: Consider removing once we have a proper editor in studio\n mixers = json_ai.model['args']['submodels']\n for i in range(len(mixers)):\n if mixers[i][\"module\"] == \"Unit\":\n pass\n elif mixers[i][\"module\"] == \"Neural\":\n mixers[i][\"args\"][\"target_encoder\"] = 
mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"timeseries_settings\"] = mixers[i][\"args\"].get(\n \"timeseries_settings\", \"$problem_definition.timeseries_settings\"\n )\n mixers[i][\"args\"][\"net\"] = mixers[i][\"args\"].get(\n \"net\",\n '\"DefaultNet\"'\n if not tss.is_timeseries or not tss.use_previous_target\n else '\"ArNet\"',\n )\n\n elif mixers[i][\"module\"] == \"LightGBM\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"input_cols\"] = mixers[i][\"args\"].get(\n \"input_cols\", \"$input_cols\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n mixers[i][\"args\"][\"use_optuna\"] = True\n elif mixers[i][\"module\"] == \"Regression\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n elif mixers[i][\"module\"] == \"LightGBMArray\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"input_cols\"] = mixers[i][\"args\"].get(\n \"input_cols\", \"$input_cols\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n elif mixers[i][\"module\"] == \"SkTime\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"ts_analysis\"] = mixers[i][\"args\"].get(\n \"ts_analysis\", \"$ts_analysis\"\n )\n # enforce fit_on_all if this mixer is specified\n problem_definition.fit_on_all = True\n\n json_ai.model[\"args\"][\"target\"] = json_ai.model[\"args\"].get(\"target\", \"$target\")\n json_ai.model[\"args\"][\"data\"] = json_ai.model[\"args\"].get(\"data\", \"encoded_test_data\")\n json_ai.model[\"args\"][\"mixers\"] = json_ai.model[\"args\"].get(\"mixers\", \"$mixers\")\n\n for name in json_ai.encoders:\n if name not in json_ai.dependency_dict:\n json_ai.dependency_dict[name] = []\n\n # Add \"hidden\" fields\n hidden_fields = {\n \"cleaner\": {\n \"module\": \"cleaner\",\n \"args\": {\n \"pct_invalid\": \"$problem_definition.pct_invalid\",\n \"identifiers\": \"$identifiers\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n \"mode\": \"$mode\",\n \"imputers\": \"$imputers\",\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"anomaly_detection\": \"$problem_definition.anomaly_detection\",\n },\n },\n \"splitter\": {\n \"module\": \"splitter\",\n \"args\": {\n \"tss\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"seed\": 1,\n \"target\": \"$target\",\n \"dtype_dict\": \"$dtype_dict\",\n \"pct_train\": 0.8,\n \"pct_dev\": 0.1,\n \"pct_test\": 0.1,\n },\n },\n \"analyzer\": {\n \"module\": 
\"model_analyzer\",\n \"args\": {\n \"stats_info\": \"$statistical_analysis\",\n \"tss\": \"$problem_definition.timeseries_settings\",\n \"accuracy_functions\": \"$accuracy_functions\",\n \"predictor\": \"$ensemble\",\n \"data\": \"encoded_test_data\",\n \"train_data\": \"encoded_train_data\",\n \"target\": \"$target\",\n \"dtype_dict\": \"$dtype_dict\",\n \"analysis_blocks\": \"$analysis_blocks\",\n \"ts_analysis\": \"$ts_analysis\" if is_ts else None,\n },\n },\n \"explainer\": {\n \"module\": \"explain\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"positive_domain\": \"$statistical_analysis.positive_domain\",\n \"anomaly_detection\": \"$problem_definition.anomaly_detection\",\n \"data\": \"data\",\n \"encoded_data\": \"encoded_data\",\n \"predictions\": \"df\",\n \"analysis\": \"$runtime_analyzer\",\n \"ts_analysis\": \"$ts_analysis\" if is_ts else None,\n \"target_name\": \"$target\",\n \"target_dtype\": \"$dtype_dict[self.target]\",\n \"explainer_blocks\": \"$analysis_blocks\",\n \"pred_args\": \"$pred_args\",\n },\n },\n \"analysis_blocks\": [\n {\n \"module\": \"ICP\",\n \"args\": {\n \"fixed_significance\": None,\n \"confidence_normalizer\": False,\n \"positive_domain\": \"$statistical_analysis.positive_domain\",\n },\n },\n {\n \"module\": \"AccStats\",\n \"args\": {\"deps\": [\"ICP\"]},\n },\n {\n \"module\": \"ConfStats\",\n \"args\": {\"deps\": [\"ICP\"]},\n },\n ] if problem_definition.use_default_analysis else [],\n \"timeseries_transformer\": {\n \"module\": \"transform_timeseries\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n \"mode\": \"$mode\",\n },\n },\n \"timeseries_analyzer\": {\n \"module\": \"timeseries_analyzer\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n },\n },\n }\n\n for field_name, implicit_value in hidden_fields.items():\n _populate_implicit_field(json_ai, field_name, implicit_value, tss.is_timeseries)\n\n return json_ai\n\n\ndef code_from_json_ai(json_ai: JsonAI) -> str:\n \"\"\"\n Generates a custom ``PredictorInterface`` given the specifications from ``JsonAI`` object.\n\n :param json_ai: ``JsonAI`` object with fully specified parameters\n\n :returns: Automated syntax of the ``PredictorInterface`` object.\n \"\"\"\n json_ai = deepcopy(json_ai)\n # ----------------- #\n # Fill in any missing values\n json_ai = _add_implicit_values(json_ai)\n\n # ----------------- #\n\n # Instantiate data types\n dtype_dict = {}\n\n for k in json_ai.dtype_dict:\n if json_ai.dtype_dict[k] not in (dtype.invalid, dtype.empty):\n dtype_dict[k] = json_ai.dtype_dict[k]\n\n # Populate imputers\n imputer_dict = {}\n if json_ai.imputers:\n for imputer in json_ai.imputers:\n imputer_dict[imputer['args']['target'].replace('\\'', '').replace('\\\"', '')] = call(imputer)\n json_ai.imputers = imputer_dict\n imputers = inline_dict(json_ai.imputers)\n\n # Populate encoders\n encoder_dict = {}\n for col_name, encoder in json_ai.encoders.items():\n encoder_dict[col_name] = call(encoder)\n\n # Populate time-series specific details\n tss = json_ai.problem_definition.timeseries_settings\n if tss.is_timeseries and tss.use_previous_target:\n col_name = f\"__mdb_ts_previous_{json_ai.problem_definition.target}\"\n target_type = json_ai.dtype_dict[json_ai.problem_definition.target]\n 
json_ai.problem_definition.timeseries_settings.target_type = target_type\n encoder_dict[col_name] = call(\n lookup_encoder(\n target_type,\n col_name,\n False,\n json_ai.problem_definition,\n False,\n None,\n )\n )\n\n dtype_dict[col_name] = target_type\n # @TODO: Is populating the json_ai at this stage even necessary?\n json_ai.encoders[col_name] = encoder_dict[col_name]\n json_ai.dtype_dict[col_name] = target_type\n json_ai.dependency_dict[col_name] = []\n\n # ----------------- #\n\n input_cols = [x.replace(\"'\", \"\\\\'\").replace('\"', '\\\\\"') for x in json_ai.encoders\n if x != json_ai.problem_definition.target]\n input_cols = \",\".join([f\"\"\"'{name}'\"\"\" for name in input_cols])\n\n # ----------------- #\n # Time-series specific code blocks\n # ----------------- #\n\n ts_transform_code = \"\"\n ts_analyze_code = None\n ts_encoder_code = \"\"\n if json_ai.timeseries_transformer is not None:\n ts_transform_code = f\"\"\"\nlog.info('Transforming timeseries data')\ndata = {call(json_ai.timeseries_transformer)}\n\"\"\"\n ts_analyze_code = f\"\"\"\nself.ts_analysis = {call(json_ai.timeseries_analyzer)}\n\"\"\"\n # @TODO: set these kwargs/properties in the json ai construction (if possible)\n if json_ai.timeseries_analyzer is not None:\n ts_encoder_code = \"\"\"\nif encoder.is_timeseries_encoder:\n kwargs['ts_analysis'] = self.ts_analysis\n\"\"\"\n\n if json_ai.problem_definition.timeseries_settings.is_timeseries:\n ts_target_code = \"\"\"\nif encoder.is_target:\n encoder.normalizers = self.ts_analysis['target_normalizers']\n encoder.group_combinations = self.ts_analysis['group_combinations']\n\"\"\"\n else:\n ts_target_code = \"\"\n\n # ----------------- #\n # Statistical Analysis Body\n # ----------------- #\n\n analyze_data_body = f\"\"\"\nlog.info(\"Performing statistical analysis on data\")\nself.statistical_analysis = lightwood.data.statistical_analysis(data,\n self.dtype_dict,\n {json_ai.identifiers},\n self.problem_definition)\n\n# Instantiate post-training evaluation\nself.analysis_blocks = [{', '.join([call(block) for block in json_ai.analysis_blocks])}]\n \"\"\"\n\n analyze_data_body = align(analyze_data_body, 2)\n\n # ----------------- #\n # Pre-processing Body\n # ----------------- #\n\n clean_body = f\"\"\"\nlog.info('Cleaning the data')\nself.imputers = {imputers}\ndata = {call(json_ai.cleaner)}\n\n# Time-series blocks\n{ts_transform_code}\n\"\"\"\n if ts_analyze_code is not None:\n clean_body += f\"\"\"\nif self.mode != 'predict':\n{align(ts_analyze_code,1)}\n\"\"\"\n\n clean_body += '\\nreturn data'\n\n clean_body = align(clean_body, 2)\n\n # ----------------- #\n # Train-Test Splitter Body\n # ----------------- #\n\n split_body = f\"\"\"\nlog.info(\"Splitting the data into train/test\")\ntrain_test_data = {call(json_ai.splitter)}\n\nreturn train_test_data\n \"\"\"\n\n split_body = align(split_body, 2)\n\n # ----------------- #\n # Prepare features Body\n # ----------------- #\n\n prepare_body = f\"\"\"\nself.mode = 'train'\n\nif self.statistical_analysis is None:\n raise Exception(\"Please run analyze_data first\")\n\n# Column to encoder mapping\nself.encoders = {inline_dict(encoder_dict)}\n\n# Prepare the training + dev data\nconcatenated_train_dev = pd.concat([data['train'], data['dev']])\n\nlog.info('Preparing the encoders')\n\nencoder_prepping_dict = {{}}\n\n# Prepare encoders that do not require learned strategies\nfor col_name, encoder in self.encoders.items():\n if col_name != self.target and not encoder.is_trainable_encoder:\n 
encoder_prepping_dict[col_name] = [encoder, concatenated_train_dev[col_name], 'prepare']\n log.info(f'Encoder prepping dict length of: {{len(encoder_prepping_dict)}}')\n\n# Setup parallelization\nparallel_prepped_encoders = mut_method_call(encoder_prepping_dict)\nfor col_name, encoder in parallel_prepped_encoders.items():\n self.encoders[col_name] = encoder\n\n# Prepare the target\nif self.target not in parallel_prepped_encoders:\n if self.encoders[self.target].is_trainable_encoder:\n self.encoders[self.target].prepare(data['train'][self.target], data['dev'][self.target])\n else:\n self.encoders[self.target].prepare(pd.concat([data['train'], data['dev']])[self.target])\n\n# Prepare any non-target encoders that are learned\nfor col_name, encoder in self.encoders.items():\n if col_name != self.target and encoder.is_trainable_encoder:\n priming_data = pd.concat([data['train'], data['dev']])\n kwargs = {{}}\n if self.dependencies[col_name]:\n kwargs['dependency_data'] = {{}}\n for col in self.dependencies[col_name]:\n kwargs['dependency_data'][col] = {{\n 'original_type': self.dtype_dict[col],\n 'data': priming_data[col]\n }}\n {align(ts_encoder_code, 3)}\n\n # If an encoder representation requires the target, provide priming data\n if hasattr(encoder, 'uses_target'):\n kwargs['encoded_target_values'] = self.encoders[self.target].encode(priming_data[self.target])\n\n encoder.prepare(data['train'][col_name], data['dev'][col_name], **kwargs)\n\n {align(ts_target_code, 1)}\n\"\"\"\n prepare_body = align(prepare_body, 2)\n\n # ----------------- #\n # Featurize Data Body\n # ----------------- #\n\n feature_body = f\"\"\"\nlog.info('Featurizing the data')\n\nfeature_data = {{ key: EncodedDs(self.encoders, data, self.target) for key, data in split_data.items() if key != \"stratified_on\"}}\n\nreturn feature_data\n\n\"\"\" # noqa\n\n feature_body = align(feature_body, 2)\n\n # ----------------- #\n # Fit Mixer Body\n # ----------------- #\n\n fit_body = f\"\"\"\nself.mode = 'train'\n\n# --------------- #\n# Extract data\n# --------------- #\n# Extract the featurized data into train/dev/test\nencoded_train_data = enc_data['train']\nencoded_dev_data = enc_data['dev']\nencoded_test_data = enc_data['test']\n\nlog.info('Training the mixers')\n\n# --------------- #\n# Fit Models\n# --------------- #\n# Assign list of mixers\nself.mixers = [{', '.join([call(x) for x in json_ai.model[\"args\"][\"submodels\"]])}]\n\n# Train mixers\ntrained_mixers = []\nfor mixer in self.mixers:\n try:\n self.fit_mixer(mixer, encoded_train_data, encoded_dev_data)\n trained_mixers.append(mixer)\n except Exception as e:\n log.warning(f'Exception: {{e}} when training mixer: {{mixer}}')\n if {json_ai.problem_definition.strict_mode} and mixer.stable:\n raise e\n\n# Update mixers to trained versions\nself.mixers = trained_mixers\n\n# --------------- #\n# Create Ensembles\n# --------------- #\nlog.info('Ensembling the mixer')\n# Create an ensemble of mixers to identify best performing model\nself.pred_args = PredictionArguments()\n# Dirty hack\nself.ensemble = {call(json_ai.model)}\nself.supports_proba = self.ensemble.supports_proba\n\"\"\"\n fit_body = align(fit_body, 2)\n\n # ----------------- #\n # Analyze Ensemble Body\n # ----------------- #\n\n analyze_ensemble = f\"\"\"\n\n# --------------- #\n# Extract data\n# --------------- #\n# Extract the featurized data into train/dev/test\nencoded_train_data = enc_data['train']\nencoded_dev_data = enc_data['dev']\nencoded_test_data = enc_data['test']\n\n# --------------- #\n# Analyze 
Ensembles\n# --------------- #\nlog.info('Analyzing the ensemble of mixers')\nself.model_analysis, self.runtime_analyzer = {call(json_ai.analyzer)}\n\"\"\"\n analyze_ensemble = align(analyze_ensemble, 2)\n\n # ----------------- #\n # Adjust Ensemble Body\n # ----------------- #\n\n adjust_body = f\"\"\"\nself.mode = 'train'\n\n# --------------- #\n# Prepare data\n# --------------- #\nif old_data is None:\n old_data = pd.DataFrame()\n\nif isinstance(old_data, pd.DataFrame):\n old_data = EncodedDs(self.encoders, old_data, self.target)\n\nif isinstance(new_data, pd.DataFrame):\n new_data = EncodedDs(self.encoders, new_data, self.target)\n\n# --------------- #\n# Update/Adjust Mixers\n# --------------- #\nlog.info('Updating the mixers')\n\nfor mixer in self.mixers:\n mixer.partial_fit(new_data, old_data)\n\"\"\" # noqa\n\n adjust_body = align(adjust_body, 2)\n\n # ----------------- #\n # Learn Body\n # ----------------- #\n\n learn_body = \"\"\"\nself.mode = 'train'\n\n# Perform stats analysis\nself.analyze_data(data)\n\n# Pre-process the data\ndata = self.preprocess(data)\n\n# Create train/test (dev) split\ntrain_dev_test = self.split(data)\n\n# Prepare encoders\nself.prepare(train_dev_test)\n\n# Create feature vectors from data\nenc_train_test = self.featurize(train_dev_test)\n\n# Prepare mixers\nself.fit(enc_train_test)\n\n# Analyze the ensemble\nself.analyze_ensemble(enc_train_test)\n\n# ------------------------ #\n# Enable model partial fit AFTER it is trained and evaluated for performance with the appropriate train/dev/test splits.\n# This assumes the predictor could continuously evolve, hence including reserved testing data may improve predictions.\n# SET `json_ai.problem_definition.fit_on_all=False` TO TURN THIS BLOCK OFF.\n\n# Update the mixers with partial fit\nif self.problem_definition.fit_on_all:\n\n log.info(\"Adjustment on validation requested.\")\n self.adjust(enc_train_test[\"test\"], ConcatedEncodedDs([enc_train_test[\"train\"], enc_train_test[\"dev\"]]))\n\n\"\"\"\n learn_body = align(learn_body, 2)\n # ----------------- #\n # Predict Body\n # ----------------- #\n\n predict_body = f\"\"\"\nself.mode = 'predict'\n\nif len(data) == 0:\n raise Exception(\"Empty input, aborting prediction. 
Please try again with some input data.\")\n\n# Remove columns that user specifies to ignore\nlog.info(f'Dropping features: {{self.problem_definition.ignore_features}}')\ndata = data.drop(columns=self.problem_definition.ignore_features, errors='ignore')\nfor col in self.input_cols:\n if col not in data.columns:\n data[col] = [None] * len(data)\n\n# Pre-process the data\ndata = self.preprocess(data)\n\n# Featurize the data\nencoded_ds = self.featurize({{\"predict_data\": data}})[\"predict_data\"]\nencoded_data = encoded_ds.get_encoded_data(include_target=False)\n\nself.pred_args = PredictionArguments.from_dict(args)\ndf = self.ensemble(encoded_ds, args=self.pred_args)\n\nif self.pred_args.all_mixers:\n return df\nelse:\n insights, global_insights = {call(json_ai.explainer)}\n return insights\n\"\"\"\n\n predict_body = align(predict_body, 2)\n\n predictor_code = f\"\"\"\n{IMPORTS}\n{IMPORT_EXTERNAL_DIRS}\n\nclass Predictor(PredictorInterface):\n target: str\n mixers: List[BaseMixer]\n encoders: Dict[str, BaseEncoder]\n ensemble: BaseEnsemble\n mode: str\n\n def __init__(self):\n seed({json_ai.problem_definition.seed_nr})\n self.target = '{json_ai.problem_definition.target}'\n self.mode = 'inactive'\n self.problem_definition = ProblemDefinition.from_dict({json_ai.problem_definition.to_dict()})\n self.accuracy_functions = {json_ai.accuracy_functions}\n self.identifiers = {json_ai.identifiers}\n self.dtype_dict = {inline_dict(dtype_dict)}\n\n # Any feature-column dependencies\n self.dependencies = {inline_dict(json_ai.dependency_dict)}\n\n self.input_cols = [{input_cols}]\n\n # Initial stats analysis\n self.statistical_analysis = None\n self.runtime_log = dict()\n\n @timed\n def analyze_data(self, data: pd.DataFrame) -> None:\n # Perform a statistical analysis on the unprocessed data\n{analyze_data_body}\n\n @timed\n def preprocess(self, data: pd.DataFrame) -> pd.DataFrame:\n # Preprocess and clean data\n{clean_body}\n\n @timed\n def split(self, data: pd.DataFrame) -> Dict[str, pd.DataFrame]:\n # Split the data into training/testing splits\n{split_body}\n\n @timed\n def prepare(self, data: Dict[str, pd.DataFrame]) -> None:\n # Prepare encoders to featurize data\n{prepare_body}\n\n @timed\n def featurize(self, split_data: Dict[str, pd.DataFrame]):\n # Featurize data into numerical representations for models\n{feature_body}\n\n @timed\n def fit(self, enc_data: Dict[str, pd.DataFrame]) -> None:\n # Fit predictors to estimate target\n{fit_body}\n\n @timed\n def fit_mixer(self, mixer, encoded_train_data, encoded_dev_data) -> None:\n mixer.fit(encoded_train_data, encoded_dev_data)\n\n @timed\n def analyze_ensemble(self, enc_data: Dict[str, pd.DataFrame]) -> None:\n # Evaluate quality of fit for the ensemble of mixers\n{analyze_ensemble}\n\n @timed\n def learn(self, data: pd.DataFrame) -> None:\n log.info(f'Dropping features: {{self.problem_definition.ignore_features}}')\n data = data.drop(columns=self.problem_definition.ignore_features, errors='ignore')\n{learn_body}\n\n @timed\n def adjust(self, new_data: Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame],\n old_data: Optional[Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame]] = None) -> None:\n # Update mixers with new information\n{adjust_body}\n\n @timed\n def predict(self, data: pd.DataFrame, args: Dict = {{}}) -> pd.DataFrame:\n{predict_body}\n\"\"\"\n\n try:\n import black\n except Exception:\n black = None\n\n if black is not None:\n log.info('Unable to import black formatter, predictor code might be a bit ugly.')\n predictor_code = 
black.format_str(predictor_code, mode=black.FileMode())\n\n return predictor_code\n\n\ndef validate_json_ai(json_ai: JsonAI) -> bool:\n \"\"\"\n Checks the validity of a ``JsonAI`` object\n\n :param json_ai: A ``JsonAI`` object\n\n :returns: Whether the JsonAI is valid, i.e. doesn't contain prohibited values, unknown values and can be turned into code.\n \"\"\" # noqa\n from lightwood.api.high_level import predictor_from_code, code_from_json_ai\n\n try:\n predictor_from_code(code_from_json_ai(json_ai))\n return True\n except Exception:\n return False\n", "path": "lightwood/api/json_ai.py"}]} |
gh_patches_debug_1446 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-2822 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Plone 5.1.4 to 5.1.5 update: resource registry meta bundle generator comments first css construct of individual bundles
In one of our projects, after upgrading from Plone 5.1.4 to Plone 5.1.5, a very small part of the CSS became broken in plone.app.mosaic layouts. Images inside a tile no longer had a "height: auto" on them. This is normally included in mosaic-styles.css, and the mosaic styles were included in default.css.
We quickly patched the missing statement into our theme file and did a patch release, but the underlying problem was vague. The problem would only appear in production; running the site locally did not show it, so my attention was pulled to the meta bundle generation. This was modified between 5.1.4 and 5.1.5 in https://github.com/plone/Products.CMFPlone/commit/397918cd39ba0be4e2e150df5f5f2220e6ecc828 by @vangheem
The problematic code is in this part:
https://github.com/plone/Products.CMFPlone/blob/2195c4a43ba100fb2b7973dccb4299dad2de42fe/Products/CMFPlone/resources/browser/combine.py#L123-L132
The individual bundles are separated by comment lines with // Start Bundle and // End Bundle, but // actually comments out the first following CSS construct; more info at https://www.xanthir.com/b4U10
And the mosaic-styles.css individual bundle starts with:
```
// Start Bundle: mosaic-styles
/* Images will never be bigger then a tile */
.mosaic-tile img {
max-width: 100%;
height: auto;
}
```
It even skips over the /* */ comment on the next line and comments out the first {} construct it sees.
So that is how only our height: auto; got disabled in production.
This is at the moment only broken in Plone 5.1. In Plone 5.2 the whole bundler was rewritten again, partly because of Python 3 support, and it doesn't seem to insert these comments. I have swapped the // comments for /* */ comments and this also solves the problem. I'll create a pull request shortly.
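For illustration, here is a minimal, standalone sketch of that swap (a simplified stand-in for the writer's `_write_out`; the function name and the sample bundle content are made up for the example, not taken from the Plone code base):
```python
from io import StringIO

def concatenate_bundles(resources):
    # Join the individual bundles with /* ... */ block comments. A CSS parser
    # treats these as real comments, unlike // line comments, which swallow
    # the first rule that follows them.
    out = StringIO()
    for bname, script in resources.items():
        out.write('''
/* Start Bundle: {0} */
{1}
/* End Bundle: {2} */
'''.format(bname, script, bname))
    return out.getvalue()

print(concatenate_bundles({
    'mosaic-styles': '.mosaic-tile img { max-width: 100%; height: auto; }',
}))
```
With that separator the first rule of each bundle is parsed normally, so the height: auto declaration stays in effect.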
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/resources/browser/combine.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from Acquisition import aq_base
3 from datetime import datetime
4 from plone.registry.interfaces import IRegistry
5 from plone.resource.file import FilesystemFile
6 from plone.resource.interfaces import IResourceDirectory
7 from Products.CMFPlone.interfaces import IBundleRegistry
8 from Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa
9 from StringIO import StringIO
10 from zExceptions import NotFound
11 from zope.component import getUtility
12 from zope.component import queryUtility
13
14 from collections import OrderedDict
15 import logging
16 import re
17
18
19 PRODUCTION_RESOURCE_DIRECTORY = 'production'
20 logger = logging.getLogger(__name__)
21
22
23 def get_production_resource_directory():
24 persistent_directory = queryUtility(IResourceDirectory, name='persistent')
25 if persistent_directory is None:
26 return ''
27 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]
28 try:
29 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
30 except NotFound:
31 return '%s/++unique++1' % PRODUCTION_RESOURCE_DIRECTORY
32 if 'timestamp.txt' not in production_folder:
33 return '%s/++unique++1' % PRODUCTION_RESOURCE_DIRECTORY
34 timestamp = production_folder.readFile('timestamp.txt')
35 return '%s/++unique++%s' % (
36 PRODUCTION_RESOURCE_DIRECTORY, timestamp)
37
38
39 def get_resource(context, path):
40 if path.startswith('++plone++'):
41 # ++plone++ resources can be customized, we return their override
42 # value if any
43 overrides = get_override_directory(context)
44 filepath = path[9:]
45 if overrides.isFile(filepath):
46 return overrides.readFile(filepath)
47
48 try:
49 resource = context.unrestrictedTraverse(path)
50 except NotFound:
51 logger.warn(u'Could not find resource {0}. You may have to create it first.'.format(path)) # noqa
52 return
53
54 if isinstance(resource, FilesystemFile):
55 (directory, sep, filename) = path.rpartition('/')
56 return context.unrestrictedTraverse(directory).readFile(filename)
57
58 # calling the resource may modify the header, i.e. the content-type.
59 # we do not want this, so keep the original header intact.
60 response_before = context.REQUEST.response
61 context.REQUEST.response = response_before.__class__()
62 if hasattr(aq_base(resource), 'GET'):
63 # for FileResource
64 result = resource.GET()
65 else:
66 # any BrowserView
67 result = resource()
68 context.REQUEST.response = response_before
69 return result
70
71
72 class MetaBundleWriter(object):
73
74 def __init__(self, context, folder, name):
75 self.context = context
76 self.folder = folder
77 self.name = name
78 self.js_resources = OrderedDict()
79 self.css_resources = OrderedDict()
80 self.registry = getUtility(IRegistry)
81 self.bundles = self.registry.collectionOfInterface(
82 IBundleRegistry, prefix='plone.bundles', check=False)
83
84 def write_js(self):
85
86 # default resources
87 if self.name == 'default' and self.registry.records.get(
88 'plone.resources/jquery.js'
89 ):
90 self.js_resources['_jquery'] = get_resource(
91 self.context,
92 self.registry.records['plone.resources/jquery.js'].value)
93 self.js_resources['_requirejs'] = get_resource(
94 self.context,
95 self.registry.records['plone.resources.requirejs'].value)
96 self.js_resources['_configjs'] = get_resource(
97 self.context,
98 self.registry.records['plone.resources.configjs'].value)
99
100 # bundles
101 for name, bundle in self.bundles.items():
102 self.load_js_bundle(name, bundle)
103
104 self._write_out(self.js_resources, '.js')
105
106 def load_js_bundle(self, name, bundle, depth=0):
107 if depth > 10:
108 # recursion detection
109 return
110 if bundle.merge_with != self.name:
111 return
112 if bundle.jscompilation:
113 if bundle.depends and bundle.depends in self.bundles:
114 self.load_js_bundle(
115 bundle.depends, self.bundles[bundle.depends], depth + 1)
116 if name in self.js_resources:
117 return
118 resource = get_resource(self.context, bundle.jscompilation)
119 if not resource:
120 return
121 self.js_resources[name] = resource
122
123 def _write_out(self, resources, postfix):
124 fi = StringIO()
125 for bname, script in resources.items():
126 fi.write('''
127 // Start Bundle: {0}
128 {1}
129 // End Bundle: {2}
130 '''.format(bname, script, bname))
131 self.folder.writeFile(self.name + postfix, fi)
132 resources.clear()
133
134 def load_css_bundle(self, name, bundle, depth=0):
135 if depth > 10:
136 # recursion detection
137 return
138
139 if bundle.merge_with != self.name:
140 return
141
142 if bundle.csscompilation:
143 if bundle.depends and bundle.depends in self.bundles:
144 self.load_css_bundle(
145 bundle.depends, self.bundles[bundle.depends], depth + 1)
146 if name in self.css_resources:
147 return
148
149 css = get_resource(self.context, bundle.csscompilation)
150 if not css:
151 return
152 (path, sep, filename) = bundle.csscompilation.rpartition('/')
153 # Process relative urls:
154 # we prefix with current resource path any url not starting with
155 # '/' or http: or data:
156 css = re.sub(
157 r'''(url\(['"]?(?!['"]?([a-z]+:|\/)))''',
158 r'\1%s/' % path,
159 css)
160 self.css_resources[name] = css
161
162 def write_css(self):
163 for name, bundle in self.bundles.items():
164 self.load_css_bundle(name, bundle)
165
166 self._write_out(self.css_resources, '.css')
167
168
169 def get_override_directory(context):
170 persistent_directory = queryUtility(IResourceDirectory, name='persistent')
171 if persistent_directory is None:
172 return
173 if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:
174 persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)
175 return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]
176
177
178 def combine_bundles(context):
179 container = get_override_directory(context)
180 if PRODUCTION_RESOURCE_DIRECTORY not in container:
181 container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)
182 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
183
184 # store timestamp
185 fi = StringIO()
186 fi.write(datetime.now().isoformat())
187 production_folder.writeFile('timestamp.txt', fi)
188
189 # generate new combined bundles
190 default_writer = MetaBundleWriter(
191 context, production_folder, 'default')
192 default_writer.write_js()
193 logged_in_writer = MetaBundleWriter(
194 context, production_folder, 'logged-in')
195 logged_in_writer.write_js()
196 default_writer.write_css()
197 logged_in_writer.write_css()
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py
--- a/Products/CMFPlone/resources/browser/combine.py
+++ b/Products/CMFPlone/resources/browser/combine.py
@@ -124,9 +124,9 @@
fi = StringIO()
for bname, script in resources.items():
fi.write('''
-// Start Bundle: {0}
+/* Start Bundle: {0} */
{1}
-// End Bundle: {2}
+/* End Bundle: {2} */
'''.format(bname, script, bname))
self.folder.writeFile(self.name + postfix, fi)
resources.clear()
| {"golden_diff": "diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py\n--- a/Products/CMFPlone/resources/browser/combine.py\n+++ b/Products/CMFPlone/resources/browser/combine.py\n@@ -124,9 +124,9 @@\n fi = StringIO()\n for bname, script in resources.items():\n fi.write('''\n-// Start Bundle: {0}\n+/* Start Bundle: {0} */\n {1}\n-// End Bundle: {2}\n+/* End Bundle: {2} */\n '''.format(bname, script, bname))\n self.folder.writeFile(self.name + postfix, fi)\n resources.clear()\n", "issue": "Plone 5.1.4 to 5.1.5 update: resource registry meta bundle generator comments first css construct of individual bundles\nIn one of our projects, after upgrading from Plone 5.1.4 to Plone 5.1.5 A very small part of the css became broken in plone.app.mosaic layouts . Images inside a tile no longer had a \"height: auto\" on them. This is normally included in mosaic-styles.css , and the mosaic styles were included in default.css. \r\n\r\nWe quickly patched the missing statement into our theme file and did a patch release, but the underlying problem was vague. The problem would only appear on production, running the site locally did not show the problem, so my attention was pulled to the metabundle generation. This was modified between 5.1.4 and 5.1.5 in https://github.com/plone/Products.CMFPlone/commit/397918cd39ba0be4e2e150df5f5f2220e6ecc828 by @vangheem \r\n\r\nThe problematic code is in this part:\r\n\r\nhttps://github.com/plone/Products.CMFPlone/blob/2195c4a43ba100fb2b7973dccb4299dad2de42fe/Products/CMFPlone/resources/browser/combine.py#L123-L132\r\n\r\nThe individual bundles are separated by comment lines with // Start bundle and // End Bundle, but // actually comments out the first following css construct , more info at https://www.xanthir.com/b4U10\r\n\r\nAnd the mosaic-styles.css individual bundle start with:\r\n\r\n```\r\n// Start Bundle: mosaic-styles\r\n/* Images will never be bigger then a tile */\r\n.mosaic-tile img {\r\n max-width: 100%;\r\n height: auto;\r\n}\r\n```\r\n\r\nIf even skips over the /* */ comment on the next line and comments the first {} it sees\r\nSo that is how only our height: auto; got disabled in production . \r\n\r\nThis is at the moment only broken in Plone 5.1 , In Plone 5.2 the whole bundler got rewritten again, partly because of Python3 support and doesn't seem to insert these comments. I have swapped the // comment for /* */ comments and this also solves the problem. I'll create a pull request shortly. 
\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa\nfrom StringIO import StringIO\nfrom zExceptions import NotFound\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nfrom collections import OrderedDict\nimport logging\nimport re\n\n\nPRODUCTION_RESOURCE_DIRECTORY = 'production'\nlogger = logging.getLogger(__name__)\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name='persistent')\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return '%s/++unique++1' % PRODUCTION_RESOURCE_DIRECTORY\n if 'timestamp.txt' not in production_folder:\n return '%s/++unique++1' % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return '%s/++unique++%s' % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n if path.startswith('++plone++'):\n # ++plone++ resources can be customized, we return their override\n # value if any\n overrides = get_override_directory(context)\n filepath = path[9:]\n if overrides.isFile(filepath):\n return overrides.readFile(filepath)\n\n try:\n resource = context.unrestrictedTraverse(path)\n except NotFound:\n logger.warn(u'Could not find resource {0}. You may have to create it first.'.format(path)) # noqa\n return\n\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n\n # calling the resource may modify the header, i.e. 
the content-type.\n # we do not want this, so keep the original header intact.\n response_before = context.REQUEST.response\n context.REQUEST.response = response_before.__class__()\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n result = resource.GET()\n else:\n # any BrowserView\n result = resource()\n context.REQUEST.response = response_before\n return result\n\n\nclass MetaBundleWriter(object):\n\n def __init__(self, context, folder, name):\n self.context = context\n self.folder = folder\n self.name = name\n self.js_resources = OrderedDict()\n self.css_resources = OrderedDict()\n self.registry = getUtility(IRegistry)\n self.bundles = self.registry.collectionOfInterface(\n IBundleRegistry, prefix='plone.bundles', check=False)\n\n def write_js(self):\n\n # default resources\n if self.name == 'default' and self.registry.records.get(\n 'plone.resources/jquery.js'\n ):\n self.js_resources['_jquery'] = get_resource(\n self.context,\n self.registry.records['plone.resources/jquery.js'].value)\n self.js_resources['_requirejs'] = get_resource(\n self.context,\n self.registry.records['plone.resources.requirejs'].value)\n self.js_resources['_configjs'] = get_resource(\n self.context,\n self.registry.records['plone.resources.configjs'].value)\n\n # bundles\n for name, bundle in self.bundles.items():\n self.load_js_bundle(name, bundle)\n\n self._write_out(self.js_resources, '.js')\n\n def load_js_bundle(self, name, bundle, depth=0):\n if depth > 10:\n # recursion detection\n return\n if bundle.merge_with != self.name:\n return\n if bundle.jscompilation:\n if bundle.depends and bundle.depends in self.bundles:\n self.load_js_bundle(\n bundle.depends, self.bundles[bundle.depends], depth + 1)\n if name in self.js_resources:\n return\n resource = get_resource(self.context, bundle.jscompilation)\n if not resource:\n return\n self.js_resources[name] = resource\n\n def _write_out(self, resources, postfix):\n fi = StringIO()\n for bname, script in resources.items():\n fi.write('''\n// Start Bundle: {0}\n{1}\n// End Bundle: {2}\n'''.format(bname, script, bname))\n self.folder.writeFile(self.name + postfix, fi)\n resources.clear()\n\n def load_css_bundle(self, name, bundle, depth=0):\n if depth > 10:\n # recursion detection\n return\n\n if bundle.merge_with != self.name:\n return\n\n if bundle.csscompilation:\n if bundle.depends and bundle.depends in self.bundles:\n self.load_css_bundle(\n bundle.depends, self.bundles[bundle.depends], depth + 1)\n if name in self.css_resources:\n return\n\n css = get_resource(self.context, bundle.csscompilation)\n if not css:\n return\n (path, sep, filename) = bundle.csscompilation.rpartition('/')\n # Process relative urls:\n # we prefix with current resource path any url not starting with\n # '/' or http: or data:\n css = re.sub(\n r'''(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))''',\n r'\\1%s/' % path,\n css)\n self.css_resources[name] = css\n\n def write_css(self):\n for name, bundle in self.bundles.items():\n self.load_css_bundle(name, bundle)\n\n self._write_out(self.css_resources, '.css')\n\n\ndef get_override_directory(context):\n persistent_directory = queryUtility(IResourceDirectory, name='persistent')\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n\n\ndef combine_bundles(context):\n container = get_override_directory(context)\n if PRODUCTION_RESOURCE_DIRECTORY not 
in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile('timestamp.txt', fi)\n\n # generate new combined bundles\n default_writer = MetaBundleWriter(\n context, production_folder, 'default')\n default_writer.write_js()\n logged_in_writer = MetaBundleWriter(\n context, production_folder, 'logged-in')\n logged_in_writer.write_js()\n default_writer.write_css()\n logged_in_writer.write_css()\n", "path": "Products/CMFPlone/resources/browser/combine.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa\nfrom StringIO import StringIO\nfrom zExceptions import NotFound\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nfrom collections import OrderedDict\nimport logging\nimport re\n\n\nPRODUCTION_RESOURCE_DIRECTORY = 'production'\nlogger = logging.getLogger(__name__)\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name='persistent')\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return '%s/++unique++1' % PRODUCTION_RESOURCE_DIRECTORY\n if 'timestamp.txt' not in production_folder:\n return '%s/++unique++1' % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return '%s/++unique++%s' % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n if path.startswith('++plone++'):\n # ++plone++ resources can be customized, we return their override\n # value if any\n overrides = get_override_directory(context)\n filepath = path[9:]\n if overrides.isFile(filepath):\n return overrides.readFile(filepath)\n\n try:\n resource = context.unrestrictedTraverse(path)\n except NotFound:\n logger.warn(u'Could not find resource {0}. You may have to create it first.'.format(path)) # noqa\n return\n\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n\n # calling the resource may modify the header, i.e. 
the content-type.\n # we do not want this, so keep the original header intact.\n response_before = context.REQUEST.response\n context.REQUEST.response = response_before.__class__()\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n result = resource.GET()\n else:\n # any BrowserView\n result = resource()\n context.REQUEST.response = response_before\n return result\n\n\nclass MetaBundleWriter(object):\n\n def __init__(self, context, folder, name):\n self.context = context\n self.folder = folder\n self.name = name\n self.js_resources = OrderedDict()\n self.css_resources = OrderedDict()\n self.registry = getUtility(IRegistry)\n self.bundles = self.registry.collectionOfInterface(\n IBundleRegistry, prefix='plone.bundles', check=False)\n\n def write_js(self):\n\n # default resources\n if self.name == 'default' and self.registry.records.get(\n 'plone.resources/jquery.js'\n ):\n self.js_resources['_jquery'] = get_resource(\n self.context,\n self.registry.records['plone.resources/jquery.js'].value)\n self.js_resources['_requirejs'] = get_resource(\n self.context,\n self.registry.records['plone.resources.requirejs'].value)\n self.js_resources['_configjs'] = get_resource(\n self.context,\n self.registry.records['plone.resources.configjs'].value)\n\n # bundles\n for name, bundle in self.bundles.items():\n self.load_js_bundle(name, bundle)\n\n self._write_out(self.js_resources, '.js')\n\n def load_js_bundle(self, name, bundle, depth=0):\n if depth > 10:\n # recursion detection\n return\n if bundle.merge_with != self.name:\n return\n if bundle.jscompilation:\n if bundle.depends and bundle.depends in self.bundles:\n self.load_js_bundle(\n bundle.depends, self.bundles[bundle.depends], depth + 1)\n if name in self.js_resources:\n return\n resource = get_resource(self.context, bundle.jscompilation)\n if not resource:\n return\n self.js_resources[name] = resource\n\n def _write_out(self, resources, postfix):\n fi = StringIO()\n for bname, script in resources.items():\n fi.write('''\n/* Start Bundle: {0} */\n{1}\n/* End Bundle: {2} */\n'''.format(bname, script, bname))\n self.folder.writeFile(self.name + postfix, fi)\n resources.clear()\n\n def load_css_bundle(self, name, bundle, depth=0):\n if depth > 10:\n # recursion detection\n return\n\n if bundle.merge_with != self.name:\n return\n\n if bundle.csscompilation:\n if bundle.depends and bundle.depends in self.bundles:\n self.load_css_bundle(\n bundle.depends, self.bundles[bundle.depends], depth + 1)\n if name in self.css_resources:\n return\n\n css = get_resource(self.context, bundle.csscompilation)\n if not css:\n return\n (path, sep, filename) = bundle.csscompilation.rpartition('/')\n # Process relative urls:\n # we prefix with current resource path any url not starting with\n # '/' or http: or data:\n css = re.sub(\n r'''(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))''',\n r'\\1%s/' % path,\n css)\n self.css_resources[name] = css\n\n def write_css(self):\n for name, bundle in self.bundles.items():\n self.load_css_bundle(name, bundle)\n\n self._write_out(self.css_resources, '.css')\n\n\ndef get_override_directory(context):\n persistent_directory = queryUtility(IResourceDirectory, name='persistent')\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n\n\ndef combine_bundles(context):\n container = get_override_directory(context)\n if 
PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile('timestamp.txt', fi)\n\n # generate new combined bundles\n default_writer = MetaBundleWriter(\n context, production_folder, 'default')\n default_writer.write_js()\n logged_in_writer = MetaBundleWriter(\n context, production_folder, 'logged-in')\n logged_in_writer.write_js()\n default_writer.write_css()\n logged_in_writer.write_css()\n", "path": "Products/CMFPlone/resources/browser/combine.py"}]} |
gh_patches_debug_1447 | rasdani/github-patches | git_diff | lightly-ai__lightly-1177 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CUDA errors in NTXentLoss with gloo backend in multi-gpu training
I was wondering if the `gloo` distributed communication package for multi-gpu training is officially supported by lightly. It seems like e.g. NTXentLoss doesn't work with `gloo` (I'm using pytorch lightning): I get CUDA errors, even when setting `gather_distributed = False`.
I can fix the issue when using `gather_distributed = False` by replacing the line
https://github.com/lightly-ai/lightly/blob/master/lightly/loss/ntx_ent_loss.py#L164
by
```python
labels = labels + batch_size * (dist.rank() if gather_distributed else 0)
```
but then of course I can't use `gather_distributed = True` anymore.
Using the `nccl` backend, everything works fine, but `nccl` is not working well on some of our machines, so unfortunately I'm stuck with `gloo`. I think using `gloo` might be too much of an exception to fix the problem for `gather_distributed = True`, but maybe it'd help to just replace the line above and mention somewhere in the documentation that `gather_distributed` is not supported for `gloo`?
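To make the failure mode concrete, here is a small self-contained sketch of the label bookkeeping behind the proposed change (the helper function is purely illustrative and not part of lightly's API): the rank offset only makes sense when the similarity columns have been gathered from every process, otherwise the shifted labels point past the end of the local logits matrix.
```python
import torch

def positive_pair_labels(batch_size: int, rank: int, gather_distributed: bool) -> torch.Tensor:
    # Column index of each sample's positive pair in the logits matrix.
    labels = torch.arange(batch_size)
    # Only apply the rank offset when the columns were gathered from all
    # processes; with local-only negatives there are just batch_size columns,
    # so shifting by rank * batch_size would index out of range.
    if gather_distributed:
        labels = labels + rank * batch_size
    return labels

# Rank 1 with local-only negatives: labels must stay within [0, batch_size).
print(positive_pair_labels(batch_size=4, rank=1, gather_distributed=False))  # tensor([0, 1, 2, 3])
print(positive_pair_labels(batch_size=4, rank=1, gather_distributed=True))   # tensor([4, 5, 6, 7])
```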
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightly/loss/ntx_ent_loss.py`
Content:
```
1 """ Contrastive Loss Functions """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5
6 import torch
7 from torch import nn
8
9 from lightly.loss.memory_bank import MemoryBankModule
10 from lightly.utils import dist
11
12
13 class NTXentLoss(MemoryBankModule):
14 """Implementation of the Contrastive Cross Entropy Loss.
15
16 This implementation follows the SimCLR[0] paper. If you enable the memory
17 bank by setting the `memory_bank_size` value > 0 the loss behaves like
18 the one described in the MoCo[1] paper.
19
20 - [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709
21 - [1] MoCo, 2020, https://arxiv.org/abs/1911.05722
22
23 Attributes:
24 temperature:
25 Scale logits by the inverse of the temperature.
26 memory_bank_size:
27 Number of negative samples to store in the memory bank.
28 Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.
29 gather_distributed:
30 If True then negatives from all gpus are gathered before the
31 loss calculation. This flag has no effect if memory_bank_size > 0.
32
33 Raises:
34 ValueError: If abs(temperature) < 1e-8 to prevent divide by zero.
35
36 Examples:
37
38 >>> # initialize loss function without memory bank
39 >>> loss_fn = NTXentLoss(memory_bank_size=0)
40 >>>
41 >>> # generate two random transforms of images
42 >>> t0 = transforms(images)
43 >>> t1 = transforms(images)
44 >>>
45 >>> # feed through SimCLR or MoCo model
46 >>> batch = torch.cat((t0, t1), dim=0)
47 >>> output = model(batch)
48 >>>
49 >>> # calculate loss
50 >>> loss = loss_fn(output)
51
52 """
53
54 def __init__(
55 self,
56 temperature: float = 0.5,
57 memory_bank_size: int = 0,
58 gather_distributed: bool = False,
59 ):
60 super(NTXentLoss, self).__init__(size=memory_bank_size)
61 self.temperature = temperature
62 self.gather_distributed = gather_distributed
63 self.cross_entropy = nn.CrossEntropyLoss(reduction="mean")
64 self.eps = 1e-8
65
66 if abs(self.temperature) < self.eps:
67 raise ValueError(
68 "Illegal temperature: abs({}) < 1e-8".format(self.temperature)
69 )
70
71 def forward(self, out0: torch.Tensor, out1: torch.Tensor):
72 """Forward pass through Contrastive Cross-Entropy Loss.
73
74 If used with a memory bank, the samples from the memory bank are used
75 as negative examples. Otherwise, within-batch samples are used as
76 negative samples.
77
78 Args:
79 out0:
80 Output projections of the first set of transformed images.
81 Shape: (batch_size, embedding_size)
82 out1:
83 Output projections of the second set of transformed images.
84 Shape: (batch_size, embedding_size)
85
86 Returns:
87 Contrastive Cross Entropy Loss value.
88
89 """
90
91 device = out0.device
92 batch_size, _ = out0.shape
93
94 # normalize the output to length 1
95 out0 = nn.functional.normalize(out0, dim=1)
96 out1 = nn.functional.normalize(out1, dim=1)
97
98 # ask memory bank for negative samples and extend it with out1 if
99 # out1 requires a gradient, otherwise keep the same vectors in the
100 # memory bank (this allows for keeping the memory bank constant e.g.
101 # for evaluating the loss on the test set)
102 # out1: shape: (batch_size, embedding_size)
103 # negatives: shape: (embedding_size, memory_bank_size)
104 out1, negatives = super(NTXentLoss, self).forward(
105 out1, update=out0.requires_grad
106 )
107
108 # We use the cosine similarity, which is a dot product (einsum) here,
109 # as all vectors are already normalized to unit length.
110 # Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size.
111
112 if negatives is not None:
113 # use negatives from memory bank
114 negatives = negatives.to(device)
115
116 # sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity
117 # of the i-th sample in the batch to its positive pair
118 sim_pos = torch.einsum("nc,nc->n", out0, out1).unsqueeze(-1)
119
120 # sim_neg is of shape (batch_size, memory_bank_size) and sim_neg[i,j] denotes the similarity
121 # of the i-th sample to the j-th negative sample
122 sim_neg = torch.einsum("nc,ck->nk", out0, negatives)
123
124 # set the labels to the first "class", i.e. sim_pos,
125 # so that it is maximized in relation to sim_neg
126 logits = torch.cat([sim_pos, sim_neg], dim=1) / self.temperature
127 labels = torch.zeros(logits.shape[0], device=device, dtype=torch.long)
128
129 else:
130 # user other samples from batch as negatives
131 # and create diagonal mask that only selects similarities between
132 # views of the same image
133 if self.gather_distributed and dist.world_size() > 1:
134 # gather hidden representations from other processes
135 out0_large = torch.cat(dist.gather(out0), 0)
136 out1_large = torch.cat(dist.gather(out1), 0)
137 diag_mask = dist.eye_rank(batch_size, device=out0.device)
138 else:
139 # single process
140 out0_large = out0
141 out1_large = out1
142 diag_mask = torch.eye(batch_size, device=out0.device, dtype=torch.bool)
143
144 # calculate similiarities
145 # here n = batch_size and m = batch_size * world_size
146 # the resulting vectors have shape (n, m)
147 logits_00 = torch.einsum("nc,mc->nm", out0, out0_large) / self.temperature
148 logits_01 = torch.einsum("nc,mc->nm", out0, out1_large) / self.temperature
149 logits_10 = torch.einsum("nc,mc->nm", out1, out0_large) / self.temperature
150 logits_11 = torch.einsum("nc,mc->nm", out1, out1_large) / self.temperature
151
152 # remove simliarities between same views of the same image
153 logits_00 = logits_00[~diag_mask].view(batch_size, -1)
154 logits_11 = logits_11[~diag_mask].view(batch_size, -1)
155
156 # concatenate logits
157 # the logits tensor in the end has shape (2*n, 2*m-1)
158 logits_0100 = torch.cat([logits_01, logits_00], dim=1)
159 logits_1011 = torch.cat([logits_10, logits_11], dim=1)
160 logits = torch.cat([logits_0100, logits_1011], dim=0)
161
162 # create labels
163 labels = torch.arange(batch_size, device=device, dtype=torch.long)
164 labels = labels + dist.rank() * batch_size
165 labels = labels.repeat(2)
166
167 loss = self.cross_entropy(logits, labels)
168
169 return loss
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightly/loss/ntx_ent_loss.py b/lightly/loss/ntx_ent_loss.py
--- a/lightly/loss/ntx_ent_loss.py
+++ b/lightly/loss/ntx_ent_loss.py
@@ -161,7 +161,8 @@
# create labels
labels = torch.arange(batch_size, device=device, dtype=torch.long)
- labels = labels + dist.rank() * batch_size
+ if self.gather_distributed:
+ labels = labels + dist.rank() * batch_size
labels = labels.repeat(2)
loss = self.cross_entropy(logits, labels)
| {"golden_diff": "diff --git a/lightly/loss/ntx_ent_loss.py b/lightly/loss/ntx_ent_loss.py\n--- a/lightly/loss/ntx_ent_loss.py\n+++ b/lightly/loss/ntx_ent_loss.py\n@@ -161,7 +161,8 @@\n \n # create labels\n labels = torch.arange(batch_size, device=device, dtype=torch.long)\n- labels = labels + dist.rank() * batch_size\n+ if self.gather_distributed:\n+ labels = labels + dist.rank() * batch_size\n labels = labels.repeat(2)\n \n loss = self.cross_entropy(logits, labels)\n", "issue": "CUDA errors in NTXentLoss with gloo backend in multi-gpu training \nI was wondering if the `gloo` distributed communication package for multi-gpu training is officially supported by lightly. It seems like e.g. NTXentLoss doesn't work with `gloo` (I'm using pytorch lightning): I get CUDA errors, even when setting `gather_distributed = False`.\r\nI can fix the issue when using `gather_distributed = False` by replacing the line\r\nhttps://github.com/lightly-ai/lightly/blob/master/lightly/loss/ntx_ent_loss.py#L164\r\nby\r\n```python\r\nlabels = labels + batch_size * (dist.rank() if gather_distributed else 0)\r\n```\r\nbut then of course I can't use `gather_distributed = True` anymore. \r\n\r\nUsing the `nccl` backend, everything works fine, but `nccl` is not working well on some of our machines, so unfortunately I'm stuck with `gloo`. I think using `gloo` might be too much of an exception to fix the problem for `gather_distributed = True`, but maybe it'd help to just replace the line above and mention somewhere in the documentation that `gather_distributed` is not supported for `gloo`?\n", "before_files": [{"content": "\"\"\" Contrastive Loss Functions \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport torch\nfrom torch import nn\n\nfrom lightly.loss.memory_bank import MemoryBankModule\nfrom lightly.utils import dist\n\n\nclass NTXentLoss(MemoryBankModule):\n \"\"\"Implementation of the Contrastive Cross Entropy Loss.\n\n This implementation follows the SimCLR[0] paper. If you enable the memory\n bank by setting the `memory_bank_size` value > 0 the loss behaves like\n the one described in the MoCo[1] paper.\n\n - [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709\n - [1] MoCo, 2020, https://arxiv.org/abs/1911.05722\n\n Attributes:\n temperature:\n Scale logits by the inverse of the temperature.\n memory_bank_size:\n Number of negative samples to store in the memory bank.\n Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.\n gather_distributed:\n If True then negatives from all gpus are gathered before the\n loss calculation. 
This flag has no effect if memory_bank_size > 0.\n\n Raises:\n ValueError: If abs(temperature) < 1e-8 to prevent divide by zero.\n\n Examples:\n\n >>> # initialize loss function without memory bank\n >>> loss_fn = NTXentLoss(memory_bank_size=0)\n >>>\n >>> # generate two random transforms of images\n >>> t0 = transforms(images)\n >>> t1 = transforms(images)\n >>>\n >>> # feed through SimCLR or MoCo model\n >>> batch = torch.cat((t0, t1), dim=0)\n >>> output = model(batch)\n >>>\n >>> # calculate loss\n >>> loss = loss_fn(output)\n\n \"\"\"\n\n def __init__(\n self,\n temperature: float = 0.5,\n memory_bank_size: int = 0,\n gather_distributed: bool = False,\n ):\n super(NTXentLoss, self).__init__(size=memory_bank_size)\n self.temperature = temperature\n self.gather_distributed = gather_distributed\n self.cross_entropy = nn.CrossEntropyLoss(reduction=\"mean\")\n self.eps = 1e-8\n\n if abs(self.temperature) < self.eps:\n raise ValueError(\n \"Illegal temperature: abs({}) < 1e-8\".format(self.temperature)\n )\n\n def forward(self, out0: torch.Tensor, out1: torch.Tensor):\n \"\"\"Forward pass through Contrastive Cross-Entropy Loss.\n\n If used with a memory bank, the samples from the memory bank are used\n as negative examples. Otherwise, within-batch samples are used as\n negative samples.\n\n Args:\n out0:\n Output projections of the first set of transformed images.\n Shape: (batch_size, embedding_size)\n out1:\n Output projections of the second set of transformed images.\n Shape: (batch_size, embedding_size)\n\n Returns:\n Contrastive Cross Entropy Loss value.\n\n \"\"\"\n\n device = out0.device\n batch_size, _ = out0.shape\n\n # normalize the output to length 1\n out0 = nn.functional.normalize(out0, dim=1)\n out1 = nn.functional.normalize(out1, dim=1)\n\n # ask memory bank for negative samples and extend it with out1 if\n # out1 requires a gradient, otherwise keep the same vectors in the\n # memory bank (this allows for keeping the memory bank constant e.g.\n # for evaluating the loss on the test set)\n # out1: shape: (batch_size, embedding_size)\n # negatives: shape: (embedding_size, memory_bank_size)\n out1, negatives = super(NTXentLoss, self).forward(\n out1, update=out0.requires_grad\n )\n\n # We use the cosine similarity, which is a dot product (einsum) here,\n # as all vectors are already normalized to unit length.\n # Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size.\n\n if negatives is not None:\n # use negatives from memory bank\n negatives = negatives.to(device)\n\n # sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity\n # of the i-th sample in the batch to its positive pair\n sim_pos = torch.einsum(\"nc,nc->n\", out0, out1).unsqueeze(-1)\n\n # sim_neg is of shape (batch_size, memory_bank_size) and sim_neg[i,j] denotes the similarity\n # of the i-th sample to the j-th negative sample\n sim_neg = torch.einsum(\"nc,ck->nk\", out0, negatives)\n\n # set the labels to the first \"class\", i.e. 
sim_pos,\n # so that it is maximized in relation to sim_neg\n logits = torch.cat([sim_pos, sim_neg], dim=1) / self.temperature\n labels = torch.zeros(logits.shape[0], device=device, dtype=torch.long)\n\n else:\n # user other samples from batch as negatives\n # and create diagonal mask that only selects similarities between\n # views of the same image\n if self.gather_distributed and dist.world_size() > 1:\n # gather hidden representations from other processes\n out0_large = torch.cat(dist.gather(out0), 0)\n out1_large = torch.cat(dist.gather(out1), 0)\n diag_mask = dist.eye_rank(batch_size, device=out0.device)\n else:\n # single process\n out0_large = out0\n out1_large = out1\n diag_mask = torch.eye(batch_size, device=out0.device, dtype=torch.bool)\n\n # calculate similiarities\n # here n = batch_size and m = batch_size * world_size\n # the resulting vectors have shape (n, m)\n logits_00 = torch.einsum(\"nc,mc->nm\", out0, out0_large) / self.temperature\n logits_01 = torch.einsum(\"nc,mc->nm\", out0, out1_large) / self.temperature\n logits_10 = torch.einsum(\"nc,mc->nm\", out1, out0_large) / self.temperature\n logits_11 = torch.einsum(\"nc,mc->nm\", out1, out1_large) / self.temperature\n\n # remove simliarities between same views of the same image\n logits_00 = logits_00[~diag_mask].view(batch_size, -1)\n logits_11 = logits_11[~diag_mask].view(batch_size, -1)\n\n # concatenate logits\n # the logits tensor in the end has shape (2*n, 2*m-1)\n logits_0100 = torch.cat([logits_01, logits_00], dim=1)\n logits_1011 = torch.cat([logits_10, logits_11], dim=1)\n logits = torch.cat([logits_0100, logits_1011], dim=0)\n\n # create labels\n labels = torch.arange(batch_size, device=device, dtype=torch.long)\n labels = labels + dist.rank() * batch_size\n labels = labels.repeat(2)\n\n loss = self.cross_entropy(logits, labels)\n\n return loss\n", "path": "lightly/loss/ntx_ent_loss.py"}], "after_files": [{"content": "\"\"\" Contrastive Loss Functions \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport torch\nfrom torch import nn\n\nfrom lightly.loss.memory_bank import MemoryBankModule\nfrom lightly.utils import dist\n\n\nclass NTXentLoss(MemoryBankModule):\n \"\"\"Implementation of the Contrastive Cross Entropy Loss.\n\n This implementation follows the SimCLR[0] paper. If you enable the memory\n bank by setting the `memory_bank_size` value > 0 the loss behaves like\n the one described in the MoCo[1] paper.\n\n - [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709\n - [1] MoCo, 2020, https://arxiv.org/abs/1911.05722\n\n Attributes:\n temperature:\n Scale logits by the inverse of the temperature.\n memory_bank_size:\n Number of negative samples to store in the memory bank.\n Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.\n gather_distributed:\n If True then negatives from all gpus are gathered before the\n loss calculation. 
This flag has no effect if memory_bank_size > 0.\n\n Raises:\n ValueError: If abs(temperature) < 1e-8 to prevent divide by zero.\n\n Examples:\n\n >>> # initialize loss function without memory bank\n >>> loss_fn = NTXentLoss(memory_bank_size=0)\n >>>\n >>> # generate two random transforms of images\n >>> t0 = transforms(images)\n >>> t1 = transforms(images)\n >>>\n >>> # feed through SimCLR or MoCo model\n >>> batch = torch.cat((t0, t1), dim=0)\n >>> output = model(batch)\n >>>\n >>> # calculate loss\n >>> loss = loss_fn(output)\n\n \"\"\"\n\n def __init__(\n self,\n temperature: float = 0.5,\n memory_bank_size: int = 0,\n gather_distributed: bool = False,\n ):\n super(NTXentLoss, self).__init__(size=memory_bank_size)\n self.temperature = temperature\n self.gather_distributed = gather_distributed\n self.cross_entropy = nn.CrossEntropyLoss(reduction=\"mean\")\n self.eps = 1e-8\n\n if abs(self.temperature) < self.eps:\n raise ValueError(\n \"Illegal temperature: abs({}) < 1e-8\".format(self.temperature)\n )\n\n def forward(self, out0: torch.Tensor, out1: torch.Tensor):\n \"\"\"Forward pass through Contrastive Cross-Entropy Loss.\n\n If used with a memory bank, the samples from the memory bank are used\n as negative examples. Otherwise, within-batch samples are used as\n negative samples.\n\n Args:\n out0:\n Output projections of the first set of transformed images.\n Shape: (batch_size, embedding_size)\n out1:\n Output projections of the second set of transformed images.\n Shape: (batch_size, embedding_size)\n\n Returns:\n Contrastive Cross Entropy Loss value.\n\n \"\"\"\n\n device = out0.device\n batch_size, _ = out0.shape\n\n # normalize the output to length 1\n out0 = nn.functional.normalize(out0, dim=1)\n out1 = nn.functional.normalize(out1, dim=1)\n\n # ask memory bank for negative samples and extend it with out1 if\n # out1 requires a gradient, otherwise keep the same vectors in the\n # memory bank (this allows for keeping the memory bank constant e.g.\n # for evaluating the loss on the test set)\n # out1: shape: (batch_size, embedding_size)\n # negatives: shape: (embedding_size, memory_bank_size)\n out1, negatives = super(NTXentLoss, self).forward(\n out1, update=out0.requires_grad\n )\n\n # We use the cosine similarity, which is a dot product (einsum) here,\n # as all vectors are already normalized to unit length.\n # Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size.\n\n if negatives is not None:\n # use negatives from memory bank\n negatives = negatives.to(device)\n\n # sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity\n # of the i-th sample in the batch to its positive pair\n sim_pos = torch.einsum(\"nc,nc->n\", out0, out1).unsqueeze(-1)\n\n # sim_neg is of shape (batch_size, memory_bank_size) and sim_neg[i,j] denotes the similarity\n # of the i-th sample to the j-th negative sample\n sim_neg = torch.einsum(\"nc,ck->nk\", out0, negatives)\n\n # set the labels to the first \"class\", i.e. 
sim_pos,\n # so that it is maximized in relation to sim_neg\n logits = torch.cat([sim_pos, sim_neg], dim=1) / self.temperature\n labels = torch.zeros(logits.shape[0], device=device, dtype=torch.long)\n\n else:\n # user other samples from batch as negatives\n # and create diagonal mask that only selects similarities between\n # views of the same image\n if self.gather_distributed and dist.world_size() > 1:\n # gather hidden representations from other processes\n out0_large = torch.cat(dist.gather(out0), 0)\n out1_large = torch.cat(dist.gather(out1), 0)\n diag_mask = dist.eye_rank(batch_size, device=out0.device)\n else:\n # single process\n out0_large = out0\n out1_large = out1\n diag_mask = torch.eye(batch_size, device=out0.device, dtype=torch.bool)\n\n # calculate similiarities\n # here n = batch_size and m = batch_size * world_size\n # the resulting vectors have shape (n, m)\n logits_00 = torch.einsum(\"nc,mc->nm\", out0, out0_large) / self.temperature\n logits_01 = torch.einsum(\"nc,mc->nm\", out0, out1_large) / self.temperature\n logits_10 = torch.einsum(\"nc,mc->nm\", out1, out0_large) / self.temperature\n logits_11 = torch.einsum(\"nc,mc->nm\", out1, out1_large) / self.temperature\n\n # remove simliarities between same views of the same image\n logits_00 = logits_00[~diag_mask].view(batch_size, -1)\n logits_11 = logits_11[~diag_mask].view(batch_size, -1)\n\n # concatenate logits\n # the logits tensor in the end has shape (2*n, 2*m-1)\n logits_0100 = torch.cat([logits_01, logits_00], dim=1)\n logits_1011 = torch.cat([logits_10, logits_11], dim=1)\n logits = torch.cat([logits_0100, logits_1011], dim=0)\n\n # create labels\n labels = torch.arange(batch_size, device=device, dtype=torch.long)\n if self.gather_distributed:\n labels = labels + dist.rank() * batch_size\n labels = labels.repeat(2)\n\n loss = self.cross_entropy(logits, labels)\n\n return loss\n", "path": "lightly/loss/ntx_ent_loss.py"}]} |
gh_patches_debug_1448 | rasdani/github-patches | git_diff | mars-project__mars-291 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Chinese document layout has a link error.
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
A clear and concise description of what the bug is.
Chinese document layout has a link error.
doc link: [https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html](https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html)

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/norm_zh.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2018 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """
18 This file folds Chinese po files by hacking babel.messages.pofile.normalize
19 using jieba text segment library instead of regex
20 """
21
22 import datetime
23 import os
24
25 from babel.messages import pofile
26 from babel.messages.pofile import escape
27
28
29 def _zh_len(s):
30 """
31 Calculate text length in Chinese
32 """
33 try:
34 return len(s.encode('gb2312'))
35 except ValueError:
36 return len(s)
37
38
39 def _zh_split(s):
40 """
41 Split text length in Chinese
42 """
43 import jieba
44 try:
45 s.encode('ascii')
46 has_zh = False
47 except ValueError:
48 has_zh = True
49
50 if has_zh:
51 return list(jieba.cut(s))
52 else:
53 return pofile.WORD_SEP.split(s)
54
55
56 # code modified from babel.messages.pofile (hash 359ecffca479dfe032d0f7210d5cd8160599c816)
57 def _normalize(string, prefix='', width=76):
58 r"""Convert a string into a format that is appropriate for .po files.
59 >>> print(normalize('''Say:
60 ... "hello, world!"
61 ... ''', width=None))
62 ""
63 "Say:\n"
64 " \"hello, world!\"\n"
65 >>> print(normalize('''Say:
66 ... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
67 ... ''', width=32))
68 ""
69 "Say:\n"
70 " \"Lorem ipsum dolor sit "
71 "amet, consectetur adipisicing"
72 " elit, \"\n"
73 :param string: the string to normalize
74 :param prefix: a string that should be prepended to every line
75 :param width: the maximum line width; use `None`, 0, or a negative number
76 to completely disable line wrapping
77 """
78
79 if width and width > 0:
80 prefixlen = _zh_len(prefix)
81 lines = []
82 for line in string.splitlines(True):
83 if _zh_len(escape(line)) + prefixlen > width:
84 chunks = _zh_split(line)
85 chunks.reverse()
86 while chunks:
87 buf = []
88 size = 2
89 while chunks:
90 l = _zh_len(escape(chunks[-1])) - 2 + prefixlen # noqa: E741
91 if size + l < width:
92 buf.append(chunks.pop())
93 size += l
94 else:
95 if not buf:
96 # handle long chunks by putting them on a
97 # separate line
98 buf.append(chunks.pop())
99 break
100 lines.append(u''.join(buf))
101 else:
102 lines.append(line)
103 else:
104 lines = string.splitlines(True)
105
106 if len(lines) <= 1:
107 return escape(string)
108
109 # Remove empty trailing line
110 if lines and not lines[-1]:
111 del lines[-1]
112 lines[-1] += '\n'
113 return u'""\n' + u'\n'.join([(prefix + escape(line)) for line in lines])
114
115
116 def main():
117 try:
118 import jieba # noqa: F401
119 except ImportError:
120 return
121
122 pofile.normalize = _normalize
123 for root, dirs, files in os.walk('.'):
124 if 'zh' not in root:
125 continue
126 for f in files:
127 if not f.endswith('.po'):
128 continue
129 path = os.path.join(root, f)
130
131 # only modify recent-changed files
132 modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))
133 if (datetime.datetime.now() - modify_time).total_seconds() > 1800:
134 continue
135
136 with open(path, 'rb') as inpf:
137 catalog = pofile.read_po(inpf)
138 with open(path, 'wb') as outf:
139 pofile.write_po(outf, catalog)
140
141
142 if __name__ == '__main__':
143 main()
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/norm_zh.py b/docs/source/norm_zh.py
--- a/docs/source/norm_zh.py
+++ b/docs/source/norm_zh.py
@@ -130,7 +130,7 @@
# only modify recent-changed files
modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))
- if (datetime.datetime.now() - modify_time).total_seconds() > 1800:
+ if (datetime.datetime.now() - modify_time).total_seconds() > 120:
continue
with open(path, 'rb') as inpf:
| {"golden_diff": "diff --git a/docs/source/norm_zh.py b/docs/source/norm_zh.py\n--- a/docs/source/norm_zh.py\n+++ b/docs/source/norm_zh.py\n@@ -130,7 +130,7 @@\n \n # only modify recent-changed files\n modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n- if (datetime.datetime.now() - modify_time).total_seconds() > 1800:\n+ if (datetime.datetime.now() - modify_time).total_seconds() > 120:\n continue\n \n with open(path, 'rb') as inpf:\n", "issue": "[BUG] Chinese document layout has a link error.\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\nChinese document layout has a connection error.\r\ndoc link:[https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html](https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html)\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file folds Chinese po files by hacking babel.messages.pofile.normalize\nusing jieba text segment library instead of regex\n\"\"\"\n\nimport datetime\nimport os\n\nfrom babel.messages import pofile\nfrom babel.messages.pofile import escape\n\n\ndef _zh_len(s):\n \"\"\"\n Calculate text length in Chinese\n \"\"\"\n try:\n return len(s.encode('gb2312'))\n except ValueError:\n return len(s)\n\n\ndef _zh_split(s):\n \"\"\"\n Split text length in Chinese\n \"\"\"\n import jieba\n try:\n s.encode('ascii')\n has_zh = False\n except ValueError:\n has_zh = True\n\n if has_zh:\n return list(jieba.cut(s))\n else:\n return pofile.WORD_SEP.split(s)\n\n\n# code modified from babel.messages.pofile (hash 359ecffca479dfe032d0f7210d5cd8160599c816)\ndef _normalize(string, prefix='', width=76):\n r\"\"\"Convert a string into a format that is appropriate for .po files.\n >>> print(normalize('''Say:\n ... \"hello, world!\"\n ... ''', width=None))\n \"\"\n \"Say:\\n\"\n \" \\\"hello, world!\\\"\\n\"\n >>> print(normalize('''Say:\n ... \"Lorem ipsum dolor sit amet, consectetur adipisicing elit, \"\n ... 
''', width=32))\n \"\"\n \"Say:\\n\"\n \" \\\"Lorem ipsum dolor sit \"\n \"amet, consectetur adipisicing\"\n \" elit, \\\"\\n\"\n :param string: the string to normalize\n :param prefix: a string that should be prepended to every line\n :param width: the maximum line width; use `None`, 0, or a negative number\n to completely disable line wrapping\n \"\"\"\n\n if width and width > 0:\n prefixlen = _zh_len(prefix)\n lines = []\n for line in string.splitlines(True):\n if _zh_len(escape(line)) + prefixlen > width:\n chunks = _zh_split(line)\n chunks.reverse()\n while chunks:\n buf = []\n size = 2\n while chunks:\n l = _zh_len(escape(chunks[-1])) - 2 + prefixlen # noqa: E741\n if size + l < width:\n buf.append(chunks.pop())\n size += l\n else:\n if not buf:\n # handle long chunks by putting them on a\n # separate line\n buf.append(chunks.pop())\n break\n lines.append(u''.join(buf))\n else:\n lines.append(line)\n else:\n lines = string.splitlines(True)\n\n if len(lines) <= 1:\n return escape(string)\n\n # Remove empty trailing line\n if lines and not lines[-1]:\n del lines[-1]\n lines[-1] += '\\n'\n return u'\"\"\\n' + u'\\n'.join([(prefix + escape(line)) for line in lines])\n\n\ndef main():\n try:\n import jieba # noqa: F401\n except ImportError:\n return\n\n pofile.normalize = _normalize\n for root, dirs, files in os.walk('.'):\n if 'zh' not in root:\n continue\n for f in files:\n if not f.endswith('.po'):\n continue\n path = os.path.join(root, f)\n\n # only modify recent-changed files\n modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n if (datetime.datetime.now() - modify_time).total_seconds() > 1800:\n continue\n\n with open(path, 'rb') as inpf:\n catalog = pofile.read_po(inpf)\n with open(path, 'wb') as outf:\n pofile.write_po(outf, catalog)\n\n\nif __name__ == '__main__':\n main()\n", "path": "docs/source/norm_zh.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file folds Chinese po files by hacking babel.messages.pofile.normalize\nusing jieba text segment library instead of regex\n\"\"\"\n\nimport datetime\nimport os\n\nfrom babel.messages import pofile\nfrom babel.messages.pofile import escape\n\n\ndef _zh_len(s):\n \"\"\"\n Calculate text length in Chinese\n \"\"\"\n try:\n return len(s.encode('gb2312'))\n except ValueError:\n return len(s)\n\n\ndef _zh_split(s):\n \"\"\"\n Split text length in Chinese\n \"\"\"\n import jieba\n try:\n s.encode('ascii')\n has_zh = False\n except ValueError:\n has_zh = True\n\n if has_zh:\n return list(jieba.cut(s))\n else:\n return pofile.WORD_SEP.split(s)\n\n\n# code modified from babel.messages.pofile (hash 359ecffca479dfe032d0f7210d5cd8160599c816)\ndef _normalize(string, prefix='', width=76):\n r\"\"\"Convert a string into a format that is appropriate for .po files.\n >>> print(normalize('''Say:\n ... \"hello, world!\"\n ... 
''', width=None))\n \"\"\n \"Say:\\n\"\n \" \\\"hello, world!\\\"\\n\"\n >>> print(normalize('''Say:\n ... \"Lorem ipsum dolor sit amet, consectetur adipisicing elit, \"\n ... ''', width=32))\n \"\"\n \"Say:\\n\"\n \" \\\"Lorem ipsum dolor sit \"\n \"amet, consectetur adipisicing\"\n \" elit, \\\"\\n\"\n :param string: the string to normalize\n :param prefix: a string that should be prepended to every line\n :param width: the maximum line width; use `None`, 0, or a negative number\n to completely disable line wrapping\n \"\"\"\n\n if width and width > 0:\n prefixlen = _zh_len(prefix)\n lines = []\n for line in string.splitlines(True):\n if _zh_len(escape(line)) + prefixlen > width:\n chunks = _zh_split(line)\n chunks.reverse()\n while chunks:\n buf = []\n size = 2\n while chunks:\n l = _zh_len(escape(chunks[-1])) - 2 + prefixlen # noqa: E741\n if size + l < width:\n buf.append(chunks.pop())\n size += l\n else:\n if not buf:\n # handle long chunks by putting them on a\n # separate line\n buf.append(chunks.pop())\n break\n lines.append(u''.join(buf))\n else:\n lines.append(line)\n else:\n lines = string.splitlines(True)\n\n if len(lines) <= 1:\n return escape(string)\n\n # Remove empty trailing line\n if lines and not lines[-1]:\n del lines[-1]\n lines[-1] += '\\n'\n return u'\"\"\\n' + u'\\n'.join([(prefix + escape(line)) for line in lines])\n\n\ndef main():\n try:\n import jieba # noqa: F401\n except ImportError:\n return\n\n pofile.normalize = _normalize\n for root, dirs, files in os.walk('.'):\n if 'zh' not in root:\n continue\n for f in files:\n if not f.endswith('.po'):\n continue\n path = os.path.join(root, f)\n\n # only modify recent-changed files\n modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n if (datetime.datetime.now() - modify_time).total_seconds() > 120:\n continue\n\n with open(path, 'rb') as inpf:\n catalog = pofile.read_po(inpf)\n with open(path, 'wb') as outf:\n pofile.write_po(outf, catalog)\n\n\nif __name__ == '__main__':\n main()\n", "path": "docs/source/norm_zh.py"}]} |
gh_patches_debug_1449 | rasdani/github-patches | git_diff | keras-team__keras-7552 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Masking a layer that has an integer dtype raises an error in TensorFlow but not Theano.
The following:
```python
from keras.layers import Input, Masking
document = Input(shape = (10, ), dtype = "int32")
mask = Masking(mask_value = 21)
document_mask = mask(document)
```
produces this error:
```
----> 5 document_mask = mask(document)
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/keras/engine/topology.py in __call__(self, inputs, **kwargs)
594
595 # Actually call the layer, collecting output(s), mask(s), and shape(s).
--> 596 output = self.call(inputs, **kwargs)
597 output_mask = self.compute_mask(inputs, previous_mask)
598
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/keras/layers/core.py in call(self, inputs)
62 boolean_mask = K.any(K.not_equal(inputs, self.mask_value),
63 axis=-1, keepdims=True)
---> 64 return inputs * K.cast(boolean_mask, K.floatx())
65
66 def get_config(self):
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/ops/math_ops.py in binary_op_wrapper(x, y)
827 if not isinstance(y, sparse_tensor.SparseTensor):
828 try:
--> 829 y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
830 except TypeError:
831 # If the RHS is not a tensor, it might be a tensor aware object
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, preferred_dtype)
674 name=name,
675 preferred_dtype=preferred_dtype,
--> 676 as_ref=False)
677
678
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype)
739
740 if ret is None:
--> 741 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
742
743 if ret is NotImplemented:
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in _TensorTensorConversionFunction(t, dtype, name, as_ref)
612 raise ValueError(
613 "Tensor conversion requested dtype %s for Tensor with dtype %s: %r"
--> 614 % (dtype.name, t.dtype.name, str(t)))
615 return t
616
ValueError: Tensor conversion requested dtype int32 for Tensor with dtype float32: 'Tensor("masking_1/Cast_1:0", shape=(?, 1), dtype=float32)'
```
when using TensorFlow as the backend, but works fine with Theano. The issue seems to be that [Keras casts the mask to a float](https://github.com/fchollet/keras/blob/master/keras/layers/core.py#L64), even when the inputs are not floats themselves. Changing the return value to:
```python
inputs * K.cast(boolean_mask, inputs.dtype)
```
fixes the issue.
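A minimal, self-contained sketch of that change, assuming the Keras 2 backend API (`K.any`, `K.not_equal`, `K.cast`); `DtypeSafeMasking` is a hypothetical name used only to illustrate folding the cast into `call`:
```python
from keras import backend as K
from keras.layers import Masking

class DtypeSafeMasking(Masking):
    """Masking variant that keeps the mask in the dtype of its inputs."""
    def call(self, inputs):
        boolean_mask = K.any(K.not_equal(inputs, self.mask_value),
                             axis=-1, keepdims=True)
        # Cast to inputs.dtype rather than K.floatx() so integer inputs
        # (e.g. int32 token ids) multiply without a dtype mismatch.
        return inputs * K.cast(boolean_mask, inputs.dtype)
```
Used in place of `Masking`, the snippet at the top of the issue would then build without the tensor conversion error.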
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/layers/core.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3 from __future__ import division
4
5 import numpy as np
6
7 import copy
8 import types as python_types
9 import warnings
10
11 from .. import backend as K
12 from .. import activations
13 from .. import initializers
14 from .. import regularizers
15 from .. import constraints
16 from ..engine import InputSpec
17 from ..engine import Layer
18 from ..utils.generic_utils import func_dump
19 from ..utils.generic_utils import func_load
20 from ..utils.generic_utils import deserialize_keras_object
21 from ..utils.generic_utils import has_arg
22 from ..legacy import interfaces
23
24
25 class Masking(Layer):
26 """Masks a sequence by using a mask value to skip timesteps.
27
28 For each timestep in the input tensor (dimension #1 in the tensor),
29 if all values in the input tensor at that timestep
30 are equal to `mask_value`, then the timestep will be masked (skipped)
31 in all downstream layers (as long as they support masking).
32
33 If any downstream layer does not support masking yet receives such
34 an input mask, an exception will be raised.
35
36 # Example
37
38 Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
39 to be fed to a LSTM layer.
40 You want to mask timestep #3 and #5 because you lack data for
41 these timesteps. You can:
42
43 - set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
44 - insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
45
46 ```python
47 model = Sequential()
48 model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
49 model.add(LSTM(32))
50 ```
51 """
52
53 def __init__(self, mask_value=0., **kwargs):
54 super(Masking, self).__init__(**kwargs)
55 self.supports_masking = True
56 self.mask_value = mask_value
57
58 def compute_mask(self, inputs, mask=None):
59 return K.any(K.not_equal(inputs, self.mask_value), axis=-1)
60
61 def call(self, inputs):
62 boolean_mask = K.any(K.not_equal(inputs, self.mask_value),
63 axis=-1, keepdims=True)
64 return inputs * K.cast(boolean_mask, K.floatx())
65
66 def get_config(self):
67 config = {'mask_value': self.mask_value}
68 base_config = super(Masking, self).get_config()
69 return dict(list(base_config.items()) + list(config.items()))
70
71
72 class Dropout(Layer):
73 """Applies Dropout to the input.
74
75 Dropout consists in randomly setting
76 a fraction `rate` of input units to 0 at each update during training time,
77 which helps prevent overfitting.
78
79 # Arguments
80 rate: float between 0 and 1. Fraction of the input units to drop.
81 noise_shape: 1D integer tensor representing the shape of the
82 binary dropout mask that will be multiplied with the input.
83 For instance, if your inputs have shape
84 `(batch_size, timesteps, features)` and
85 you want the dropout mask to be the same for all timesteps,
86 you can use `noise_shape=(batch_size, 1, features)`.
87 seed: A Python integer to use as random seed.
88
89 # References
90 - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
91 """
92 @interfaces.legacy_dropout_support
93 def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
94 super(Dropout, self).__init__(**kwargs)
95 self.rate = min(1., max(0., rate))
96 self.noise_shape = noise_shape
97 self.seed = seed
98 self.supports_masking = True
99
100 def _get_noise_shape(self, _):
101 return self.noise_shape
102
103 def call(self, inputs, training=None):
104 if 0. < self.rate < 1.:
105 noise_shape = self._get_noise_shape(inputs)
106
107 def dropped_inputs():
108 return K.dropout(inputs, self.rate, noise_shape,
109 seed=self.seed)
110 return K.in_train_phase(dropped_inputs, inputs,
111 training=training)
112 return inputs
113
114 def get_config(self):
115 config = {'rate': self.rate}
116 base_config = super(Dropout, self).get_config()
117 return dict(list(base_config.items()) + list(config.items()))
118
119
120 class SpatialDropout1D(Dropout):
121 """Spatial 1D version of Dropout.
122
123 This version performs the same function as Dropout, however it drops
124 entire 1D feature maps instead of individual elements. If adjacent frames
125 within feature maps are strongly correlated (as is normally the case in
126 early convolution layers) then regular dropout will not regularize the
127 activations and will otherwise just result in an effective learning rate
128 decrease. In this case, SpatialDropout1D will help promote independence
129 between feature maps and should be used instead.
130
131 # Arguments
132 rate: float between 0 and 1. Fraction of the input units to drop.
133
134 # Input shape
135 3D tensor with shape:
136 `(samples, timesteps, channels)`
137
138 # Output shape
139 Same as input
140
141 # References
142 - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
143 """
144
145 @interfaces.legacy_spatialdropout1d_support
146 def __init__(self, rate, **kwargs):
147 super(SpatialDropout1D, self).__init__(rate, **kwargs)
148 self.input_spec = InputSpec(ndim=3)
149
150 def _get_noise_shape(self, inputs):
151 input_shape = K.shape(inputs)
152 noise_shape = (input_shape[0], 1, input_shape[2])
153 return noise_shape
154
155
156 class SpatialDropout2D(Dropout):
157 """Spatial 2D version of Dropout.
158
159 This version performs the same function as Dropout, however it drops
160 entire 2D feature maps instead of individual elements. If adjacent pixels
161 within feature maps are strongly correlated (as is normally the case in
162 early convolution layers) then regular dropout will not regularize the
163 activations and will otherwise just result in an effective learning rate
164 decrease. In this case, SpatialDropout2D will help promote independence
165 between feature maps and should be used instead.
166
167 # Arguments
168 rate: float between 0 and 1. Fraction of the input units to drop.
169 data_format: 'channels_first' or 'channels_last'.
170 In 'channels_first' mode, the channels dimension
171 (the depth) is at index 1,
172 in 'channels_last' mode is it at index 3.
173 It defaults to the `image_data_format` value found in your
174 Keras config file at `~/.keras/keras.json`.
175 If you never set it, then it will be "channels_last".
176
177 # Input shape
178 4D tensor with shape:
179 `(samples, channels, rows, cols)` if data_format='channels_first'
180 or 4D tensor with shape:
181 `(samples, rows, cols, channels)` if data_format='channels_last'.
182
183 # Output shape
184 Same as input
185
186 # References
187 - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
188 """
189
190 @interfaces.legacy_spatialdropoutNd_support
191 def __init__(self, rate, data_format=None, **kwargs):
192 super(SpatialDropout2D, self).__init__(rate, **kwargs)
193 if data_format is None:
194 data_format = K.image_data_format()
195 if data_format not in {'channels_last', 'channels_first'}:
196 raise ValueError('`data_format` must be in '
197 '{`"channels_last"`, `"channels_first"`}')
198 self.data_format = data_format
199 self.input_spec = InputSpec(ndim=4)
200
201 def _get_noise_shape(self, inputs):
202 input_shape = K.shape(inputs)
203 if self.data_format == 'channels_first':
204 noise_shape = (input_shape[0], input_shape[1], 1, 1)
205 else:
206 noise_shape = (input_shape[0], 1, 1, input_shape[3])
207 return noise_shape
208
209
210 class SpatialDropout3D(Dropout):
211 """Spatial 3D version of Dropout.
212
213 This version performs the same function as Dropout, however it drops
214 entire 3D feature maps instead of individual elements. If adjacent voxels
215 within feature maps are strongly correlated (as is normally the case in
216 early convolution layers) then regular dropout will not regularize the
217 activations and will otherwise just result in an effective learning rate
218 decrease. In this case, SpatialDropout3D will help promote independence
219 between feature maps and should be used instead.
220
221 # Arguments
222 rate: float between 0 and 1. Fraction of the input units to drop.
223 data_format: 'channels_first' or 'channels_last'.
224 In 'channels_first' mode, the channels dimension (the depth)
225 is at index 1, in 'channels_last' mode is it at index 4.
226 It defaults to the `image_data_format` value found in your
227 Keras config file at `~/.keras/keras.json`.
228 If you never set it, then it will be "channels_last".
229
230 # Input shape
231 5D tensor with shape:
232 `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
233 or 5D tensor with shape:
234 `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
235
236 # Output shape
237 Same as input
238
239 # References
240 - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
241 """
242
243 @interfaces.legacy_spatialdropoutNd_support
244 def __init__(self, rate, data_format=None, **kwargs):
245 super(SpatialDropout3D, self).__init__(rate, **kwargs)
246 if data_format is None:
247 data_format = K.image_data_format()
248 if data_format not in {'channels_last', 'channels_first'}:
249 raise ValueError('`data_format` must be in '
250 '{`"channels_last"`, `"channels_first"`}')
251 self.data_format = data_format
252 self.input_spec = InputSpec(ndim=5)
253
254 def _get_noise_shape(self, inputs):
255 input_shape = K.shape(inputs)
256 if self.data_format == 'channels_first':
257 noise_shape = (input_shape[0], input_shape[1], 1, 1, 1)
258 else:
259 noise_shape = (input_shape[0], 1, 1, 1, input_shape[4])
260 return noise_shape
261
262
263 class Activation(Layer):
264 """Applies an activation function to an output.
265
266 # Arguments
267 activation: name of activation function to use
268 (see: [activations](../activations.md)),
269 or alternatively, a Theano or TensorFlow operation.
270
271 # Input shape
272 Arbitrary. Use the keyword argument `input_shape`
273 (tuple of integers, does not include the samples axis)
274 when using this layer as the first layer in a model.
275
276 # Output shape
277 Same shape as input.
278 """
279
280 def __init__(self, activation, **kwargs):
281 super(Activation, self).__init__(**kwargs)
282 self.supports_masking = True
283 self.activation = activations.get(activation)
284
285 def call(self, inputs):
286 return self.activation(inputs)
287
288 def get_config(self):
289 config = {'activation': activations.serialize(self.activation)}
290 base_config = super(Activation, self).get_config()
291 return dict(list(base_config.items()) + list(config.items()))
292
293
294 class Reshape(Layer):
295 """Reshapes an output to a certain shape.
296
297 # Arguments
298 target_shape: target shape. Tuple of integers.
299 Does not include the batch axis.
300
301 # Input shape
302 Arbitrary, although all dimensions in the input shaped must be fixed.
303 Use the keyword argument `input_shape`
304 (tuple of integers, does not include the batch axis)
305 when using this layer as the first layer in a model.
306
307 # Output shape
308 `(batch_size,) + target_shape`
309
310 # Example
311
312 ```python
313 # as first layer in a Sequential model
314 model = Sequential()
315 model.add(Reshape((3, 4), input_shape=(12,)))
316 # now: model.output_shape == (None, 3, 4)
317 # note: `None` is the batch dimension
318
319 # as intermediate layer in a Sequential model
320 model.add(Reshape((6, 2)))
321 # now: model.output_shape == (None, 6, 2)
322
323 # also supports shape inference using `-1` as dimension
324 model.add(Reshape((-1, 2, 2)))
325 # now: model.output_shape == (None, 3, 2, 2)
326 ```
327 """
328
329 def __init__(self, target_shape, **kwargs):
330 super(Reshape, self).__init__(**kwargs)
331 self.target_shape = tuple(target_shape)
332
333 def _fix_unknown_dimension(self, input_shape, output_shape):
334 """Finds and replaces a missing dimension in an output shape.
335
336 This is a near direct port of the internal Numpy function
337 `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`
338
339 # Arguments
340 input_shape: original shape of array being reshaped
341 output_shape: target shape of the array, with at most
342 a single -1 which indicates a dimension that should be
343 derived from the input shape.
344
345 # Returns
346 The new output shape with a `-1` replaced with its computed value.
347
348 # Raises
349 ValueError: if `input_shape` and `output_shape` do not match.
350 """
351 output_shape = list(output_shape)
352 msg = 'total size of new array must be unchanged'
353
354 known, unknown = 1, None
355 for index, dim in enumerate(output_shape):
356 if dim < 0:
357 if unknown is None:
358 unknown = index
359 else:
360 raise ValueError('Can only specify one unknown dimension.')
361 else:
362 known *= dim
363
364 original = np.prod(input_shape, dtype=int)
365 if unknown is not None:
366 if known == 0 or original % known != 0:
367 raise ValueError(msg)
368 output_shape[unknown] = original // known
369 elif original != known:
370 raise ValueError(msg)
371
372 return tuple(output_shape)
373
374 def compute_output_shape(self, input_shape):
375 return (input_shape[0],) + self._fix_unknown_dimension(
376 input_shape[1:], self.target_shape)
377
378 def call(self, inputs):
379 # In case the target shape is not fully defined,
380 # we need access to the shape of `inputs`.
381 # solution: rely on `K.int_shape`.
382 target_shape = self.target_shape
383 if -1 in target_shape:
384 # Target shape not fully defined.
385 input_shape = None
386 try:
387 input_shape = K.int_shape(inputs)
388 except TypeError:
389 pass
390 if input_shape is not None:
391 target_shape = self.compute_output_shape(input_shape)[1:]
392 return K.reshape(inputs, (-1,) + target_shape)
393
394 def get_config(self):
395 config = {'target_shape': self.target_shape}
396 base_config = super(Reshape, self).get_config()
397 return dict(list(base_config.items()) + list(config.items()))
398
399
400 class Permute(Layer):
401 """Permutes the dimensions of the input according to a given pattern.
402
403 Useful for e.g. connecting RNNs and convnets together.
404
405 # Example
406
407 ```python
408 model = Sequential()
409 model.add(Permute((2, 1), input_shape=(10, 64)))
410 # now: model.output_shape == (None, 64, 10)
411 # note: `None` is the batch dimension
412 ```
413
414 # Arguments
415 dims: Tuple of integers. Permutation pattern, does not include the
416 samples dimension. Indexing starts at 1.
417 For instance, `(2, 1)` permutes the first and second dimension
418 of the input.
419
420 # Input shape
421 Arbitrary. Use the keyword argument `input_shape`
422 (tuple of integers, does not include the samples axis)
423 when using this layer as the first layer in a model.
424
425 # Output shape
426 Same as the input shape, but with the dimensions re-ordered according
427 to the specified pattern.
428 """
429
430 def __init__(self, dims, **kwargs):
431 super(Permute, self).__init__(**kwargs)
432 self.dims = tuple(dims)
433 self.input_spec = InputSpec(ndim=len(self.dims) + 1)
434
435 def compute_output_shape(self, input_shape):
436 input_shape = list(input_shape)
437 output_shape = copy.copy(input_shape)
438 for i, dim in enumerate(self.dims):
439 target_dim = input_shape[dim]
440 output_shape[i + 1] = target_dim
441 return tuple(output_shape)
442
443 def call(self, inputs):
444 return K.permute_dimensions(inputs, (0,) + self.dims)
445
446 def get_config(self):
447 config = {'dims': self.dims}
448 base_config = super(Permute, self).get_config()
449 return dict(list(base_config.items()) + list(config.items()))
450
451
452 class Flatten(Layer):
453 """Flattens the input. Does not affect the batch size.
454
455 # Example
456
457 ```python
458 model = Sequential()
459 model.add(Conv2D(64, 3, 3,
460 border_mode='same',
461 input_shape=(3, 32, 32)))
462 # now: model.output_shape == (None, 64, 32, 32)
463
464 model.add(Flatten())
465 # now: model.output_shape == (None, 65536)
466 ```
467 """
468
469 def __init__(self, **kwargs):
470 super(Flatten, self).__init__(**kwargs)
471 self.input_spec = InputSpec(min_ndim=3)
472
473 def compute_output_shape(self, input_shape):
474 if not all(input_shape[1:]):
475 raise ValueError('The shape of the input to "Flatten" '
476 'is not fully defined '
477 '(got ' + str(input_shape[1:]) + '. '
478 'Make sure to pass a complete "input_shape" '
479 'or "batch_input_shape" argument to the first '
480 'layer in your model.')
481 return (input_shape[0], np.prod(input_shape[1:]))
482
483 def call(self, inputs):
484 return K.batch_flatten(inputs)
485
486
487 class RepeatVector(Layer):
488 """Repeats the input n times.
489
490 # Example
491
492 ```python
493 model = Sequential()
494 model.add(Dense(32, input_dim=32))
495 # now: model.output_shape == (None, 32)
496 # note: `None` is the batch dimension
497
498 model.add(RepeatVector(3))
499 # now: model.output_shape == (None, 3, 32)
500 ```
501
502 # Arguments
503 n: integer, repetition factor.
504
505 # Input shape
506 2D tensor of shape `(num_samples, features)`.
507
508 # Output shape
509 3D tensor of shape `(num_samples, n, features)`.
510 """
511
512 def __init__(self, n, **kwargs):
513 super(RepeatVector, self).__init__(**kwargs)
514 self.n = n
515 self.input_spec = InputSpec(ndim=2)
516
517 def compute_output_shape(self, input_shape):
518 return (input_shape[0], self.n, input_shape[1])
519
520 def call(self, inputs):
521 return K.repeat(inputs, self.n)
522
523 def get_config(self):
524 config = {'n': self.n}
525 base_config = super(RepeatVector, self).get_config()
526 return dict(list(base_config.items()) + list(config.items()))
527
528
529 class Lambda(Layer):
530 """Wraps arbitrary expression as a `Layer` object.
531
532 # Examples
533
534 ```python
535 # add a x -> x^2 layer
536 model.add(Lambda(lambda x: x ** 2))
537 ```
538 ```python
539 # add a layer that returns the concatenation
540 # of the positive part of the input and
541 # the opposite of the negative part
542
543 def antirectifier(x):
544 x -= K.mean(x, axis=1, keepdims=True)
545 x = K.l2_normalize(x, axis=1)
546 pos = K.relu(x)
547 neg = K.relu(-x)
548 return K.concatenate([pos, neg], axis=1)
549
550 def antirectifier_output_shape(input_shape):
551 shape = list(input_shape)
552 assert len(shape) == 2 # only valid for 2D tensors
553 shape[-1] *= 2
554 return tuple(shape)
555
556 model.add(Lambda(antirectifier,
557 output_shape=antirectifier_output_shape))
558 ```
559
560 # Arguments
561 function: The function to be evaluated.
562 Takes input tensor as first argument.
563 output_shape: Expected output shape from function.
564 Only relevant when using Theano.
565 Can be a tuple or function.
566 If a tuple, it only specifies the first dimension onward;
567 sample dimension is assumed either the same as the input:
568 `output_shape = (input_shape[0], ) + output_shape`
569 or, the input is `None` and
570 the sample dimension is also `None`:
571 `output_shape = (None, ) + output_shape`
572 If a function, it specifies the entire shape as a function of the
573 input shape: `output_shape = f(input_shape)`
574 arguments: optional dictionary of keyword arguments to be passed
575 to the function.
576
577 # Input shape
578 Arbitrary. Use the keyword argument input_shape
579 (tuple of integers, does not include the samples axis)
580 when using this layer as the first layer in a model.
581
582 # Output shape
583 Specified by `output_shape` argument
584 (or auto-inferred when using TensorFlow).
585 """
586
587 @interfaces.legacy_lambda_support
588 def __init__(self, function, output_shape=None,
589 mask=None, arguments=None, **kwargs):
590 super(Lambda, self).__init__(**kwargs)
591 self.function = function
592 self.arguments = arguments if arguments else {}
593 if mask is not None:
594 self.supports_masking = True
595 self.mask = mask
596
597 if output_shape is None:
598 self._output_shape = None
599 elif isinstance(output_shape, (tuple, list)):
600 self._output_shape = tuple(output_shape)
601 else:
602 if not callable(output_shape):
603 raise TypeError('In Lambda, `output_shape` '
604 'must be a list, a tuple, or a function.')
605 self._output_shape = output_shape
606
607 def compute_output_shape(self, input_shape):
608 if self._output_shape is None:
609 # With TensorFlow, we can infer the output shape directly:
610 if K.backend() == 'tensorflow':
611 if isinstance(input_shape, list):
612 xs = [K.placeholder(shape=shape) for shape in input_shape]
613 x = self.call(xs)
614 else:
615 x = K.placeholder(shape=input_shape)
616 x = self.call(x)
617 if isinstance(x, list):
618 return [K.int_shape(x_elem) for x_elem in x]
619 else:
620 return K.int_shape(x)
621 # Otherwise, we default to the input shape.
622 warnings.warn('`output_shape` argument not specified for layer {} '
623 'and cannot be automatically inferred '
624 'with the Theano backend. '
625 'Defaulting to output shape `{}` '
626 '(same as input shape). '
627 'If the expected output shape is different, '
628 'specify it via the `output_shape` argument.'
629 .format(self.name, input_shape))
630 return input_shape
631 elif isinstance(self._output_shape, (tuple, list)):
632 if isinstance(input_shape, list):
633 num_samples = input_shape[0][0]
634 else:
635 num_samples = input_shape[0] if input_shape else None
636 return (num_samples,) + tuple(self._output_shape)
637 else:
638 shape = self._output_shape(input_shape)
639 if not isinstance(shape, (list, tuple)):
640 raise ValueError('`output_shape` function must return a tuple or a list of tuples.')
641 if isinstance(shape, list):
642 if isinstance(shape[0], int) or shape[0] is None:
643 shape = tuple(shape)
644 return shape
645
646 def call(self, inputs, mask=None):
647 arguments = self.arguments
648 if has_arg(self.function, 'mask'):
649 arguments['mask'] = mask
650 return self.function(inputs, **arguments)
651
652 def compute_mask(self, inputs, mask=None):
653 if callable(self.mask):
654 return self.mask(inputs, mask)
655 return self.mask
656
657 def get_config(self):
658 if isinstance(self.function, python_types.LambdaType):
659 function = func_dump(self.function)
660 function_type = 'lambda'
661 else:
662 function = self.function.__name__
663 function_type = 'function'
664
665 if isinstance(self._output_shape, python_types.LambdaType):
666 output_shape = func_dump(self._output_shape)
667 output_shape_type = 'lambda'
668 elif callable(self._output_shape):
669 output_shape = self._output_shape.__name__
670 output_shape_type = 'function'
671 else:
672 output_shape = self._output_shape
673 output_shape_type = 'raw'
674
675 config = {'function': function,
676 'function_type': function_type,
677 'output_shape': output_shape,
678 'output_shape_type': output_shape_type,
679 'arguments': self.arguments}
680 base_config = super(Lambda, self).get_config()
681 return dict(list(base_config.items()) + list(config.items()))
682
683 @classmethod
684 def from_config(cls, config, custom_objects=None):
685 globs = globals()
686 if custom_objects:
687 globs = dict(list(globs.items()) + list(custom_objects.items()))
688 function_type = config.pop('function_type')
689 if function_type == 'function':
690 # Simple lookup in custom objects
691 function = deserialize_keras_object(
692 config['function'],
693 custom_objects=custom_objects,
694 printable_module_name='function in Lambda layer')
695 elif function_type == 'lambda':
696 # Unsafe deserialization from bytecode
697 function = func_load(config['function'], globs=globs)
698 else:
699 raise TypeError('Unknown function type:', function_type)
700
701 output_shape_type = config.pop('output_shape_type')
702 if output_shape_type == 'function':
703 # Simple lookup in custom objects
704 output_shape = deserialize_keras_object(
705 config['output_shape'],
706 custom_objects=custom_objects,
707 printable_module_name='output_shape function in Lambda layer')
708 elif output_shape_type == 'lambda':
709 # Unsafe deserialization from bytecode
710 output_shape = func_load(config['output_shape'], globs=globs)
711 else:
712 output_shape = config['output_shape']
713
714         # If arguments were numpy arrays, they have been saved as
715         # lists. We need to recover the ndarrays
716 if 'arguments' in config:
717 for key in config['arguments']:
718 if isinstance(config['arguments'][key], dict):
719 arg_dict = config['arguments'][key]
720 if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
721 # Overwrite the argument with its numpy translation
722 config['arguments'][key] = np.array(arg_dict['value'])
723
724 config['function'] = function
725 config['output_shape'] = output_shape
726 return cls(**config)
727
728
729 class Dense(Layer):
730 """Just your regular densely-connected NN layer.
731
732 `Dense` implements the operation:
733 `output = activation(dot(input, kernel) + bias)`
734 where `activation` is the element-wise activation function
735 passed as the `activation` argument, `kernel` is a weights matrix
736 created by the layer, and `bias` is a bias vector created by the layer
737 (only applicable if `use_bias` is `True`).
738
739 Note: if the input to the layer has a rank greater than 2, then
740 it is flattened prior to the initial dot product with `kernel`.
741
742 # Example
743
744 ```python
745 # as first layer in a sequential model:
746 model = Sequential()
747 model.add(Dense(32, input_shape=(16,)))
748 # now the model will take as input arrays of shape (*, 16)
749 # and output arrays of shape (*, 32)
750
751 # after the first layer, you don't need to specify
752 # the size of the input anymore:
753 model.add(Dense(32))
754 ```
755
756 # Arguments
757 units: Positive integer, dimensionality of the output space.
758 activation: Activation function to use
759 (see [activations](../activations.md)).
760 If you don't specify anything, no activation is applied
761             (i.e. "linear" activation: `a(x) = x`).
762 use_bias: Boolean, whether the layer uses a bias vector.
763 kernel_initializer: Initializer for the `kernel` weights matrix
764 (see [initializers](../initializers.md)).
765 bias_initializer: Initializer for the bias vector
766 (see [initializers](../initializers.md)).
767 kernel_regularizer: Regularizer function applied to
768 the `kernel` weights matrix
769 (see [regularizer](../regularizers.md)).
770 bias_regularizer: Regularizer function applied to the bias vector
771 (see [regularizer](../regularizers.md)).
772 activity_regularizer: Regularizer function applied to
773 the output of the layer (its "activation").
774 (see [regularizer](../regularizers.md)).
775 kernel_constraint: Constraint function applied to
776 the `kernel` weights matrix
777 (see [constraints](../constraints.md)).
778 bias_constraint: Constraint function applied to the bias vector
779 (see [constraints](../constraints.md)).
780
781 # Input shape
782 nD tensor with shape: `(batch_size, ..., input_dim)`.
783 The most common situation would be
784 a 2D input with shape `(batch_size, input_dim)`.
785
786 # Output shape
787 nD tensor with shape: `(batch_size, ..., units)`.
788 For instance, for a 2D input with shape `(batch_size, input_dim)`,
789 the output would have shape `(batch_size, units)`.
790 """
791
792 @interfaces.legacy_dense_support
793 def __init__(self, units,
794 activation=None,
795 use_bias=True,
796 kernel_initializer='glorot_uniform',
797 bias_initializer='zeros',
798 kernel_regularizer=None,
799 bias_regularizer=None,
800 activity_regularizer=None,
801 kernel_constraint=None,
802 bias_constraint=None,
803 **kwargs):
804 if 'input_shape' not in kwargs and 'input_dim' in kwargs:
805 kwargs['input_shape'] = (kwargs.pop('input_dim'),)
806 super(Dense, self).__init__(**kwargs)
807 self.units = units
808 self.activation = activations.get(activation)
809 self.use_bias = use_bias
810 self.kernel_initializer = initializers.get(kernel_initializer)
811 self.bias_initializer = initializers.get(bias_initializer)
812 self.kernel_regularizer = regularizers.get(kernel_regularizer)
813 self.bias_regularizer = regularizers.get(bias_regularizer)
814 self.activity_regularizer = regularizers.get(activity_regularizer)
815 self.kernel_constraint = constraints.get(kernel_constraint)
816 self.bias_constraint = constraints.get(bias_constraint)
817 self.input_spec = InputSpec(min_ndim=2)
818 self.supports_masking = True
819
820 def build(self, input_shape):
821 assert len(input_shape) >= 2
822 input_dim = input_shape[-1]
823
824 self.kernel = self.add_weight(shape=(input_dim, self.units),
825 initializer=self.kernel_initializer,
826 name='kernel',
827 regularizer=self.kernel_regularizer,
828 constraint=self.kernel_constraint)
829 if self.use_bias:
830 self.bias = self.add_weight(shape=(self.units,),
831 initializer=self.bias_initializer,
832 name='bias',
833 regularizer=self.bias_regularizer,
834 constraint=self.bias_constraint)
835 else:
836 self.bias = None
837 self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
838 self.built = True
839
840 def call(self, inputs):
841 output = K.dot(inputs, self.kernel)
842 if self.use_bias:
843 output = K.bias_add(output, self.bias)
844 if self.activation is not None:
845 output = self.activation(output)
846 return output
847
848 def compute_output_shape(self, input_shape):
849 assert input_shape and len(input_shape) >= 2
850 assert input_shape[-1]
851 output_shape = list(input_shape)
852 output_shape[-1] = self.units
853 return tuple(output_shape)
854
855 def get_config(self):
856 config = {
857 'units': self.units,
858 'activation': activations.serialize(self.activation),
859 'use_bias': self.use_bias,
860 'kernel_initializer': initializers.serialize(self.kernel_initializer),
861 'bias_initializer': initializers.serialize(self.bias_initializer),
862 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
863 'bias_regularizer': regularizers.serialize(self.bias_regularizer),
864 'activity_regularizer': regularizers.serialize(self.activity_regularizer),
865 'kernel_constraint': constraints.serialize(self.kernel_constraint),
866 'bias_constraint': constraints.serialize(self.bias_constraint)
867 }
868 base_config = super(Dense, self).get_config()
869 return dict(list(base_config.items()) + list(config.items()))
870
871
872 class ActivityRegularization(Layer):
873     """Layer that applies an update to the cost function based on input activity.
874
875 # Arguments
876 l1: L1 regularization factor (positive float).
877 l2: L2 regularization factor (positive float).
878
879 # Input shape
880 Arbitrary. Use the keyword argument `input_shape`
881 (tuple of integers, does not include the samples axis)
882 when using this layer as the first layer in a model.
883
884 # Output shape
885 Same shape as input.
886 """
887
888 def __init__(self, l1=0., l2=0., **kwargs):
889 super(ActivityRegularization, self).__init__(**kwargs)
890 self.supports_masking = True
891 self.l1 = l1
892 self.l2 = l2
893 self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2)
894
895 def get_config(self):
896 config = {'l1': self.l1,
897 'l2': self.l2}
898 base_config = super(ActivityRegularization, self).get_config()
899 return dict(list(base_config.items()) + list(config.items()))
900
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras/layers/core.py b/keras/layers/core.py
--- a/keras/layers/core.py
+++ b/keras/layers/core.py
@@ -61,7 +61,7 @@
def call(self, inputs):
boolean_mask = K.any(K.not_equal(inputs, self.mask_value),
axis=-1, keepdims=True)
- return inputs * K.cast(boolean_mask, K.floatx())
+ return inputs * K.cast(boolean_mask, inputs.dtype)
def get_config(self):
config = {'mask_value': self.mask_value}
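For context on why this one-line change resolves the issue: `K.cast(boolean_mask, K.floatx())` always produces a floating-point mask, so when the masked input is an integer tensor the multiplication mixes dtypes, which TensorFlow rejects. A minimal sketch of the failure mode, adapted from the issue report reproduced in the verification data below (it assumes a TensorFlow backend and the Keras 2 functional API):

```python
from keras.layers import Input, Masking

# An integer-typed input, e.g. a sequence of token ids.
document = Input(shape=(10,), dtype="int32")

# Mask timesteps whose value equals the sentinel 21.
# Before the patch, TensorFlow raises a dtype-conversion ValueError here,
# because the int32 input is multiplied by a float32 mask.
# After the patch, the mask is cast to the input's own dtype (int32),
# so the product stays within a single dtype and the call succeeds.
document_mask = Masking(mask_value=21)(document)
```

Theano upcasts mixed-dtype elementwise products silently, which is presumably why only the TensorFlow backend surfaced the error.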
| {"golden_diff": "diff --git a/keras/layers/core.py b/keras/layers/core.py\n--- a/keras/layers/core.py\n+++ b/keras/layers/core.py\n@@ -61,7 +61,7 @@\n def call(self, inputs):\n boolean_mask = K.any(K.not_equal(inputs, self.mask_value),\n axis=-1, keepdims=True)\n- return inputs * K.cast(boolean_mask, K.floatx())\n+ return inputs * K.cast(boolean_mask, inputs.dtype)\n \n def get_config(self):\n config = {'mask_value': self.mask_value}\n", "issue": "Masking a layer that has an integer dtype raises an error in TensorFlow but not Theano.\nThe following:\r\n```python\r\nfrom keras.layers import Input, Masking\r\n\r\ndocument = Input(shape = (10, ), dtype = \"int32\")\r\nmask = Masking(mask_value = 21)\r\ndocument_mask = mask(document)\r\n```\r\nproduces this error:\r\n\r\n```\r\n----> 5 document_mask = mask(document)\r\n\r\n/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/keras/engine/topology.py in __call__(self, inputs, **kwargs)\r\n 594 \r\n 595 # Actually call the layer, collecting output(s), mask(s), and shape(s).\r\n--> 596 output = self.call(inputs, **kwargs)\r\n 597 output_mask = self.compute_mask(inputs, previous_mask)\r\n 598 \r\n\r\n/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/keras/layers/core.py in call(self, inputs)\r\n 62 boolean_mask = K.any(K.not_equal(inputs, self.mask_value),\r\n 63 axis=-1, keepdims=True)\r\n---> 64 return inputs * K.cast(boolean_mask, K.floatx())\r\n 65 \r\n 66 def get_config(self):\r\n\r\n/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/ops/math_ops.py in binary_op_wrapper(x, y)\r\n 827 if not isinstance(y, sparse_tensor.SparseTensor):\r\n 828 try:\r\n--> 829 y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name=\"y\")\r\n 830 except TypeError:\r\n 831 # If the RHS is not a tensor, it might be a tensor aware object\r\n\r\n/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, preferred_dtype)\r\n 674 name=name,\r\n 675 preferred_dtype=preferred_dtype,\r\n--> 676 as_ref=False)\r\n 677 \r\n 678 \r\n\r\n/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype)\r\n 739 \r\n 740 if ret is None:\r\n--> 741 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)\r\n 742 \r\n 743 if ret is NotImplemented:\r\n\r\n/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in _TensorTensorConversionFunction(t, dtype, name, as_ref)\r\n 612 raise ValueError(\r\n 613 \"Tensor conversion requested dtype %s for Tensor with dtype %s: %r\"\r\n--> 614 % (dtype.name, t.dtype.name, str(t)))\r\n 615 return t\r\n 616 \r\n\r\nValueError: Tensor conversion requested dtype int32 for Tensor with dtype float32: 'Tensor(\"masking_1/Cast_1:0\", shape=(?, 1), dtype=float32)'\r\n```\r\n\r\nwhen using TensorFlow as the backend, but works fine with Theano. The issue seems to be that [Keras casts the mask to a float](https://github.com/fchollet/keras/blob/master/keras/layers/core.py#L64), even when the inputs are not floats themselves. 
Changing the return value to:\r\n\r\n```python\r\ninputs * K.cast(boolean_mask, inputs.dtype)\r\n```\r\n\r\nfixes the issue.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport numpy as np\n\nimport copy\nimport types as python_types\nimport warnings\n\nfrom .. import backend as K\nfrom .. import activations\nfrom .. import initializers\nfrom .. import regularizers\nfrom .. import constraints\nfrom ..engine import InputSpec\nfrom ..engine import Layer\nfrom ..utils.generic_utils import func_dump\nfrom ..utils.generic_utils import func_load\nfrom ..utils.generic_utils import deserialize_keras_object\nfrom ..utils.generic_utils import has_arg\nfrom ..legacy import interfaces\n\n\nclass Masking(Layer):\n \"\"\"Masks a sequence by using a mask value to skip timesteps.\n\n For each timestep in the input tensor (dimension #1 in the tensor),\n if all values in the input tensor at that timestep\n are equal to `mask_value`, then the timestep will be masked (skipped)\n in all downstream layers (as long as they support masking).\n\n If any downstream layer does not support masking yet receives such\n an input mask, an exception will be raised.\n\n # Example\n\n Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,\n to be fed to a LSTM layer.\n You want to mask timestep #3 and #5 because you lack data for\n these timesteps. You can:\n\n - set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`\n - insert a `Masking` layer with `mask_value=0.` before the LSTM layer:\n\n ```python\n model = Sequential()\n model.add(Masking(mask_value=0., input_shape=(timesteps, features)))\n model.add(LSTM(32))\n ```\n \"\"\"\n\n def __init__(self, mask_value=0., **kwargs):\n super(Masking, self).__init__(**kwargs)\n self.supports_masking = True\n self.mask_value = mask_value\n\n def compute_mask(self, inputs, mask=None):\n return K.any(K.not_equal(inputs, self.mask_value), axis=-1)\n\n def call(self, inputs):\n boolean_mask = K.any(K.not_equal(inputs, self.mask_value),\n axis=-1, keepdims=True)\n return inputs * K.cast(boolean_mask, K.floatx())\n\n def get_config(self):\n config = {'mask_value': self.mask_value}\n base_config = super(Masking, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Dropout(Layer):\n \"\"\"Applies Dropout to the input.\n\n Dropout consists in randomly setting\n a fraction `rate` of input units to 0 at each update during training time,\n which helps prevent overfitting.\n\n # Arguments\n rate: float between 0 and 1. Fraction of the input units to drop.\n noise_shape: 1D integer tensor representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)` and\n you want the dropout mask to be the same for all timesteps,\n you can use `noise_shape=(batch_size, 1, features)`.\n seed: A Python integer to use as random seed.\n\n # References\n - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)\n \"\"\"\n @interfaces.legacy_dropout_support\n def __init__(self, rate, noise_shape=None, seed=None, **kwargs):\n super(Dropout, self).__init__(**kwargs)\n self.rate = min(1., max(0., rate))\n self.noise_shape = noise_shape\n self.seed = seed\n self.supports_masking = True\n\n def _get_noise_shape(self, _):\n return self.noise_shape\n\n def call(self, inputs, training=None):\n if 0. 
< self.rate < 1.:\n noise_shape = self._get_noise_shape(inputs)\n\n def dropped_inputs():\n return K.dropout(inputs, self.rate, noise_shape,\n seed=self.seed)\n return K.in_train_phase(dropped_inputs, inputs,\n training=training)\n return inputs\n\n def get_config(self):\n config = {'rate': self.rate}\n base_config = super(Dropout, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass SpatialDropout1D(Dropout):\n \"\"\"Spatial 1D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 1D feature maps instead of individual elements. If adjacent frames\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout1D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. Fraction of the input units to drop.\n\n # Input shape\n 3D tensor with shape:\n `(samples, timesteps, channels)`\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropout1d_support\n def __init__(self, rate, **kwargs):\n super(SpatialDropout1D, self).__init__(rate, **kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n noise_shape = (input_shape[0], 1, input_shape[2])\n return noise_shape\n\n\nclass SpatialDropout2D(Dropout):\n \"\"\"Spatial 2D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 2D feature maps instead of individual elements. If adjacent pixels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout2D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension\n (the depth) is at index 1,\n in 'channels_last' mode is it at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropoutNd_support\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout2D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` must be in '\n '{`\"channels_last\"`, `\"channels_first\"`}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=4)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n if self.data_format == 'channels_first':\n noise_shape = (input_shape[0], input_shape[1], 1, 1)\n else:\n noise_shape = (input_shape[0], 1, 1, input_shape[3])\n return noise_shape\n\n\nclass SpatialDropout3D(Dropout):\n \"\"\"Spatial 3D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 3D feature maps instead of individual elements. If adjacent voxels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout3D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension (the depth)\n is at index 1, in 'channels_last' mode is it at index 4.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n # Input shape\n 5D tensor with shape:\n `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'\n or 5D tensor with shape:\n `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropoutNd_support\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout3D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` must be in '\n '{`\"channels_last\"`, `\"channels_first\"`}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=5)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n if self.data_format == 'channels_first':\n noise_shape = (input_shape[0], input_shape[1], 1, 1, 1)\n else:\n noise_shape = (input_shape[0], 1, 1, 1, input_shape[4])\n return noise_shape\n\n\nclass Activation(Layer):\n \"\"\"Applies an activation function to an output.\n\n # Arguments\n activation: name of activation function to use\n (see: [activations](../activations.md)),\n or alternatively, a Theano or TensorFlow operation.\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same shape as input.\n \"\"\"\n\n def __init__(self, activation, **kwargs):\n super(Activation, self).__init__(**kwargs)\n self.supports_masking = True\n self.activation = activations.get(activation)\n\n def call(self, inputs):\n return self.activation(inputs)\n\n def get_config(self):\n config = {'activation': activations.serialize(self.activation)}\n base_config = super(Activation, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Reshape(Layer):\n \"\"\"Reshapes an output to a certain shape.\n\n # Arguments\n target_shape: target shape. 
Tuple of integers.\n Does not include the batch axis.\n\n # Input shape\n Arbitrary, although all dimensions in the input shaped must be fixed.\n Use the keyword argument `input_shape`\n (tuple of integers, does not include the batch axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n `(batch_size,) + target_shape`\n\n # Example\n\n ```python\n # as first layer in a Sequential model\n model = Sequential()\n model.add(Reshape((3, 4), input_shape=(12,)))\n # now: model.output_shape == (None, 3, 4)\n # note: `None` is the batch dimension\n\n # as intermediate layer in a Sequential model\n model.add(Reshape((6, 2)))\n # now: model.output_shape == (None, 6, 2)\n\n # also supports shape inference using `-1` as dimension\n model.add(Reshape((-1, 2, 2)))\n # now: model.output_shape == (None, 3, 2, 2)\n ```\n \"\"\"\n\n def __init__(self, target_shape, **kwargs):\n super(Reshape, self).__init__(**kwargs)\n self.target_shape = tuple(target_shape)\n\n def _fix_unknown_dimension(self, input_shape, output_shape):\n \"\"\"Finds and replaces a missing dimension in an output shape.\n\n This is a near direct port of the internal Numpy function\n `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`\n\n # Arguments\n input_shape: original shape of array being reshaped\n output_shape: target shape of the array, with at most\n a single -1 which indicates a dimension that should be\n derived from the input shape.\n\n # Returns\n The new output shape with a `-1` replaced with its computed value.\n\n # Raises\n ValueError: if `input_shape` and `output_shape` do not match.\n \"\"\"\n output_shape = list(output_shape)\n msg = 'total size of new array must be unchanged'\n\n known, unknown = 1, None\n for index, dim in enumerate(output_shape):\n if dim < 0:\n if unknown is None:\n unknown = index\n else:\n raise ValueError('Can only specify one unknown dimension.')\n else:\n known *= dim\n\n original = np.prod(input_shape, dtype=int)\n if unknown is not None:\n if known == 0 or original % known != 0:\n raise ValueError(msg)\n output_shape[unknown] = original // known\n elif original != known:\n raise ValueError(msg)\n\n return tuple(output_shape)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0],) + self._fix_unknown_dimension(\n input_shape[1:], self.target_shape)\n\n def call(self, inputs):\n # In case the target shape is not fully defined,\n # we need access to the shape of `inputs`.\n # solution: rely on `K.int_shape`.\n target_shape = self.target_shape\n if -1 in target_shape:\n # Target shape not fully defined.\n input_shape = None\n try:\n input_shape = K.int_shape(inputs)\n except TypeError:\n pass\n if input_shape is not None:\n target_shape = self.compute_output_shape(input_shape)[1:]\n return K.reshape(inputs, (-1,) + target_shape)\n\n def get_config(self):\n config = {'target_shape': self.target_shape}\n base_config = super(Reshape, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Permute(Layer):\n \"\"\"Permutes the dimensions of the input according to a given pattern.\n\n Useful for e.g. connecting RNNs and convnets together.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Permute((2, 1), input_shape=(10, 64)))\n # now: model.output_shape == (None, 64, 10)\n # note: `None` is the batch dimension\n ```\n\n # Arguments\n dims: Tuple of integers. Permutation pattern, does not include the\n samples dimension. 
Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimension\n of the input.\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same as the input shape, but with the dimensions re-ordered according\n to the specified pattern.\n \"\"\"\n\n def __init__(self, dims, **kwargs):\n super(Permute, self).__init__(**kwargs)\n self.dims = tuple(dims)\n self.input_spec = InputSpec(ndim=len(self.dims) + 1)\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n output_shape = copy.copy(input_shape)\n for i, dim in enumerate(self.dims):\n target_dim = input_shape[dim]\n output_shape[i + 1] = target_dim\n return tuple(output_shape)\n\n def call(self, inputs):\n return K.permute_dimensions(inputs, (0,) + self.dims)\n\n def get_config(self):\n config = {'dims': self.dims}\n base_config = super(Permute, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Flatten(Layer):\n \"\"\"Flattens the input. Does not affect the batch size.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Conv2D(64, 3, 3,\n border_mode='same',\n input_shape=(3, 32, 32)))\n # now: model.output_shape == (None, 64, 32, 32)\n\n model.add(Flatten())\n # now: model.output_shape == (None, 65536)\n ```\n \"\"\"\n\n def __init__(self, **kwargs):\n super(Flatten, self).__init__(**kwargs)\n self.input_spec = InputSpec(min_ndim=3)\n\n def compute_output_shape(self, input_shape):\n if not all(input_shape[1:]):\n raise ValueError('The shape of the input to \"Flatten\" '\n 'is not fully defined '\n '(got ' + str(input_shape[1:]) + '. '\n 'Make sure to pass a complete \"input_shape\" '\n 'or \"batch_input_shape\" argument to the first '\n 'layer in your model.')\n return (input_shape[0], np.prod(input_shape[1:]))\n\n def call(self, inputs):\n return K.batch_flatten(inputs)\n\n\nclass RepeatVector(Layer):\n \"\"\"Repeats the input n times.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Dense(32, input_dim=32))\n # now: model.output_shape == (None, 32)\n # note: `None` is the batch dimension\n\n model.add(RepeatVector(3))\n # now: model.output_shape == (None, 3, 32)\n ```\n\n # Arguments\n n: integer, repetition factor.\n\n # Input shape\n 2D tensor of shape `(num_samples, features)`.\n\n # Output shape\n 3D tensor of shape `(num_samples, n, features)`.\n \"\"\"\n\n def __init__(self, n, **kwargs):\n super(RepeatVector, self).__init__(**kwargs)\n self.n = n\n self.input_spec = InputSpec(ndim=2)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.n, input_shape[1])\n\n def call(self, inputs):\n return K.repeat(inputs, self.n)\n\n def get_config(self):\n config = {'n': self.n}\n base_config = super(RepeatVector, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Lambda(Layer):\n \"\"\"Wraps arbitrary expression as a `Layer` object.\n\n # Examples\n\n ```python\n # add a x -> x^2 layer\n model.add(Lambda(lambda x: x ** 2))\n ```\n ```python\n # add a layer that returns the concatenation\n # of the positive part of the input and\n # the opposite of the negative part\n\n def antirectifier(x):\n x -= K.mean(x, axis=1, keepdims=True)\n x = K.l2_normalize(x, axis=1)\n pos = K.relu(x)\n neg = K.relu(-x)\n return K.concatenate([pos, neg], axis=1)\n\n def antirectifier_output_shape(input_shape):\n shape = 
list(input_shape)\n assert len(shape) == 2 # only valid for 2D tensors\n shape[-1] *= 2\n return tuple(shape)\n\n model.add(Lambda(antirectifier,\n output_shape=antirectifier_output_shape))\n ```\n\n # Arguments\n function: The function to be evaluated.\n Takes input tensor as first argument.\n output_shape: Expected output shape from function.\n Only relevant when using Theano.\n Can be a tuple or function.\n If a tuple, it only specifies the first dimension onward;\n sample dimension is assumed either the same as the input:\n `output_shape = (input_shape[0], ) + output_shape`\n or, the input is `None` and\n the sample dimension is also `None`:\n `output_shape = (None, ) + output_shape`\n If a function, it specifies the entire shape as a function of the\n input shape: `output_shape = f(input_shape)`\n arguments: optional dictionary of keyword arguments to be passed\n to the function.\n\n # Input shape\n Arbitrary. Use the keyword argument input_shape\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Specified by `output_shape` argument\n (or auto-inferred when using TensorFlow).\n \"\"\"\n\n @interfaces.legacy_lambda_support\n def __init__(self, function, output_shape=None,\n mask=None, arguments=None, **kwargs):\n super(Lambda, self).__init__(**kwargs)\n self.function = function\n self.arguments = arguments if arguments else {}\n if mask is not None:\n self.supports_masking = True\n self.mask = mask\n\n if output_shape is None:\n self._output_shape = None\n elif isinstance(output_shape, (tuple, list)):\n self._output_shape = tuple(output_shape)\n else:\n if not callable(output_shape):\n raise TypeError('In Lambda, `output_shape` '\n 'must be a list, a tuple, or a function.')\n self._output_shape = output_shape\n\n def compute_output_shape(self, input_shape):\n if self._output_shape is None:\n # With TensorFlow, we can infer the output shape directly:\n if K.backend() == 'tensorflow':\n if isinstance(input_shape, list):\n xs = [K.placeholder(shape=shape) for shape in input_shape]\n x = self.call(xs)\n else:\n x = K.placeholder(shape=input_shape)\n x = self.call(x)\n if isinstance(x, list):\n return [K.int_shape(x_elem) for x_elem in x]\n else:\n return K.int_shape(x)\n # Otherwise, we default to the input shape.\n warnings.warn('`output_shape` argument not specified for layer {} '\n 'and cannot be automatically inferred '\n 'with the Theano backend. '\n 'Defaulting to output shape `{}` '\n '(same as input shape). 
'\n 'If the expected output shape is different, '\n 'specify it via the `output_shape` argument.'\n .format(self.name, input_shape))\n return input_shape\n elif isinstance(self._output_shape, (tuple, list)):\n if isinstance(input_shape, list):\n num_samples = input_shape[0][0]\n else:\n num_samples = input_shape[0] if input_shape else None\n return (num_samples,) + tuple(self._output_shape)\n else:\n shape = self._output_shape(input_shape)\n if not isinstance(shape, (list, tuple)):\n raise ValueError('`output_shape` function must return a tuple or a list of tuples.')\n if isinstance(shape, list):\n if isinstance(shape[0], int) or shape[0] is None:\n shape = tuple(shape)\n return shape\n\n def call(self, inputs, mask=None):\n arguments = self.arguments\n if has_arg(self.function, 'mask'):\n arguments['mask'] = mask\n return self.function(inputs, **arguments)\n\n def compute_mask(self, inputs, mask=None):\n if callable(self.mask):\n return self.mask(inputs, mask)\n return self.mask\n\n def get_config(self):\n if isinstance(self.function, python_types.LambdaType):\n function = func_dump(self.function)\n function_type = 'lambda'\n else:\n function = self.function.__name__\n function_type = 'function'\n\n if isinstance(self._output_shape, python_types.LambdaType):\n output_shape = func_dump(self._output_shape)\n output_shape_type = 'lambda'\n elif callable(self._output_shape):\n output_shape = self._output_shape.__name__\n output_shape_type = 'function'\n else:\n output_shape = self._output_shape\n output_shape_type = 'raw'\n\n config = {'function': function,\n 'function_type': function_type,\n 'output_shape': output_shape,\n 'output_shape_type': output_shape_type,\n 'arguments': self.arguments}\n base_config = super(Lambda, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n globs = globals()\n if custom_objects:\n globs = dict(list(globs.items()) + list(custom_objects.items()))\n function_type = config.pop('function_type')\n if function_type == 'function':\n # Simple lookup in custom objects\n function = deserialize_keras_object(\n config['function'],\n custom_objects=custom_objects,\n printable_module_name='function in Lambda layer')\n elif function_type == 'lambda':\n # Unsafe deserialization from bytecode\n function = func_load(config['function'], globs=globs)\n else:\n raise TypeError('Unknown function type:', function_type)\n\n output_shape_type = config.pop('output_shape_type')\n if output_shape_type == 'function':\n # Simple lookup in custom objects\n output_shape = deserialize_keras_object(\n config['output_shape'],\n custom_objects=custom_objects,\n printable_module_name='output_shape function in Lambda layer')\n elif output_shape_type == 'lambda':\n # Unsafe deserialization from bytecode\n output_shape = func_load(config['output_shape'], globs=globs)\n else:\n output_shape = config['output_shape']\n\n # If arguments were numpy array, they have been saved as\n # list. 
We need to recover the ndarray\n if 'arguments' in config:\n for key in config['arguments']:\n if isinstance(config['arguments'][key], dict):\n arg_dict = config['arguments'][key]\n if 'type' in arg_dict and arg_dict['type'] == 'ndarray':\n # Overwrite the argument with its numpy translation\n config['arguments'][key] = np.array(arg_dict['value'])\n\n config['function'] = function\n config['output_shape'] = output_shape\n return cls(**config)\n\n\nclass Dense(Layer):\n \"\"\"Just your regular densely-connected NN layer.\n\n `Dense` implements the operation:\n `output = activation(dot(input, kernel) + bias)`\n where `activation` is the element-wise activation function\n passed as the `activation` argument, `kernel` is a weights matrix\n created by the layer, and `bias` is a bias vector created by the layer\n (only applicable if `use_bias` is `True`).\n\n Note: if the input to the layer has a rank greater than 2, then\n it is flattened prior to the initial dot product with `kernel`.\n\n # Example\n\n ```python\n # as first layer in a sequential model:\n model = Sequential()\n model.add(Dense(32, input_shape=(16,)))\n # now the model will take as input arrays of shape (*, 16)\n # and output arrays of shape (*, 32)\n\n # after the first layer, you don't need to specify\n # the size of the input anymore:\n model.add(Dense(32))\n ```\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n\n # Input shape\n nD tensor with shape: `(batch_size, ..., input_dim)`.\n The most common situation would be\n a 2D input with shape `(batch_size, input_dim)`.\n\n # Output shape\n nD tensor with shape: `(batch_size, ..., units)`.\n For instance, for a 2D input with shape `(batch_size, input_dim)`,\n the output would have shape `(batch_size, units)`.\n \"\"\"\n\n @interfaces.legacy_dense_support\n def __init__(self, units,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n super(Dense, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n 
self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(min_ndim=2)\n self.supports_masking = True\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(shape=(input_dim, self.units),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})\n self.built = True\n\n def call(self, inputs):\n output = K.dot(inputs, self.kernel)\n if self.use_bias:\n output = K.bias_add(output, self.bias)\n if self.activation is not None:\n output = self.activation(output)\n return output\n\n def compute_output_shape(self, input_shape):\n assert input_shape and len(input_shape) >= 2\n assert input_shape[-1]\n output_shape = list(input_shape)\n output_shape[-1] = self.units\n return tuple(output_shape)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(Dense, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass ActivityRegularization(Layer):\n \"\"\"Layer that applies an update to the cost function based input activity.\n\n # Arguments\n l1: L1 regularization factor (positive float).\n l2: L2 regularization factor (positive float).\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same shape as input.\n \"\"\"\n\n def __init__(self, l1=0., l2=0., **kwargs):\n super(ActivityRegularization, self).__init__(**kwargs)\n self.supports_masking = True\n self.l1 = l1\n self.l2 = l2\n self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2)\n\n def get_config(self):\n config = {'l1': self.l1,\n 'l2': self.l2}\n base_config = super(ActivityRegularization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "keras/layers/core.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport numpy as np\n\nimport copy\nimport types as python_types\nimport warnings\n\nfrom .. import backend as K\nfrom .. import activations\nfrom .. import initializers\nfrom .. import regularizers\nfrom .. 
import constraints\nfrom ..engine import InputSpec\nfrom ..engine import Layer\nfrom ..utils.generic_utils import func_dump\nfrom ..utils.generic_utils import func_load\nfrom ..utils.generic_utils import deserialize_keras_object\nfrom ..utils.generic_utils import has_arg\nfrom ..legacy import interfaces\n\n\nclass Masking(Layer):\n \"\"\"Masks a sequence by using a mask value to skip timesteps.\n\n For each timestep in the input tensor (dimension #1 in the tensor),\n if all values in the input tensor at that timestep\n are equal to `mask_value`, then the timestep will be masked (skipped)\n in all downstream layers (as long as they support masking).\n\n If any downstream layer does not support masking yet receives such\n an input mask, an exception will be raised.\n\n # Example\n\n Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,\n to be fed to a LSTM layer.\n You want to mask timestep #3 and #5 because you lack data for\n these timesteps. You can:\n\n - set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`\n - insert a `Masking` layer with `mask_value=0.` before the LSTM layer:\n\n ```python\n model = Sequential()\n model.add(Masking(mask_value=0., input_shape=(timesteps, features)))\n model.add(LSTM(32))\n ```\n \"\"\"\n\n def __init__(self, mask_value=0., **kwargs):\n super(Masking, self).__init__(**kwargs)\n self.supports_masking = True\n self.mask_value = mask_value\n\n def compute_mask(self, inputs, mask=None):\n return K.any(K.not_equal(inputs, self.mask_value), axis=-1)\n\n def call(self, inputs):\n boolean_mask = K.any(K.not_equal(inputs, self.mask_value),\n axis=-1, keepdims=True)\n return inputs * K.cast(boolean_mask, inputs.dtype)\n\n def get_config(self):\n config = {'mask_value': self.mask_value}\n base_config = super(Masking, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Dropout(Layer):\n \"\"\"Applies Dropout to the input.\n\n Dropout consists in randomly setting\n a fraction `rate` of input units to 0 at each update during training time,\n which helps prevent overfitting.\n\n # Arguments\n rate: float between 0 and 1. Fraction of the input units to drop.\n noise_shape: 1D integer tensor representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)` and\n you want the dropout mask to be the same for all timesteps,\n you can use `noise_shape=(batch_size, 1, features)`.\n seed: A Python integer to use as random seed.\n\n # References\n - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)\n \"\"\"\n @interfaces.legacy_dropout_support\n def __init__(self, rate, noise_shape=None, seed=None, **kwargs):\n super(Dropout, self).__init__(**kwargs)\n self.rate = min(1., max(0., rate))\n self.noise_shape = noise_shape\n self.seed = seed\n self.supports_masking = True\n\n def _get_noise_shape(self, _):\n return self.noise_shape\n\n def call(self, inputs, training=None):\n if 0. 
< self.rate < 1.:\n noise_shape = self._get_noise_shape(inputs)\n\n def dropped_inputs():\n return K.dropout(inputs, self.rate, noise_shape,\n seed=self.seed)\n return K.in_train_phase(dropped_inputs, inputs,\n training=training)\n return inputs\n\n def get_config(self):\n config = {'rate': self.rate}\n base_config = super(Dropout, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass SpatialDropout1D(Dropout):\n \"\"\"Spatial 1D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 1D feature maps instead of individual elements. If adjacent frames\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout1D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. Fraction of the input units to drop.\n\n # Input shape\n 3D tensor with shape:\n `(samples, timesteps, channels)`\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropout1d_support\n def __init__(self, rate, **kwargs):\n super(SpatialDropout1D, self).__init__(rate, **kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n noise_shape = (input_shape[0], 1, input_shape[2])\n return noise_shape\n\n\nclass SpatialDropout2D(Dropout):\n \"\"\"Spatial 2D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 2D feature maps instead of individual elements. If adjacent pixels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout2D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension\n (the depth) is at index 1,\n in 'channels_last' mode is it at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropoutNd_support\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout2D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` must be in '\n '{`\"channels_last\"`, `\"channels_first\"`}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=4)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n if self.data_format == 'channels_first':\n noise_shape = (input_shape[0], input_shape[1], 1, 1)\n else:\n noise_shape = (input_shape[0], 1, 1, input_shape[3])\n return noise_shape\n\n\nclass SpatialDropout3D(Dropout):\n \"\"\"Spatial 3D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 3D feature maps instead of individual elements. If adjacent voxels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout3D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension (the depth)\n is at index 1, in 'channels_last' mode is it at index 4.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n # Input shape\n 5D tensor with shape:\n `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'\n or 5D tensor with shape:\n `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropoutNd_support\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout3D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` must be in '\n '{`\"channels_last\"`, `\"channels_first\"`}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=5)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n if self.data_format == 'channels_first':\n noise_shape = (input_shape[0], input_shape[1], 1, 1, 1)\n else:\n noise_shape = (input_shape[0], 1, 1, 1, input_shape[4])\n return noise_shape\n\n\nclass Activation(Layer):\n \"\"\"Applies an activation function to an output.\n\n # Arguments\n activation: name of activation function to use\n (see: [activations](../activations.md)),\n or alternatively, a Theano or TensorFlow operation.\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same shape as input.\n \"\"\"\n\n def __init__(self, activation, **kwargs):\n super(Activation, self).__init__(**kwargs)\n self.supports_masking = True\n self.activation = activations.get(activation)\n\n def call(self, inputs):\n return self.activation(inputs)\n\n def get_config(self):\n config = {'activation': activations.serialize(self.activation)}\n base_config = super(Activation, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Reshape(Layer):\n \"\"\"Reshapes an output to a certain shape.\n\n # Arguments\n target_shape: target shape. 
Tuple of integers.\n Does not include the batch axis.\n\n # Input shape\n Arbitrary, although all dimensions in the input shaped must be fixed.\n Use the keyword argument `input_shape`\n (tuple of integers, does not include the batch axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n `(batch_size,) + target_shape`\n\n # Example\n\n ```python\n # as first layer in a Sequential model\n model = Sequential()\n model.add(Reshape((3, 4), input_shape=(12,)))\n # now: model.output_shape == (None, 3, 4)\n # note: `None` is the batch dimension\n\n # as intermediate layer in a Sequential model\n model.add(Reshape((6, 2)))\n # now: model.output_shape == (None, 6, 2)\n\n # also supports shape inference using `-1` as dimension\n model.add(Reshape((-1, 2, 2)))\n # now: model.output_shape == (None, 3, 2, 2)\n ```\n \"\"\"\n\n def __init__(self, target_shape, **kwargs):\n super(Reshape, self).__init__(**kwargs)\n self.target_shape = tuple(target_shape)\n\n def _fix_unknown_dimension(self, input_shape, output_shape):\n \"\"\"Finds and replaces a missing dimension in an output shape.\n\n This is a near direct port of the internal Numpy function\n `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`\n\n # Arguments\n input_shape: original shape of array being reshaped\n output_shape: target shape of the array, with at most\n a single -1 which indicates a dimension that should be\n derived from the input shape.\n\n # Returns\n The new output shape with a `-1` replaced with its computed value.\n\n # Raises\n ValueError: if `input_shape` and `output_shape` do not match.\n \"\"\"\n output_shape = list(output_shape)\n msg = 'total size of new array must be unchanged'\n\n known, unknown = 1, None\n for index, dim in enumerate(output_shape):\n if dim < 0:\n if unknown is None:\n unknown = index\n else:\n raise ValueError('Can only specify one unknown dimension.')\n else:\n known *= dim\n\n original = np.prod(input_shape, dtype=int)\n if unknown is not None:\n if known == 0 or original % known != 0:\n raise ValueError(msg)\n output_shape[unknown] = original // known\n elif original != known:\n raise ValueError(msg)\n\n return tuple(output_shape)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0],) + self._fix_unknown_dimension(\n input_shape[1:], self.target_shape)\n\n def call(self, inputs):\n # In case the target shape is not fully defined,\n # we need access to the shape of `inputs`.\n # solution: rely on `K.int_shape`.\n target_shape = self.target_shape\n if -1 in target_shape:\n # Target shape not fully defined.\n input_shape = None\n try:\n input_shape = K.int_shape(inputs)\n except TypeError:\n pass\n if input_shape is not None:\n target_shape = self.compute_output_shape(input_shape)[1:]\n return K.reshape(inputs, (-1,) + target_shape)\n\n def get_config(self):\n config = {'target_shape': self.target_shape}\n base_config = super(Reshape, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Permute(Layer):\n \"\"\"Permutes the dimensions of the input according to a given pattern.\n\n Useful for e.g. connecting RNNs and convnets together.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Permute((2, 1), input_shape=(10, 64)))\n # now: model.output_shape == (None, 64, 10)\n # note: `None` is the batch dimension\n ```\n\n # Arguments\n dims: Tuple of integers. Permutation pattern, does not include the\n samples dimension. 
Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimension\n of the input.\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same as the input shape, but with the dimensions re-ordered according\n to the specified pattern.\n \"\"\"\n\n def __init__(self, dims, **kwargs):\n super(Permute, self).__init__(**kwargs)\n self.dims = tuple(dims)\n self.input_spec = InputSpec(ndim=len(self.dims) + 1)\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n output_shape = copy.copy(input_shape)\n for i, dim in enumerate(self.dims):\n target_dim = input_shape[dim]\n output_shape[i + 1] = target_dim\n return tuple(output_shape)\n\n def call(self, inputs):\n return K.permute_dimensions(inputs, (0,) + self.dims)\n\n def get_config(self):\n config = {'dims': self.dims}\n base_config = super(Permute, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Flatten(Layer):\n \"\"\"Flattens the input. Does not affect the batch size.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Conv2D(64, 3, 3,\n border_mode='same',\n input_shape=(3, 32, 32)))\n # now: model.output_shape == (None, 64, 32, 32)\n\n model.add(Flatten())\n # now: model.output_shape == (None, 65536)\n ```\n \"\"\"\n\n def __init__(self, **kwargs):\n super(Flatten, self).__init__(**kwargs)\n self.input_spec = InputSpec(min_ndim=3)\n\n def compute_output_shape(self, input_shape):\n if not all(input_shape[1:]):\n raise ValueError('The shape of the input to \"Flatten\" '\n 'is not fully defined '\n '(got ' + str(input_shape[1:]) + '. '\n 'Make sure to pass a complete \"input_shape\" '\n 'or \"batch_input_shape\" argument to the first '\n 'layer in your model.')\n return (input_shape[0], np.prod(input_shape[1:]))\n\n def call(self, inputs):\n return K.batch_flatten(inputs)\n\n\nclass RepeatVector(Layer):\n \"\"\"Repeats the input n times.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Dense(32, input_dim=32))\n # now: model.output_shape == (None, 32)\n # note: `None` is the batch dimension\n\n model.add(RepeatVector(3))\n # now: model.output_shape == (None, 3, 32)\n ```\n\n # Arguments\n n: integer, repetition factor.\n\n # Input shape\n 2D tensor of shape `(num_samples, features)`.\n\n # Output shape\n 3D tensor of shape `(num_samples, n, features)`.\n \"\"\"\n\n def __init__(self, n, **kwargs):\n super(RepeatVector, self).__init__(**kwargs)\n self.n = n\n self.input_spec = InputSpec(ndim=2)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.n, input_shape[1])\n\n def call(self, inputs):\n return K.repeat(inputs, self.n)\n\n def get_config(self):\n config = {'n': self.n}\n base_config = super(RepeatVector, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Lambda(Layer):\n \"\"\"Wraps arbitrary expression as a `Layer` object.\n\n # Examples\n\n ```python\n # add a x -> x^2 layer\n model.add(Lambda(lambda x: x ** 2))\n ```\n ```python\n # add a layer that returns the concatenation\n # of the positive part of the input and\n # the opposite of the negative part\n\n def antirectifier(x):\n x -= K.mean(x, axis=1, keepdims=True)\n x = K.l2_normalize(x, axis=1)\n pos = K.relu(x)\n neg = K.relu(-x)\n return K.concatenate([pos, neg], axis=1)\n\n def antirectifier_output_shape(input_shape):\n shape = 
list(input_shape)\n assert len(shape) == 2 # only valid for 2D tensors\n shape[-1] *= 2\n return tuple(shape)\n\n model.add(Lambda(antirectifier,\n output_shape=antirectifier_output_shape))\n ```\n\n # Arguments\n function: The function to be evaluated.\n Takes input tensor as first argument.\n output_shape: Expected output shape from function.\n Only relevant when using Theano.\n Can be a tuple or function.\n If a tuple, it only specifies the first dimension onward;\n sample dimension is assumed either the same as the input:\n `output_shape = (input_shape[0], ) + output_shape`\n or, the input is `None` and\n the sample dimension is also `None`:\n `output_shape = (None, ) + output_shape`\n If a function, it specifies the entire shape as a function of the\n input shape: `output_shape = f(input_shape)`\n arguments: optional dictionary of keyword arguments to be passed\n to the function.\n\n # Input shape\n Arbitrary. Use the keyword argument input_shape\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Specified by `output_shape` argument\n (or auto-inferred when using TensorFlow).\n \"\"\"\n\n @interfaces.legacy_lambda_support\n def __init__(self, function, output_shape=None,\n mask=None, arguments=None, **kwargs):\n super(Lambda, self).__init__(**kwargs)\n self.function = function\n self.arguments = arguments if arguments else {}\n if mask is not None:\n self.supports_masking = True\n self.mask = mask\n\n if output_shape is None:\n self._output_shape = None\n elif isinstance(output_shape, (tuple, list)):\n self._output_shape = tuple(output_shape)\n else:\n if not callable(output_shape):\n raise TypeError('In Lambda, `output_shape` '\n 'must be a list, a tuple, or a function.')\n self._output_shape = output_shape\n\n def compute_output_shape(self, input_shape):\n if self._output_shape is None:\n # With TensorFlow, we can infer the output shape directly:\n if K.backend() == 'tensorflow':\n if isinstance(input_shape, list):\n xs = [K.placeholder(shape=shape) for shape in input_shape]\n x = self.call(xs)\n else:\n x = K.placeholder(shape=input_shape)\n x = self.call(x)\n if isinstance(x, list):\n return [K.int_shape(x_elem) for x_elem in x]\n else:\n return K.int_shape(x)\n # Otherwise, we default to the input shape.\n warnings.warn('`output_shape` argument not specified for layer {} '\n 'and cannot be automatically inferred '\n 'with the Theano backend. '\n 'Defaulting to output shape `{}` '\n '(same as input shape). 
'\n 'If the expected output shape is different, '\n 'specify it via the `output_shape` argument.'\n .format(self.name, input_shape))\n return input_shape\n elif isinstance(self._output_shape, (tuple, list)):\n if isinstance(input_shape, list):\n num_samples = input_shape[0][0]\n else:\n num_samples = input_shape[0] if input_shape else None\n return (num_samples,) + tuple(self._output_shape)\n else:\n shape = self._output_shape(input_shape)\n if not isinstance(shape, (list, tuple)):\n raise ValueError('`output_shape` function must return a tuple or a list of tuples.')\n if isinstance(shape, list):\n if isinstance(shape[0], int) or shape[0] is None:\n shape = tuple(shape)\n return shape\n\n def call(self, inputs, mask=None):\n arguments = self.arguments\n if has_arg(self.function, 'mask'):\n arguments['mask'] = mask\n return self.function(inputs, **arguments)\n\n def compute_mask(self, inputs, mask=None):\n if callable(self.mask):\n return self.mask(inputs, mask)\n return self.mask\n\n def get_config(self):\n if isinstance(self.function, python_types.LambdaType):\n function = func_dump(self.function)\n function_type = 'lambda'\n else:\n function = self.function.__name__\n function_type = 'function'\n\n if isinstance(self._output_shape, python_types.LambdaType):\n output_shape = func_dump(self._output_shape)\n output_shape_type = 'lambda'\n elif callable(self._output_shape):\n output_shape = self._output_shape.__name__\n output_shape_type = 'function'\n else:\n output_shape = self._output_shape\n output_shape_type = 'raw'\n\n config = {'function': function,\n 'function_type': function_type,\n 'output_shape': output_shape,\n 'output_shape_type': output_shape_type,\n 'arguments': self.arguments}\n base_config = super(Lambda, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n globs = globals()\n if custom_objects:\n globs = dict(list(globs.items()) + list(custom_objects.items()))\n function_type = config.pop('function_type')\n if function_type == 'function':\n # Simple lookup in custom objects\n function = deserialize_keras_object(\n config['function'],\n custom_objects=custom_objects,\n printable_module_name='function in Lambda layer')\n elif function_type == 'lambda':\n # Unsafe deserialization from bytecode\n function = func_load(config['function'], globs=globs)\n else:\n raise TypeError('Unknown function type:', function_type)\n\n output_shape_type = config.pop('output_shape_type')\n if output_shape_type == 'function':\n # Simple lookup in custom objects\n output_shape = deserialize_keras_object(\n config['output_shape'],\n custom_objects=custom_objects,\n printable_module_name='output_shape function in Lambda layer')\n elif output_shape_type == 'lambda':\n # Unsafe deserialization from bytecode\n output_shape = func_load(config['output_shape'], globs=globs)\n else:\n output_shape = config['output_shape']\n\n # If arguments were numpy array, they have been saved as\n # list. 
We need to recover the ndarray\n if 'arguments' in config:\n for key in config['arguments']:\n if isinstance(config['arguments'][key], dict):\n arg_dict = config['arguments'][key]\n if 'type' in arg_dict and arg_dict['type'] == 'ndarray':\n # Overwrite the argument with its numpy translation\n config['arguments'][key] = np.array(arg_dict['value'])\n\n config['function'] = function\n config['output_shape'] = output_shape\n return cls(**config)\n\n\nclass Dense(Layer):\n \"\"\"Just your regular densely-connected NN layer.\n\n `Dense` implements the operation:\n `output = activation(dot(input, kernel) + bias)`\n where `activation` is the element-wise activation function\n passed as the `activation` argument, `kernel` is a weights matrix\n created by the layer, and `bias` is a bias vector created by the layer\n (only applicable if `use_bias` is `True`).\n\n Note: if the input to the layer has a rank greater than 2, then\n it is flattened prior to the initial dot product with `kernel`.\n\n # Example\n\n ```python\n # as first layer in a sequential model:\n model = Sequential()\n model.add(Dense(32, input_shape=(16,)))\n # now the model will take as input arrays of shape (*, 16)\n # and output arrays of shape (*, 32)\n\n # after the first layer, you don't need to specify\n # the size of the input anymore:\n model.add(Dense(32))\n ```\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n\n # Input shape\n nD tensor with shape: `(batch_size, ..., input_dim)`.\n The most common situation would be\n a 2D input with shape `(batch_size, input_dim)`.\n\n # Output shape\n nD tensor with shape: `(batch_size, ..., units)`.\n For instance, for a 2D input with shape `(batch_size, input_dim)`,\n the output would have shape `(batch_size, units)`.\n \"\"\"\n\n @interfaces.legacy_dense_support\n def __init__(self, units,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n super(Dense, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n 
self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(min_ndim=2)\n self.supports_masking = True\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(shape=(input_dim, self.units),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})\n self.built = True\n\n def call(self, inputs):\n output = K.dot(inputs, self.kernel)\n if self.use_bias:\n output = K.bias_add(output, self.bias)\n if self.activation is not None:\n output = self.activation(output)\n return output\n\n def compute_output_shape(self, input_shape):\n assert input_shape and len(input_shape) >= 2\n assert input_shape[-1]\n output_shape = list(input_shape)\n output_shape[-1] = self.units\n return tuple(output_shape)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(Dense, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass ActivityRegularization(Layer):\n \"\"\"Layer that applies an update to the cost function based input activity.\n\n # Arguments\n l1: L1 regularization factor (positive float).\n l2: L2 regularization factor (positive float).\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same shape as input.\n \"\"\"\n\n def __init__(self, l1=0., l2=0., **kwargs):\n super(ActivityRegularization, self).__init__(**kwargs)\n self.supports_masking = True\n self.l1 = l1\n self.l2 = l2\n self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2)\n\n def get_config(self):\n config = {'l1': self.l1,\n 'l2': self.l2}\n base_config = super(ActivityRegularization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "keras/layers/core.py"}]} |
gh_patches_debug_1450 | rasdani/github-patches | git_diff | enthought__chaco-731 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ArrayDataSource get_mask_data() fails when data is None
See this test here:
https://github.com/enthought/chaco/blob/enh/data-source-tests/chaco/tests/arraydatasource_test_case.py#L108
More generally, I think that the behaviour for an empty data source is probably wrong (why a _scalar_ `0.0` instead of `array([])`?) but I'm not sure what will break if that is changed.
--- END ISSUE ---
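For context, a minimal sketch of the failure mode described in the issue. This is illustrative rather than a copy of the linked test, and it assumes the `_data` trait accepts `None` (which the `get_data()` and `get_size()` checks in the file below suggest it does):

```python
from chaco.array_data_source import ArrayDataSource

ds = ArrayDataSource(None)   # _data ends up as None
ds.get_data()                # returns empty(shape=(0,)) -- handled
ds.get_data_mask()           # TypeError: object of type 'NoneType' has no len()
```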
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chaco/array_data_source.py`
Content:
```
1 """ Defines the ArrayDataSource class."""
2
3 # Major library imports
4 from numpy import array, empty, isfinite, ones, ndarray
5 import numpy as np
6
7 # Enthought library imports
8 from traits.api import Any, Constant, Int, Tuple
9
10 # Chaco imports
11 from .base import NumericalSequenceTrait, reverse_map_1d, SortOrderTrait
12 from .abstract_data_source import AbstractDataSource
13
14
15 def bounded_nanargmin(arr):
16 """Find the index of the minimum value, ignoring NaNs.
17
18 If all NaNs, return 0.
19 """
20 # Different versions of numpy behave differently in the all-NaN case, so we
21 # catch this condition in two different ways.
22 try:
23 if np.issubdtype(arr.dtype, np.floating):
24 min = np.nanargmin(arr)
25 elif np.issubdtype(arr.dtype, np.number):
26 min = np.argmin(arr)
27 else:
28 min = 0
29 except ValueError:
30 return 0
31 if isfinite(min):
32 return min
33 else:
34 return 0
35
36
37 def bounded_nanargmax(arr):
38 """Find the index of the maximum value, ignoring NaNs.
39
40 If all NaNs, return -1.
41 """
42 try:
43 if np.issubdtype(arr.dtype, np.floating):
44 max = np.nanargmax(arr)
45 elif np.issubdtype(arr.dtype, np.number):
46 max = np.argmax(arr)
47 else:
48 max = -1
49 except ValueError:
50 return -1
51 if isfinite(max):
52 return max
53 else:
54 return -1
55
56
57 class ArrayDataSource(AbstractDataSource):
58 """A data source representing a single, continuous array of numerical data.
59
60 This class does not listen to the array for value changes; if you need that
61 behavior, create a subclass that hooks up the appropriate listeners.
62 """
63
64 # ------------------------------------------------------------------------
65 # AbstractDataSource traits
66 # ------------------------------------------------------------------------
67
68 #: The dimensionality of the indices into this data source (overrides
69 #: AbstractDataSource).
70 index_dimension = Constant("scalar")
71
72 #: The dimensionality of the value at each index point (overrides
73 #: AbstractDataSource).
74 value_dimension = Constant("scalar")
75
76 #: The sort order of the data.
77 #: This is a specialized optimization for 1-D arrays, but it's an important
78 #: one that's used everywhere.
79 sort_order = SortOrderTrait
80
81 # ------------------------------------------------------------------------
82 # Private traits
83 # ------------------------------------------------------------------------
84
85 # The data array itself.
86 _data = NumericalSequenceTrait
87
88 # Cached values of min and max as long as **_data** doesn't change.
89 _cached_bounds = Tuple
90
91 # Not necessary, since this is not a filter, but provided for convenience.
92 _cached_mask = Any
93
94 # The index of the (first) minimum value in self._data
95 # FIXME: This is an Any instead of an Int trait because of how Traits
96 # typechecks numpy.int64 on 64-bit Windows systems.
97 _min_index = Any
98
99 # The index of the (first) maximum value in self._data
100 # FIXME: This is an Any instead of an Int trait because of how Traits
101 # typechecks numpy.int64 on 64-bit Windows systems.
102 _max_index = Any
103
104 # ------------------------------------------------------------------------
105 # Public methods
106 # ------------------------------------------------------------------------
107
108 def __init__(self, data=array([]), sort_order="none", **kw):
109 AbstractDataSource.__init__(self, **kw)
110 self.set_data(data, sort_order)
111
112 def set_data(self, newdata, sort_order=None):
113 """Sets the data, and optionally the sort order, for this data source.
114
115 Parameters
116 ----------
117 newdata : array
118 The data to use.
119 sort_order : SortOrderTrait
120 The sort order of the data
121 """
122 self._data = newdata
123 if sort_order is not None:
124 self.sort_order = sort_order
125 self._compute_bounds()
126 self.data_changed = True
127
128 def set_mask(self, mask):
129 """Sets the mask for this data source."""
130 self._cached_mask = mask
131 self.data_changed = True
132
133 def remove_mask(self):
134 """Removes the mask on this data source."""
135 self._cached_mask = None
136 self.data_changed = True
137
138 # ------------------------------------------------------------------------
139 # AbstractDataSource interface
140 # ------------------------------------------------------------------------
141
142 def get_data(self):
143 """Returns the data for this data source, or 0.0 if it has no data.
144
145 Implements AbstractDataSource.
146 """
147 if self._data is not None:
148 return self._data
149 else:
150 return empty(shape=(0,))
151
152 def get_data_mask(self):
153 """get_data_mask() -> (data_array, mask_array)
154
155 Implements AbstractDataSource.
156 """
157 if self._cached_mask is None:
158 return self._data, ones(len(self._data), dtype=bool)
159 else:
160 return self._data, self._cached_mask
161
162 def is_masked(self):
163 """is_masked() -> bool
164
165 Implements AbstractDataSource.
166 """
167 if self._cached_mask is not None:
168 return True
169 else:
170 return False
171
172 def get_size(self):
173 """get_size() -> int
174
175 Implements AbstractDataSource.
176 """
177 if self._data is not None:
178 return len(self._data)
179 else:
180 return 0
181
182 def get_bounds(self):
183 """Returns the minimum and maximum values of the data source's data.
184
185 Implements AbstractDataSource.
186 """
187 if (
188 self._cached_bounds is None
189 or self._cached_bounds == ()
190 or self._cached_bounds == 0.0
191 ):
192 self._compute_bounds()
193 return self._cached_bounds
194
195 def reverse_map(self, pt, index=0, outside_returns_none=True):
196 """Returns the index of *pt* in the data source.
197
198 Parameters
199 ----------
200 pt : scalar value
201 value to find
202 index
203 ignored for data series with 1-D indices
204 outside_returns_none : Boolean
205 Whether the method returns None if *pt* is outside the range of
206 the data source; if False, the method returns the value of the
207 bound that *pt* is outside of.
208 """
209 if self.sort_order == "none":
210 raise NotImplementedError
211
212 # index is ignored for dataseries with 1-dimensional indices
213 minval, maxval = self._cached_bounds
214 if pt < minval:
215 if outside_returns_none:
216 return None
217 else:
218 return self._min_index
219 elif pt > maxval:
220 if outside_returns_none:
221 return None
222 else:
223 return self._max_index
224 else:
225 return reverse_map_1d(self._data, pt, self.sort_order)
226
227 # ------------------------------------------------------------------------
228 # Private methods
229 # ------------------------------------------------------------------------
230
231 def _compute_bounds(self, data=None):
232 """Computes the minimum and maximum values of self._data.
233
234 If a data array is passed in, then that is used instead of self._data.
235 This behavior is useful for subclasses.
236 """
237 # TODO: as an optimization, perhaps create and cache a sorted
238 # version of the dataset?
239
240 if data is None:
241 data = self.get_data()
242
243 data_len = len(data)
244
245 if data_len == 0:
246 self._min_index = 0
247 self._max_index = 0
248 self._cached_bounds = (0.0, 0.0)
249 elif data_len == 1:
250 self._min_index = 0
251 self._max_index = 0
252 self._cached_bounds = (data[0], data[0])
253 else:
254 if self.sort_order == "ascending":
255 self._min_index = 0
256 self._max_index = -1
257 elif self.sort_order == "descending":
258 self._min_index = -1
259 self._max_index = 0
260 else:
261 # ignore NaN values. This is probably a little slower,
262 # but also much safer.
263
264 # data might be an array of strings or objects that
265 # can't have argmin calculated on them.
266 try:
267 # the data may be in a subclass of numpy.array, viewing
268 # the data as a ndarray will remove side effects of
269 # the subclasses, such as different operator behaviors
270 self._min_index = bounded_nanargmin(data.view(ndarray))
271 self._max_index = bounded_nanargmax(data.view(ndarray))
272 except (TypeError, IndexError, NotImplementedError):
273 # For strings and objects, we punt... These show up in
274 # label-ish data sources.
275 self._cached_bounds = (0.0, 0.0)
276
277 self._cached_bounds = (
278 data[self._min_index],
279 data[self._max_index],
280 )
281
282 # ------------------------------------------------------------------------
283 # Event handlers
284 # ------------------------------------------------------------------------
285
286 def _metadata_changed(self, event):
287 self.metadata_changed = True
288
289 def _metadata_items_changed(self, event):
290 self.metadata_changed = True
291
292 # ------------------------------------------------------------------------
293 # Persistence-related methods
294 # ------------------------------------------------------------------------
295
296 def __getstate__(self):
297 state = super().__getstate__()
298 if not self.persist_data:
299 state.pop("_data", None)
300 state.pop("_cached_mask", None)
301 state.pop("_cached_bounds", None)
302 state.pop("_min_index", None)
303 state.pop("_max_index", None)
304 return state
305
306 def _post_load(self):
307 super()._post_load()
308 self._cached_bounds = ()
309 self._cached_mask = None
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chaco/array_data_source.py b/chaco/array_data_source.py
--- a/chaco/array_data_source.py
+++ b/chaco/array_data_source.py
@@ -155,7 +155,10 @@
Implements AbstractDataSource.
"""
if self._cached_mask is None:
- return self._data, ones(len(self._data), dtype=bool)
+ if self._data is None:
+ return self._data, ones(0, dtype=bool)
+ else:
+ return self._data, ones(len(self._data), dtype=bool)
else:
return self._data, self._cached_mask
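A quick check of the patched behaviour, assuming the fix above is applied; the expected values follow directly from the new branch rather than from an existing test in the repository:

```python
import numpy as np
from chaco.array_data_source import ArrayDataSource

ds = ArrayDataSource(None)
data, mask = ds.get_data_mask()
assert data is None                 # _data is passed through unchanged
assert mask.shape == (0,)           # empty boolean mask instead of a TypeError
assert mask.dtype == np.bool_

ds.set_data(np.array([1.0, 2.0, 3.0]))
data, mask = ds.get_data_mask()
assert len(mask) == 3 and mask.all()  # unmasked data still gets an all-True mask
```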
| {"golden_diff": "diff --git a/chaco/array_data_source.py b/chaco/array_data_source.py\n--- a/chaco/array_data_source.py\n+++ b/chaco/array_data_source.py\n@@ -155,7 +155,10 @@\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is None:\n- return self._data, ones(len(self._data), dtype=bool)\n+ if self._data is None:\n+ return self._data, ones(0, dtype=bool)\n+ else:\n+ return self._data, ones(len(self._data), dtype=bool)\n else:\n return self._data, self._cached_mask\n", "issue": "ArrayDataSource get_mask_data() fails when data is None\nSee this test here:\n\nhttps://github.com/enthought/chaco/blob/enh/data-source-tests/chaco/tests/arraydatasource_test_case.py#L108\n\nMore generally, I think that the behaviour for an empty data source is probably wrong (why a _scalar_ `0.0` instead of `array([])`?) but I'm not sure what will break if that is changed.\n\n", "before_files": [{"content": "\"\"\" Defines the ArrayDataSource class.\"\"\"\n\n# Major library imports\nfrom numpy import array, empty, isfinite, ones, ndarray\nimport numpy as np\n\n# Enthought library imports\nfrom traits.api import Any, Constant, Int, Tuple\n\n# Chaco imports\nfrom .base import NumericalSequenceTrait, reverse_map_1d, SortOrderTrait\nfrom .abstract_data_source import AbstractDataSource\n\n\ndef bounded_nanargmin(arr):\n \"\"\"Find the index of the minimum value, ignoring NaNs.\n\n If all NaNs, return 0.\n \"\"\"\n # Different versions of numpy behave differently in the all-NaN case, so we\n # catch this condition in two different ways.\n try:\n if np.issubdtype(arr.dtype, np.floating):\n min = np.nanargmin(arr)\n elif np.issubdtype(arr.dtype, np.number):\n min = np.argmin(arr)\n else:\n min = 0\n except ValueError:\n return 0\n if isfinite(min):\n return min\n else:\n return 0\n\n\ndef bounded_nanargmax(arr):\n \"\"\"Find the index of the maximum value, ignoring NaNs.\n\n If all NaNs, return -1.\n \"\"\"\n try:\n if np.issubdtype(arr.dtype, np.floating):\n max = np.nanargmax(arr)\n elif np.issubdtype(arr.dtype, np.number):\n max = np.argmax(arr)\n else:\n max = -1\n except ValueError:\n return -1\n if isfinite(max):\n return max\n else:\n return -1\n\n\nclass ArrayDataSource(AbstractDataSource):\n \"\"\"A data source representing a single, continuous array of numerical data.\n\n This class does not listen to the array for value changes; if you need that\n behavior, create a subclass that hooks up the appropriate listeners.\n \"\"\"\n\n # ------------------------------------------------------------------------\n # AbstractDataSource traits\n # ------------------------------------------------------------------------\n\n #: The dimensionality of the indices into this data source (overrides\n #: AbstractDataSource).\n index_dimension = Constant(\"scalar\")\n\n #: The dimensionality of the value at each index point (overrides\n #: AbstractDataSource).\n value_dimension = Constant(\"scalar\")\n\n #: The sort order of the data.\n #: This is a specialized optimization for 1-D arrays, but it's an important\n #: one that's used everywhere.\n sort_order = SortOrderTrait\n\n # ------------------------------------------------------------------------\n # Private traits\n # ------------------------------------------------------------------------\n\n # The data array itself.\n _data = NumericalSequenceTrait\n\n # Cached values of min and max as long as **_data** doesn't change.\n _cached_bounds = Tuple\n\n # Not necessary, since this is not a filter, but provided for convenience.\n _cached_mask = Any\n\n # The index 
of the (first) minimum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _min_index = Any\n\n # The index of the (first) maximum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _max_index = Any\n\n # ------------------------------------------------------------------------\n # Public methods\n # ------------------------------------------------------------------------\n\n def __init__(self, data=array([]), sort_order=\"none\", **kw):\n AbstractDataSource.__init__(self, **kw)\n self.set_data(data, sort_order)\n\n def set_data(self, newdata, sort_order=None):\n \"\"\"Sets the data, and optionally the sort order, for this data source.\n\n Parameters\n ----------\n newdata : array\n The data to use.\n sort_order : SortOrderTrait\n The sort order of the data\n \"\"\"\n self._data = newdata\n if sort_order is not None:\n self.sort_order = sort_order\n self._compute_bounds()\n self.data_changed = True\n\n def set_mask(self, mask):\n \"\"\"Sets the mask for this data source.\"\"\"\n self._cached_mask = mask\n self.data_changed = True\n\n def remove_mask(self):\n \"\"\"Removes the mask on this data source.\"\"\"\n self._cached_mask = None\n self.data_changed = True\n\n # ------------------------------------------------------------------------\n # AbstractDataSource interface\n # ------------------------------------------------------------------------\n\n def get_data(self):\n \"\"\"Returns the data for this data source, or 0.0 if it has no data.\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return self._data\n else:\n return empty(shape=(0,))\n\n def get_data_mask(self):\n \"\"\"get_data_mask() -> (data_array, mask_array)\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is None:\n return self._data, ones(len(self._data), dtype=bool)\n else:\n return self._data, self._cached_mask\n\n def is_masked(self):\n \"\"\"is_masked() -> bool\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is not None:\n return True\n else:\n return False\n\n def get_size(self):\n \"\"\"get_size() -> int\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return len(self._data)\n else:\n return 0\n\n def get_bounds(self):\n \"\"\"Returns the minimum and maximum values of the data source's data.\n\n Implements AbstractDataSource.\n \"\"\"\n if (\n self._cached_bounds is None\n or self._cached_bounds == ()\n or self._cached_bounds == 0.0\n ):\n self._compute_bounds()\n return self._cached_bounds\n\n def reverse_map(self, pt, index=0, outside_returns_none=True):\n \"\"\"Returns the index of *pt* in the data source.\n\n Parameters\n ----------\n pt : scalar value\n value to find\n index\n ignored for data series with 1-D indices\n outside_returns_none : Boolean\n Whether the method returns None if *pt* is outside the range of\n the data source; if False, the method returns the value of the\n bound that *pt* is outside of.\n \"\"\"\n if self.sort_order == \"none\":\n raise NotImplementedError\n\n # index is ignored for dataseries with 1-dimensional indices\n minval, maxval = self._cached_bounds\n if pt < minval:\n if outside_returns_none:\n return None\n else:\n return self._min_index\n elif pt > maxval:\n if outside_returns_none:\n return None\n else:\n return self._max_index\n else:\n return reverse_map_1d(self._data, pt, self.sort_order)\n\n # 
------------------------------------------------------------------------\n # Private methods\n # ------------------------------------------------------------------------\n\n def _compute_bounds(self, data=None):\n \"\"\"Computes the minimum and maximum values of self._data.\n\n If a data array is passed in, then that is used instead of self._data.\n This behavior is useful for subclasses.\n \"\"\"\n # TODO: as an optimization, perhaps create and cache a sorted\n # version of the dataset?\n\n if data is None:\n data = self.get_data()\n\n data_len = len(data)\n\n if data_len == 0:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (0.0, 0.0)\n elif data_len == 1:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (data[0], data[0])\n else:\n if self.sort_order == \"ascending\":\n self._min_index = 0\n self._max_index = -1\n elif self.sort_order == \"descending\":\n self._min_index = -1\n self._max_index = 0\n else:\n # ignore NaN values. This is probably a little slower,\n # but also much safer.\n\n # data might be an array of strings or objects that\n # can't have argmin calculated on them.\n try:\n # the data may be in a subclass of numpy.array, viewing\n # the data as a ndarray will remove side effects of\n # the subclasses, such as different operator behaviors\n self._min_index = bounded_nanargmin(data.view(ndarray))\n self._max_index = bounded_nanargmax(data.view(ndarray))\n except (TypeError, IndexError, NotImplementedError):\n # For strings and objects, we punt... These show up in\n # label-ish data sources.\n self._cached_bounds = (0.0, 0.0)\n\n self._cached_bounds = (\n data[self._min_index],\n data[self._max_index],\n )\n\n # ------------------------------------------------------------------------\n # Event handlers\n # ------------------------------------------------------------------------\n\n def _metadata_changed(self, event):\n self.metadata_changed = True\n\n def _metadata_items_changed(self, event):\n self.metadata_changed = True\n\n # ------------------------------------------------------------------------\n # Persistence-related methods\n # ------------------------------------------------------------------------\n\n def __getstate__(self):\n state = super().__getstate__()\n if not self.persist_data:\n state.pop(\"_data\", None)\n state.pop(\"_cached_mask\", None)\n state.pop(\"_cached_bounds\", None)\n state.pop(\"_min_index\", None)\n state.pop(\"_max_index\", None)\n return state\n\n def _post_load(self):\n super()._post_load()\n self._cached_bounds = ()\n self._cached_mask = None\n", "path": "chaco/array_data_source.py"}], "after_files": [{"content": "\"\"\" Defines the ArrayDataSource class.\"\"\"\n\n# Major library imports\nfrom numpy import array, empty, isfinite, ones, ndarray\nimport numpy as np\n\n# Enthought library imports\nfrom traits.api import Any, Constant, Int, Tuple\n\n# Chaco imports\nfrom .base import NumericalSequenceTrait, reverse_map_1d, SortOrderTrait\nfrom .abstract_data_source import AbstractDataSource\n\n\ndef bounded_nanargmin(arr):\n \"\"\"Find the index of the minimum value, ignoring NaNs.\n\n If all NaNs, return 0.\n \"\"\"\n # Different versions of numpy behave differently in the all-NaN case, so we\n # catch this condition in two different ways.\n try:\n if np.issubdtype(arr.dtype, np.floating):\n min = np.nanargmin(arr)\n elif np.issubdtype(arr.dtype, np.number):\n min = np.argmin(arr)\n else:\n min = 0\n except ValueError:\n return 0\n if isfinite(min):\n return min\n else:\n return 0\n\n\ndef 
bounded_nanargmax(arr):\n \"\"\"Find the index of the maximum value, ignoring NaNs.\n\n If all NaNs, return -1.\n \"\"\"\n try:\n if np.issubdtype(arr.dtype, np.floating):\n max = np.nanargmax(arr)\n elif np.issubdtype(arr.dtype, np.number):\n max = np.argmax(arr)\n else:\n max = -1\n except ValueError:\n return -1\n if isfinite(max):\n return max\n else:\n return -1\n\n\nclass ArrayDataSource(AbstractDataSource):\n \"\"\"A data source representing a single, continuous array of numerical data.\n\n This class does not listen to the array for value changes; if you need that\n behavior, create a subclass that hooks up the appropriate listeners.\n \"\"\"\n\n # ------------------------------------------------------------------------\n # AbstractDataSource traits\n # ------------------------------------------------------------------------\n\n #: The dimensionality of the indices into this data source (overrides\n #: AbstractDataSource).\n index_dimension = Constant(\"scalar\")\n\n #: The dimensionality of the value at each index point (overrides\n #: AbstractDataSource).\n value_dimension = Constant(\"scalar\")\n\n #: The sort order of the data.\n #: This is a specialized optimization for 1-D arrays, but it's an important\n #: one that's used everywhere.\n sort_order = SortOrderTrait\n\n # ------------------------------------------------------------------------\n # Private traits\n # ------------------------------------------------------------------------\n\n # The data array itself.\n _data = NumericalSequenceTrait\n\n # Cached values of min and max as long as **_data** doesn't change.\n _cached_bounds = Tuple\n\n # Not necessary, since this is not a filter, but provided for convenience.\n _cached_mask = Any\n\n # The index of the (first) minimum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _min_index = Any\n\n # The index of the (first) maximum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _max_index = Any\n\n # ------------------------------------------------------------------------\n # Public methods\n # ------------------------------------------------------------------------\n\n def __init__(self, data=array([]), sort_order=\"none\", **kw):\n AbstractDataSource.__init__(self, **kw)\n self.set_data(data, sort_order)\n\n def set_data(self, newdata, sort_order=None):\n \"\"\"Sets the data, and optionally the sort order, for this data source.\n\n Parameters\n ----------\n newdata : array\n The data to use.\n sort_order : SortOrderTrait\n The sort order of the data\n \"\"\"\n self._data = newdata\n if sort_order is not None:\n self.sort_order = sort_order\n self._compute_bounds()\n self.data_changed = True\n\n def set_mask(self, mask):\n \"\"\"Sets the mask for this data source.\"\"\"\n self._cached_mask = mask\n self.data_changed = True\n\n def remove_mask(self):\n \"\"\"Removes the mask on this data source.\"\"\"\n self._cached_mask = None\n self.data_changed = True\n\n # ------------------------------------------------------------------------\n # AbstractDataSource interface\n # ------------------------------------------------------------------------\n\n def get_data(self):\n \"\"\"Returns the data for this data source, or 0.0 if it has no data.\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return self._data\n else:\n return empty(shape=(0,))\n\n def 
get_data_mask(self):\n \"\"\"get_data_mask() -> (data_array, mask_array)\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is None:\n if self._data is None:\n return self._data, ones(0, dtype=bool)\n else:\n return self._data, ones(len(self._data), dtype=bool)\n else:\n return self._data, self._cached_mask\n\n def is_masked(self):\n \"\"\"is_masked() -> bool\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is not None:\n return True\n else:\n return False\n\n def get_size(self):\n \"\"\"get_size() -> int\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return len(self._data)\n else:\n return 0\n\n def get_bounds(self):\n \"\"\"Returns the minimum and maximum values of the data source's data.\n\n Implements AbstractDataSource.\n \"\"\"\n if (\n self._cached_bounds is None\n or self._cached_bounds == ()\n or self._cached_bounds == 0.0\n ):\n self._compute_bounds()\n return self._cached_bounds\n\n def reverse_map(self, pt, index=0, outside_returns_none=True):\n \"\"\"Returns the index of *pt* in the data source.\n\n Parameters\n ----------\n pt : scalar value\n value to find\n index\n ignored for data series with 1-D indices\n outside_returns_none : Boolean\n Whether the method returns None if *pt* is outside the range of\n the data source; if False, the method returns the value of the\n bound that *pt* is outside of.\n \"\"\"\n if self.sort_order == \"none\":\n raise NotImplementedError\n\n # index is ignored for dataseries with 1-dimensional indices\n minval, maxval = self._cached_bounds\n if pt < minval:\n if outside_returns_none:\n return None\n else:\n return self._min_index\n elif pt > maxval:\n if outside_returns_none:\n return None\n else:\n return self._max_index\n else:\n return reverse_map_1d(self._data, pt, self.sort_order)\n\n # ------------------------------------------------------------------------\n # Private methods\n # ------------------------------------------------------------------------\n\n def _compute_bounds(self, data=None):\n \"\"\"Computes the minimum and maximum values of self._data.\n\n If a data array is passed in, then that is used instead of self._data.\n This behavior is useful for subclasses.\n \"\"\"\n # TODO: as an optimization, perhaps create and cache a sorted\n # version of the dataset?\n\n if data is None:\n data = self.get_data()\n\n data_len = len(data)\n\n if data_len == 0:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (0.0, 0.0)\n elif data_len == 1:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (data[0], data[0])\n else:\n if self.sort_order == \"ascending\":\n self._min_index = 0\n self._max_index = -1\n elif self.sort_order == \"descending\":\n self._min_index = -1\n self._max_index = 0\n else:\n # ignore NaN values. This is probably a little slower,\n # but also much safer.\n\n # data might be an array of strings or objects that\n # can't have argmin calculated on them.\n try:\n # the data may be in a subclass of numpy.array, viewing\n # the data as a ndarray will remove side effects of\n # the subclasses, such as different operator behaviors\n self._min_index = bounded_nanargmin(data.view(ndarray))\n self._max_index = bounded_nanargmax(data.view(ndarray))\n except (TypeError, IndexError, NotImplementedError):\n # For strings and objects, we punt... 
These show up in\n # label-ish data sources.\n self._cached_bounds = (0.0, 0.0)\n\n self._cached_bounds = (\n data[self._min_index],\n data[self._max_index],\n )\n\n # ------------------------------------------------------------------------\n # Event handlers\n # ------------------------------------------------------------------------\n\n def _metadata_changed(self, event):\n self.metadata_changed = True\n\n def _metadata_items_changed(self, event):\n self.metadata_changed = True\n\n # ------------------------------------------------------------------------\n # Persistence-related methods\n # ------------------------------------------------------------------------\n\n def __getstate__(self):\n state = super().__getstate__()\n if not self.persist_data:\n state.pop(\"_data\", None)\n state.pop(\"_cached_mask\", None)\n state.pop(\"_cached_bounds\", None)\n state.pop(\"_min_index\", None)\n state.pop(\"_max_index\", None)\n return state\n\n def _post_load(self):\n super()._post_load()\n self._cached_bounds = ()\n self._cached_mask = None\n", "path": "chaco/array_data_source.py"}]} |
gh_patches_debug_1451 | rasdani/github-patches | git_diff | meltano__meltano-7343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feature: Support arrow key text navigation during interactive config
### Feature scope
CLI (options, error messages, logging, etc.)
### Description
Currently when using interactive config, the arrow keys are interpreted as raw values, rather than as navigation controls:
Examples:
Pressing the up key to try to set the prompt to the last value entered:
```
New value: ^[[A
```
Pressing the left key repeatedly to try to add a missing quote:
```
New value: example"^[[D^[[D^[[D^[[D^[[D^[[D^[[D^[[D
```
Ideally arrow keys pressed during interactive config would result in typical text navigation behaviour.
--- END ISSUE ---
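A small sketch of the usual remedy, assuming CPython on a platform that ships the GNU readline bindings: merely importing `readline` switches `input()` (and anything layered on top of it, such as `click.prompt`) to line-edited input, so arrow keys move the cursor and recall history instead of being echoed as escape sequences.

```python
from contextlib import suppress

# readline is not available everywhere (notably on Windows), so the import
# is best-effort; when it succeeds, input() gains emacs-style line editing.
with suppress(ImportError):
    import readline  # noqa: F401

value = input("New value: ")
print(repr(value))  # Up/Left now edit the line instead of inserting ^[[A / ^[[D
```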
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/cli/interactive/config.py`
Content:
```
1 """Interactive configuration handler."""
2
3 from __future__ import annotations
4
5 import click
6 from jinja2 import BaseLoader, Environment
7 from rich.console import Console, Group
8 from rich.markdown import Markdown
9 from rich.panel import Panel
10 from rich.table import Table
11 from rich.text import Text
12
13 from meltano.cli.interactive.utils import InteractionStatus
14 from meltano.cli.utils import CliError
15 from meltano.core.environment_service import EnvironmentService
16 from meltano.core.project import Project
17 from meltano.core.settings_service import (
18 REDACTED_VALUE,
19 SettingKind,
20 SettingsService,
21 SettingValueStore,
22 )
23 from meltano.core.settings_store import StoreNotSupportedError
24 from meltano.core.tracking.contexts import CliEvent
25
26 PLUGIN_COLOR = "magenta"
27 ENVIRONMENT_COLOR = "orange1"
28 SETTING_COLOR = "blue1"
29 VALUE_COLOR = "green"
30
31 HOME_SCREEN_TEMPLATE = """[bold underline]Configuring [{{ plugin_color }}]{{ plugin_name.capitalize() | safe }}[/{{ plugin_color }}] {% if environment_name %}in Environment[{{ environment_color }}]{{ environment_name }}[/{{ environment_color }}] {% endif %}Interactively[/bold underline]
32
33 Following the prompts below, you will be guided through configuration of this plugin.
34
35 Meltano is responsible for managing the configuration of all of a project’s plugins.
36 It knows what settings are supported by each plugin, and how and when different types of plugins expect to be fed that configuration.
37
38 To determine the values of settings, Meltano will look in 4 main places, with each taking precedence over the next:
39
40 1. Environment variables
41 2. Your meltano.yml project file
42 3. Your project's system database
43 4. The default values set in the plugin's settings metadata
44
45 Within meltano.yml you can also associate configuration with a Meltano Environment, allowing you to define custom layers of configuration within your project.
46
47 To learn more about configuration options, see the [link=https://docs.meltano.com/guide/configuration]Meltano Configuration Guide[/link]
48
49 [bold underline]Settings[/bold underline]
50 {% for setting in settings %}
51 {{ loop.index }}. [blue]{{ setting["name"] }}[/blue]: {{ setting["description"] | safe }}
52 {%- endfor %}
53
54 {% if plugin_url %}To learn more about {{ plugin_name | safe }} and its settings, visit [link={{ plugin_url }}]{{ plugin_url }}[/link]{% endif %}
55 """
56
57
58 class InteractiveConfig: # noqa: WPS230, WPS214
59 """Manage Config interactively."""
60
61 def __init__(self, ctx, store, extras=False, max_width=None):
62 """Initialise InteractiveConfig instance."""
63 self.ctx = ctx
64 self.store = store
65 self.extras = extras
66 self.project: Project = self.ctx.obj["project"]
67 self.settings: SettingsService = self.ctx.obj["settings"]
68 self.session = self.ctx.obj["session"]
69 self.tracker = self.ctx.obj["tracker"]
70 self.environment_service = EnvironmentService(self.project)
71 self.max_width = max_width or 75 # noqa: WPS432
72 self.console = Console()
73
74 @property
75 def configurable_settings(self):
76 """Return settings available for interactive configuration."""
77 return self.settings.config_with_metadata(
78 session=self.session, extras=self.extras, redacted=True
79 )
80
81 @property
82 def setting_choices(self):
83 """Return simplified setting choices, for easy printing."""
84 setting_choices = []
85 for index, (name, config_metadata) in enumerate(
86 self.configurable_settings.items()
87 ):
88 description = config_metadata["setting"].description
89 description = "" if description is None else description
90 setting_choices.append((str(index + 1), name, description))
91 return setting_choices
92
93 def truncate(self, text: str) -> str:
94 """Truncate text."""
95 if len(text) >= self.max_width:
96 return f"{text[: self.max_width - 3]}..."
97 return text
98
99 def _print_home_screen(self):
100 """Print screen for this interactive."""
101 markdown_template = Environment(loader=BaseLoader, autoescape=True).from_string(
102 HOME_SCREEN_TEMPLATE
103 )
104 markdown_text = markdown_template.render(
105 {
106 "plugin_color": PLUGIN_COLOR,
107 "environment_color": ENVIRONMENT_COLOR,
108 "setting_color": SETTING_COLOR,
109 "plugin_name": self.settings.label,
110 "plugin_url": self.settings.docs_url,
111 "environment_name": self.project.environment.name
112 if self.project.environment
113 else None,
114 "settings": [
115 {
116 "name": name,
117 "description": self.truncate(description.replace("\n", " ")),
118 }
119 for _, name, description in self.setting_choices
120 ],
121 }
122 )
123 self.console.print(Panel(Text.from_markup(markdown_text)))
124
125 def _print_setting(self, name, config_metadata, index, last_index):
126 """Print setting."""
127 value = config_metadata["value"]
128 source = config_metadata["source"]
129 setting_def = config_metadata["setting"]
130 details = Table(show_header=False)
131 details.add_column("name", justify="right")
132 details.add_column("value")
133
134 pre = [
135 Text.from_markup(
136 f"[bold underline][{PLUGIN_COLOR}]{self.settings.label.capitalize()}[/{PLUGIN_COLOR}][/bold underline] Setting {index} of {last_index}"
137 )
138 ]
139
140 if setting_def.is_extra:
141 pre.append(
142 Text.from_markup(
143 "[yellow1]Custom Extra: plugin-specific options handled by Meltano[/yellow1]"
144 )
145 )
146
147 elif setting_def.is_custom:
148 pre.append(
149 Text.from_markup(
150 "[yellow1]Custom Setting: possibly unsupported by the plugin[/yellow1]"
151 )
152 )
153
154 details.add_row(
155 Text("Name"), Text.from_markup(f"[{SETTING_COLOR}]{name}[/{SETTING_COLOR}]")
156 )
157
158 if source is SettingValueStore.DEFAULT:
159 label = "default"
160 elif source is SettingValueStore.INHERITED:
161 label = f"inherited from '{self.settings.plugin.parent.name}'"
162 else:
163 label = f"from {source.label}"
164 expanded_value = value if value is not None else "(empty string)"
165 unexpanded_value = config_metadata.get("unexpanded_value")
166 if unexpanded_value:
167 current_value = (
168 unexpanded_value if unexpanded_value is not None else "(empty string)"
169 )
170
171 details.add_row(Text("Current Expanded Value"), Text(f"{expanded_value}"))
172 else:
173 current_value = value if value is not None else "(empty string)"
174 details.add_row(
175 Text(f"Current Value ({label})"),
176 Text.from_markup(f"[{VALUE_COLOR}]{current_value}[/{VALUE_COLOR}]"),
177 )
178
179 if setting_def.kind:
180 details.add_row(Text("Kind"), Text(f"{setting_def.kind}"))
181 if source is not SettingValueStore.DEFAULT:
182 default_value = setting_def.value
183 if default_value is not None:
184 details.add_row(Text("Default"), Text(f"{default_value!r}"))
185 env_keys = [
186 var.definition for var in self.settings.setting_env_vars(setting_def)
187 ]
188
189 details.add_row(Text("Env(s)"), Text(f"{', '.join(env_keys)}"))
190 post = []
191 if setting_def.description:
192 post.append(
193 Group(
194 Text(" Description:"),
195 Panel(Markdown(setting_def.description, justify="left")),
196 )
197 )
198
199 docs_url = self.settings.docs_url
200 if docs_url:
201 post.append(
202 Text.from_markup(
203 f" To learn more about {self.settings.label} and its settings, visit [link={docs_url}]{docs_url}[/link]"
204 )
205 )
206
207 self.console.print(Panel(Group(*pre, details, *post)))
208
209 @staticmethod
210 def _value_prompt(config_metadata):
211 if config_metadata["setting"].kind != SettingKind.OPTIONS:
212 return (
213 click.prompt(
214 "New value",
215 default="",
216 show_default=False,
217 hide_input=True,
218 confirmation_prompt=True,
219 )
220 if config_metadata["setting"].is_redacted
221 else click.prompt("New value", default="", show_default=False)
222 )
223
224 options_index = {
225 str(index + 1): value
226 for index, value in enumerate(
227 (chs["label"], chs["value"])
228 for chs in config_metadata["setting"].options
229 )
230 }
231
232 click.echo()
233 for index, value in options_index.items():
234 click.echo(f"{index}. {value[0]}")
235 click.echo()
236 chosen_index = click.prompt(
237 "Select value",
238 type=click.Choice(list(options_index.keys())),
239 show_default=False,
240 )
241 return options_index[chosen_index][1]
242
243 def configure(self, name, index=None, last_index=None, show_set_prompt=True):
244 """Configure a single setting interactively."""
245 config_metadata = next(
246 (
247 config_metadata
248 for nme, config_metadata in self.configurable_settings.items()
249 if nme == name
250 )
251 )
252 self._print_setting(
253 name=name,
254 config_metadata=config_metadata,
255 index=index,
256 last_index=last_index,
257 )
258
259 action = "y"
260 if show_set_prompt:
261 try:
262 click.echo()
263 action = click.prompt(
264 "Set this value (Y/n) or exit (e)?",
265 default="y",
266 type=click.Choice(["y", "n", "e"], case_sensitive=False),
267 )
268 except click.Abort:
269 action = "e"
270
271 if action.lower() == "y":
272 while True:
273 click.echo()
274 try:
275 new_value = self._value_prompt(config_metadata)
276 except click.Abort:
277 click.echo()
278 click.echo("Skipping...")
279 click.pause()
280 return InteractionStatus.SKIP
281
282 try:
283 click.echo()
284 self.set_value(
285 setting_name=tuple(name.split(".")),
286 value=new_value,
287 store=self.store,
288 interactive=True,
289 )
290 click.echo()
291 click.pause()
292 return InteractionStatus.SKIP
293 except Exception as e:
294 self.tracker.track_command_event(CliEvent.inflight)
295 click.secho(f"Failed to set value: {e}", fg="red")
296
297 elif action.lower() == "n":
298 return InteractionStatus.SKIP
299
300 elif action.lower() == "e":
301 return InteractionStatus.EXIT
302
303 def configure_all(self):
304 """Configure all settings."""
305 numeric_choices = [idx for idx, _, _ in self.setting_choices]
306 if not numeric_choices:
307 click.secho(
308 "There are no settings to configure. "
309 "For help, please see https://melta.no#no-plugin-settings-defined",
310 fg="yellow",
311 )
312 self.tracker.track_command_event(CliEvent.completed)
313 return
314
315 while True:
316 click.clear()
317 self._print_home_screen()
318 choices = ["all", *numeric_choices, "e"]
319
320 branch = "all"
321 try:
322 click.echo()
323 branch = click.prompt(
324 "Loop through all settings (all), select a setting by "
325 f"number ({min(int(chs) for chs in numeric_choices)} - "
326 f"{max(int(chs) for chs in numeric_choices)}), or exit (e)?",
327 type=click.Choice(choices, case_sensitive=False),
328 default="all",
329 show_choices=False,
330 )
331 except click.Abort:
332 click.echo()
333 branch = "e"
334
335 if branch == "all":
336 for index, name, _ in self.setting_choices:
337 click.clear()
338 status = InteractionStatus.START
339 while status not in {
340 InteractionStatus.SKIP,
341 InteractionStatus.EXIT,
342 }:
343 status = self.configure(
344 name=name,
345 index=index,
346 last_index=len(self.setting_choices),
347 )
348 if status == InteractionStatus.EXIT:
349 break
350 elif branch.lower() == "e":
351 self.tracker.track_command_event(CliEvent.completed)
352 click.echo()
353 return
354 else:
355 choice_name = next(
356 nme for idx, nme, _ in self.setting_choices if idx == branch
357 )
358 click.clear()
359 status = self.configure(
360 name=choice_name,
361 index=branch,
362 last_index=len(self.setting_choices),
363 show_set_prompt=False,
364 )
365
366 def set_value(self, setting_name, value, store, interactive=False):
367 """Set value helper function."""
368 settings = self.settings
369 path = list(setting_name)
370 try:
371 value, metadata = settings.set_with_metadata(
372 path, value, store=store, session=self.session
373 )
374 except StoreNotSupportedError as err:
375 if interactive:
376 self.tracker.track_command_event(CliEvent.inflight)
377 else:
378 self.tracker.track_command_event(CliEvent.aborted)
379 raise CliError(
380 f"{settings.label.capitalize()} setting '{path}' could not be set in {store.label}: {err}"
381 ) from err
382
383 name = metadata["name"]
384 store = metadata["store"]
385 is_redacted = metadata["setting"] and metadata["setting"].is_redacted
386 if is_redacted:
387 value = REDACTED_VALUE
388 click.secho(
389 f"{settings.label.capitalize()} setting '{name}' was set in {store.label}: {value!r}",
390 fg=VALUE_COLOR,
391 )
392
393 current_value, source = settings.get_with_source(name, session=self.session)
394 if source != store:
395 if is_redacted:
396 current_value = REDACTED_VALUE
397 click.secho(
398 f"Current value is still: {current_value!r} (from {source.label})",
399 fg="yellow",
400 )
401
402 if interactive:
403 self.tracker.track_command_event(CliEvent.inflight)
404 else:
405 self.tracker.track_command_event(CliEvent.completed)
406
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/meltano/cli/interactive/config.py b/src/meltano/cli/interactive/config.py
--- a/src/meltano/cli/interactive/config.py
+++ b/src/meltano/cli/interactive/config.py
@@ -2,6 +2,14 @@
from __future__ import annotations
+from contextlib import suppress
+
+# NOTE: Importing the readline module enables the use of arrow
+# keys for text navigation during interactive config.
+# Refer to https://docs.python.org/3/library/readline.html
+with suppress(ImportError):
+ import readline # noqa: F401
+
import click
from jinja2 import BaseLoader, Environment
from rich.console import Console, Group
| {"golden_diff": "diff --git a/src/meltano/cli/interactive/config.py b/src/meltano/cli/interactive/config.py\n--- a/src/meltano/cli/interactive/config.py\n+++ b/src/meltano/cli/interactive/config.py\n@@ -2,6 +2,14 @@\n \n from __future__ import annotations\n \n+from contextlib import suppress\n+\n+# NOTE: Importing the readline module enables the use of arrow\n+# keys for text navigation during interactive config.\n+# Refer to https://docs.python.org/3/library/readline.html\n+with suppress(ImportError):\n+ import readline # noqa: F401\n+\n import click\n from jinja2 import BaseLoader, Environment\n from rich.console import Console, Group\n", "issue": "feature: Support arrow key text navigation during interactive config\n### Feature scope\n\nCLI (options, error messages, logging, etc.)\n\n### Description\n\nCurrently when using interactive config, the arrow keys are interpreted as raw values, rather than as navigation controls:\r\n\r\nExamples:\r\n\r\nPressing the up key to try to set the prompt to the last value entered:\r\n\r\n```\r\nNew value: ^[[A\r\n```\r\n\r\nPressing the left key repeatedly to try to add a missing quote:\r\n\r\n```\r\nNew value: example\"^[[D^[[D^[[D^[[D^[[D^[[D^[[D^[[D\r\n```\r\n\r\nIdeally arrow keys pressed during interactive config would result in typical text navigation behaviour.\n", "before_files": [{"content": "\"\"\"Interactive configuration handler.\"\"\"\n\nfrom __future__ import annotations\n\nimport click\nfrom jinja2 import BaseLoader, Environment\nfrom rich.console import Console, Group\nfrom rich.markdown import Markdown\nfrom rich.panel import Panel\nfrom rich.table import Table\nfrom rich.text import Text\n\nfrom meltano.cli.interactive.utils import InteractionStatus\nfrom meltano.cli.utils import CliError\nfrom meltano.core.environment_service import EnvironmentService\nfrom meltano.core.project import Project\nfrom meltano.core.settings_service import (\n REDACTED_VALUE,\n SettingKind,\n SettingsService,\n SettingValueStore,\n)\nfrom meltano.core.settings_store import StoreNotSupportedError\nfrom meltano.core.tracking.contexts import CliEvent\n\nPLUGIN_COLOR = \"magenta\"\nENVIRONMENT_COLOR = \"orange1\"\nSETTING_COLOR = \"blue1\"\nVALUE_COLOR = \"green\"\n\nHOME_SCREEN_TEMPLATE = \"\"\"[bold underline]Configuring [{{ plugin_color }}]{{ plugin_name.capitalize() | safe }}[/{{ plugin_color }}] {% if environment_name %}in Environment[{{ environment_color }}]{{ environment_name }}[/{{ environment_color }}] {% endif %}Interactively[/bold underline]\n\nFollowing the prompts below, you will be guided through configuration of this plugin.\n\nMeltano is responsible for managing the configuration of all of a project\u2019s plugins.\nIt knows what settings are supported by each plugin, and how and when different types of plugins expect to be fed that configuration.\n\nTo determine the values of settings, Meltano will look in 4 main places, with each taking precedence over the next:\n\n 1. Environment variables\n 2. Your meltano.yml project file\n 3. Your project's system database\n 4. The default values set in the plugin's settings metadata\n\nWithin meltano.yml you can also associate configuration with a Meltano Environment, allowing you to define custom layers of configuration within your project.\n\nTo learn more about configuration options, see the [link=https://docs.meltano.com/guide/configuration]Meltano Configuration Guide[/link]\n\n[bold underline]Settings[/bold underline]\n{% for setting in settings %}\n{{ loop.index }}. 
[blue]{{ setting[\"name\"] }}[/blue]: {{ setting[\"description\"] | safe }}\n{%- endfor %}\n\n{% if plugin_url %}To learn more about {{ plugin_name | safe }} and its settings, visit [link={{ plugin_url }}]{{ plugin_url }}[/link]{% endif %}\n\"\"\"\n\n\nclass InteractiveConfig: # noqa: WPS230, WPS214\n \"\"\"Manage Config interactively.\"\"\"\n\n def __init__(self, ctx, store, extras=False, max_width=None):\n \"\"\"Initialise InteractiveConfig instance.\"\"\"\n self.ctx = ctx\n self.store = store\n self.extras = extras\n self.project: Project = self.ctx.obj[\"project\"]\n self.settings: SettingsService = self.ctx.obj[\"settings\"]\n self.session = self.ctx.obj[\"session\"]\n self.tracker = self.ctx.obj[\"tracker\"]\n self.environment_service = EnvironmentService(self.project)\n self.max_width = max_width or 75 # noqa: WPS432\n self.console = Console()\n\n @property\n def configurable_settings(self):\n \"\"\"Return settings available for interactive configuration.\"\"\"\n return self.settings.config_with_metadata(\n session=self.session, extras=self.extras, redacted=True\n )\n\n @property\n def setting_choices(self):\n \"\"\"Return simplified setting choices, for easy printing.\"\"\"\n setting_choices = []\n for index, (name, config_metadata) in enumerate(\n self.configurable_settings.items()\n ):\n description = config_metadata[\"setting\"].description\n description = \"\" if description is None else description\n setting_choices.append((str(index + 1), name, description))\n return setting_choices\n\n def truncate(self, text: str) -> str:\n \"\"\"Truncate text.\"\"\"\n if len(text) >= self.max_width:\n return f\"{text[: self.max_width - 3]}...\"\n return text\n\n def _print_home_screen(self):\n \"\"\"Print screen for this interactive.\"\"\"\n markdown_template = Environment(loader=BaseLoader, autoescape=True).from_string(\n HOME_SCREEN_TEMPLATE\n )\n markdown_text = markdown_template.render(\n {\n \"plugin_color\": PLUGIN_COLOR,\n \"environment_color\": ENVIRONMENT_COLOR,\n \"setting_color\": SETTING_COLOR,\n \"plugin_name\": self.settings.label,\n \"plugin_url\": self.settings.docs_url,\n \"environment_name\": self.project.environment.name\n if self.project.environment\n else None,\n \"settings\": [\n {\n \"name\": name,\n \"description\": self.truncate(description.replace(\"\\n\", \" \")),\n }\n for _, name, description in self.setting_choices\n ],\n }\n )\n self.console.print(Panel(Text.from_markup(markdown_text)))\n\n def _print_setting(self, name, config_metadata, index, last_index):\n \"\"\"Print setting.\"\"\"\n value = config_metadata[\"value\"]\n source = config_metadata[\"source\"]\n setting_def = config_metadata[\"setting\"]\n details = Table(show_header=False)\n details.add_column(\"name\", justify=\"right\")\n details.add_column(\"value\")\n\n pre = [\n Text.from_markup(\n f\"[bold underline][{PLUGIN_COLOR}]{self.settings.label.capitalize()}[/{PLUGIN_COLOR}][/bold underline] Setting {index} of {last_index}\"\n )\n ]\n\n if setting_def.is_extra:\n pre.append(\n Text.from_markup(\n \"[yellow1]Custom Extra: plugin-specific options handled by Meltano[/yellow1]\"\n )\n )\n\n elif setting_def.is_custom:\n pre.append(\n Text.from_markup(\n \"[yellow1]Custom Setting: possibly unsupported by the plugin[/yellow1]\"\n )\n )\n\n details.add_row(\n Text(\"Name\"), Text.from_markup(f\"[{SETTING_COLOR}]{name}[/{SETTING_COLOR}]\")\n )\n\n if source is SettingValueStore.DEFAULT:\n label = \"default\"\n elif source is SettingValueStore.INHERITED:\n label = f\"inherited from 
'{self.settings.plugin.parent.name}'\"\n else:\n label = f\"from {source.label}\"\n expanded_value = value if value is not None else \"(empty string)\"\n unexpanded_value = config_metadata.get(\"unexpanded_value\")\n if unexpanded_value:\n current_value = (\n unexpanded_value if unexpanded_value is not None else \"(empty string)\"\n )\n\n details.add_row(Text(\"Current Expanded Value\"), Text(f\"{expanded_value}\"))\n else:\n current_value = value if value is not None else \"(empty string)\"\n details.add_row(\n Text(f\"Current Value ({label})\"),\n Text.from_markup(f\"[{VALUE_COLOR}]{current_value}[/{VALUE_COLOR}]\"),\n )\n\n if setting_def.kind:\n details.add_row(Text(\"Kind\"), Text(f\"{setting_def.kind}\"))\n if source is not SettingValueStore.DEFAULT:\n default_value = setting_def.value\n if default_value is not None:\n details.add_row(Text(\"Default\"), Text(f\"{default_value!r}\"))\n env_keys = [\n var.definition for var in self.settings.setting_env_vars(setting_def)\n ]\n\n details.add_row(Text(\"Env(s)\"), Text(f\"{', '.join(env_keys)}\"))\n post = []\n if setting_def.description:\n post.append(\n Group(\n Text(\" Description:\"),\n Panel(Markdown(setting_def.description, justify=\"left\")),\n )\n )\n\n docs_url = self.settings.docs_url\n if docs_url:\n post.append(\n Text.from_markup(\n f\" To learn more about {self.settings.label} and its settings, visit [link={docs_url}]{docs_url}[/link]\"\n )\n )\n\n self.console.print(Panel(Group(*pre, details, *post)))\n\n @staticmethod\n def _value_prompt(config_metadata):\n if config_metadata[\"setting\"].kind != SettingKind.OPTIONS:\n return (\n click.prompt(\n \"New value\",\n default=\"\",\n show_default=False,\n hide_input=True,\n confirmation_prompt=True,\n )\n if config_metadata[\"setting\"].is_redacted\n else click.prompt(\"New value\", default=\"\", show_default=False)\n )\n\n options_index = {\n str(index + 1): value\n for index, value in enumerate(\n (chs[\"label\"], chs[\"value\"])\n for chs in config_metadata[\"setting\"].options\n )\n }\n\n click.echo()\n for index, value in options_index.items():\n click.echo(f\"{index}. 
{value[0]}\")\n click.echo()\n chosen_index = click.prompt(\n \"Select value\",\n type=click.Choice(list(options_index.keys())),\n show_default=False,\n )\n return options_index[chosen_index][1]\n\n def configure(self, name, index=None, last_index=None, show_set_prompt=True):\n \"\"\"Configure a single setting interactively.\"\"\"\n config_metadata = next(\n (\n config_metadata\n for nme, config_metadata in self.configurable_settings.items()\n if nme == name\n )\n )\n self._print_setting(\n name=name,\n config_metadata=config_metadata,\n index=index,\n last_index=last_index,\n )\n\n action = \"y\"\n if show_set_prompt:\n try:\n click.echo()\n action = click.prompt(\n \"Set this value (Y/n) or exit (e)?\",\n default=\"y\",\n type=click.Choice([\"y\", \"n\", \"e\"], case_sensitive=False),\n )\n except click.Abort:\n action = \"e\"\n\n if action.lower() == \"y\":\n while True:\n click.echo()\n try:\n new_value = self._value_prompt(config_metadata)\n except click.Abort:\n click.echo()\n click.echo(\"Skipping...\")\n click.pause()\n return InteractionStatus.SKIP\n\n try:\n click.echo()\n self.set_value(\n setting_name=tuple(name.split(\".\")),\n value=new_value,\n store=self.store,\n interactive=True,\n )\n click.echo()\n click.pause()\n return InteractionStatus.SKIP\n except Exception as e:\n self.tracker.track_command_event(CliEvent.inflight)\n click.secho(f\"Failed to set value: {e}\", fg=\"red\")\n\n elif action.lower() == \"n\":\n return InteractionStatus.SKIP\n\n elif action.lower() == \"e\":\n return InteractionStatus.EXIT\n\n def configure_all(self):\n \"\"\"Configure all settings.\"\"\"\n numeric_choices = [idx for idx, _, _ in self.setting_choices]\n if not numeric_choices:\n click.secho(\n \"There are no settings to configure. \"\n \"For help, please see https://melta.no#no-plugin-settings-defined\",\n fg=\"yellow\",\n )\n self.tracker.track_command_event(CliEvent.completed)\n return\n\n while True:\n click.clear()\n self._print_home_screen()\n choices = [\"all\", *numeric_choices, \"e\"]\n\n branch = \"all\"\n try:\n click.echo()\n branch = click.prompt(\n \"Loop through all settings (all), select a setting by \"\n f\"number ({min(int(chs) for chs in numeric_choices)} - \"\n f\"{max(int(chs) for chs in numeric_choices)}), or exit (e)?\",\n type=click.Choice(choices, case_sensitive=False),\n default=\"all\",\n show_choices=False,\n )\n except click.Abort:\n click.echo()\n branch = \"e\"\n\n if branch == \"all\":\n for index, name, _ in self.setting_choices:\n click.clear()\n status = InteractionStatus.START\n while status not in {\n InteractionStatus.SKIP,\n InteractionStatus.EXIT,\n }:\n status = self.configure(\n name=name,\n index=index,\n last_index=len(self.setting_choices),\n )\n if status == InteractionStatus.EXIT:\n break\n elif branch.lower() == \"e\":\n self.tracker.track_command_event(CliEvent.completed)\n click.echo()\n return\n else:\n choice_name = next(\n nme for idx, nme, _ in self.setting_choices if idx == branch\n )\n click.clear()\n status = self.configure(\n name=choice_name,\n index=branch,\n last_index=len(self.setting_choices),\n show_set_prompt=False,\n )\n\n def set_value(self, setting_name, value, store, interactive=False):\n \"\"\"Set value helper function.\"\"\"\n settings = self.settings\n path = list(setting_name)\n try:\n value, metadata = settings.set_with_metadata(\n path, value, store=store, session=self.session\n )\n except StoreNotSupportedError as err:\n if interactive:\n self.tracker.track_command_event(CliEvent.inflight)\n else:\n 
self.tracker.track_command_event(CliEvent.aborted)\n raise CliError(\n f\"{settings.label.capitalize()} setting '{path}' could not be set in {store.label}: {err}\"\n ) from err\n\n name = metadata[\"name\"]\n store = metadata[\"store\"]\n is_redacted = metadata[\"setting\"] and metadata[\"setting\"].is_redacted\n if is_redacted:\n value = REDACTED_VALUE\n click.secho(\n f\"{settings.label.capitalize()} setting '{name}' was set in {store.label}: {value!r}\",\n fg=VALUE_COLOR,\n )\n\n current_value, source = settings.get_with_source(name, session=self.session)\n if source != store:\n if is_redacted:\n current_value = REDACTED_VALUE\n click.secho(\n f\"Current value is still: {current_value!r} (from {source.label})\",\n fg=\"yellow\",\n )\n\n if interactive:\n self.tracker.track_command_event(CliEvent.inflight)\n else:\n self.tracker.track_command_event(CliEvent.completed)\n", "path": "src/meltano/cli/interactive/config.py"}], "after_files": [{"content": "\"\"\"Interactive configuration handler.\"\"\"\n\nfrom __future__ import annotations\n\nfrom contextlib import suppress\n\n# NOTE: Importing the readline module enables the use of arrow\n# keys for text navigation during interactive config.\n# Refer to https://docs.python.org/3/library/readline.html\nwith suppress(ImportError):\n import readline # noqa: F401\n\nimport click\nfrom jinja2 import BaseLoader, Environment\nfrom rich.console import Console, Group\nfrom rich.markdown import Markdown\nfrom rich.panel import Panel\nfrom rich.table import Table\nfrom rich.text import Text\n\nfrom meltano.cli.interactive.utils import InteractionStatus\nfrom meltano.cli.utils import CliError\nfrom meltano.core.environment_service import EnvironmentService\nfrom meltano.core.project import Project\nfrom meltano.core.settings_service import (\n REDACTED_VALUE,\n SettingKind,\n SettingsService,\n SettingValueStore,\n)\nfrom meltano.core.settings_store import StoreNotSupportedError\nfrom meltano.core.tracking.contexts import CliEvent\n\nPLUGIN_COLOR = \"magenta\"\nENVIRONMENT_COLOR = \"orange1\"\nSETTING_COLOR = \"blue1\"\nVALUE_COLOR = \"green\"\n\nHOME_SCREEN_TEMPLATE = \"\"\"[bold underline]Configuring [{{ plugin_color }}]{{ plugin_name.capitalize() | safe }}[/{{ plugin_color }}] {% if environment_name %}in Environment[{{ environment_color }}]{{ environment_name }}[/{{ environment_color }}] {% endif %}Interactively[/bold underline]\n\nFollowing the prompts below, you will be guided through configuration of this plugin.\n\nMeltano is responsible for managing the configuration of all of a project\u2019s plugins.\nIt knows what settings are supported by each plugin, and how and when different types of plugins expect to be fed that configuration.\n\nTo determine the values of settings, Meltano will look in 4 main places, with each taking precedence over the next:\n\n 1. Environment variables\n 2. Your meltano.yml project file\n 3. Your project's system database\n 4. The default values set in the plugin's settings metadata\n\nWithin meltano.yml you can also associate configuration with a Meltano Environment, allowing you to define custom layers of configuration within your project.\n\nTo learn more about configuration options, see the [link=https://docs.meltano.com/guide/configuration]Meltano Configuration Guide[/link]\n\n[bold underline]Settings[/bold underline]\n{% for setting in settings %}\n{{ loop.index }}. 
[blue]{{ setting[\"name\"] }}[/blue]: {{ setting[\"description\"] | safe }}\n{%- endfor %}\n\n{% if plugin_url %}To learn more about {{ plugin_name | safe }} and its settings, visit [link={{ plugin_url }}]{{ plugin_url }}[/link]{% endif %}\n\"\"\"\n\n\nclass InteractiveConfig: # noqa: WPS230, WPS214\n \"\"\"Manage Config interactively.\"\"\"\n\n def __init__(self, ctx, store, extras=False, max_width=None):\n \"\"\"Initialise InteractiveConfig instance.\"\"\"\n self.ctx = ctx\n self.store = store\n self.extras = extras\n self.project: Project = self.ctx.obj[\"project\"]\n self.settings: SettingsService = self.ctx.obj[\"settings\"]\n self.session = self.ctx.obj[\"session\"]\n self.tracker = self.ctx.obj[\"tracker\"]\n self.environment_service = EnvironmentService(self.project)\n self.max_width = max_width or 75 # noqa: WPS432\n self.console = Console()\n\n @property\n def configurable_settings(self):\n \"\"\"Return settings available for interactive configuration.\"\"\"\n return self.settings.config_with_metadata(\n session=self.session, extras=self.extras, redacted=True\n )\n\n @property\n def setting_choices(self):\n \"\"\"Return simplified setting choices, for easy printing.\"\"\"\n setting_choices = []\n for index, (name, config_metadata) in enumerate(\n self.configurable_settings.items()\n ):\n description = config_metadata[\"setting\"].description\n description = \"\" if description is None else description\n setting_choices.append((str(index + 1), name, description))\n return setting_choices\n\n def truncate(self, text: str) -> str:\n \"\"\"Truncate text.\"\"\"\n if len(text) >= self.max_width:\n return f\"{text[: self.max_width - 3]}...\"\n return text\n\n def _print_home_screen(self):\n \"\"\"Print screen for this interactive.\"\"\"\n markdown_template = Environment(loader=BaseLoader, autoescape=True).from_string(\n HOME_SCREEN_TEMPLATE\n )\n markdown_text = markdown_template.render(\n {\n \"plugin_color\": PLUGIN_COLOR,\n \"environment_color\": ENVIRONMENT_COLOR,\n \"setting_color\": SETTING_COLOR,\n \"plugin_name\": self.settings.label,\n \"plugin_url\": self.settings.docs_url,\n \"environment_name\": self.project.environment.name\n if self.project.environment\n else None,\n \"settings\": [\n {\n \"name\": name,\n \"description\": self.truncate(description.replace(\"\\n\", \" \")),\n }\n for _, name, description in self.setting_choices\n ],\n }\n )\n self.console.print(Panel(Text.from_markup(markdown_text)))\n\n def _print_setting(self, name, config_metadata, index, last_index):\n \"\"\"Print setting.\"\"\"\n value = config_metadata[\"value\"]\n source = config_metadata[\"source\"]\n setting_def = config_metadata[\"setting\"]\n details = Table(show_header=False)\n details.add_column(\"name\", justify=\"right\")\n details.add_column(\"value\")\n\n pre = [\n Text.from_markup(\n f\"[bold underline][{PLUGIN_COLOR}]{self.settings.label.capitalize()}[/{PLUGIN_COLOR}][/bold underline] Setting {index} of {last_index}\"\n )\n ]\n\n if setting_def.is_extra:\n pre.append(\n Text.from_markup(\n \"[yellow1]Custom Extra: plugin-specific options handled by Meltano[/yellow1]\"\n )\n )\n\n elif setting_def.is_custom:\n pre.append(\n Text.from_markup(\n \"[yellow1]Custom Setting: possibly unsupported by the plugin[/yellow1]\"\n )\n )\n\n details.add_row(\n Text(\"Name\"), Text.from_markup(f\"[{SETTING_COLOR}]{name}[/{SETTING_COLOR}]\")\n )\n\n if source is SettingValueStore.DEFAULT:\n label = \"default\"\n elif source is SettingValueStore.INHERITED:\n label = f\"inherited from 
'{self.settings.plugin.parent.name}'\"\n else:\n label = f\"from {source.label}\"\n expanded_value = value if value is not None else \"(empty string)\"\n unexpanded_value = config_metadata.get(\"unexpanded_value\")\n if unexpanded_value:\n current_value = (\n unexpanded_value if unexpanded_value is not None else \"(empty string)\"\n )\n\n details.add_row(Text(\"Current Expanded Value\"), Text(f\"{expanded_value}\"))\n else:\n current_value = value if value is not None else \"(empty string)\"\n details.add_row(\n Text(f\"Current Value ({label})\"),\n Text.from_markup(f\"[{VALUE_COLOR}]{current_value}[/{VALUE_COLOR}]\"),\n )\n\n if setting_def.kind:\n details.add_row(Text(\"Kind\"), Text(f\"{setting_def.kind}\"))\n if source is not SettingValueStore.DEFAULT:\n default_value = setting_def.value\n if default_value is not None:\n details.add_row(Text(\"Default\"), Text(f\"{default_value!r}\"))\n env_keys = [\n var.definition for var in self.settings.setting_env_vars(setting_def)\n ]\n\n details.add_row(Text(\"Env(s)\"), Text(f\"{', '.join(env_keys)}\"))\n post = []\n if setting_def.description:\n post.append(\n Group(\n Text(\" Description:\"),\n Panel(Markdown(setting_def.description, justify=\"left\")),\n )\n )\n\n docs_url = self.settings.docs_url\n if docs_url:\n post.append(\n Text.from_markup(\n f\" To learn more about {self.settings.label} and its settings, visit [link={docs_url}]{docs_url}[/link]\"\n )\n )\n\n self.console.print(Panel(Group(*pre, details, *post)))\n\n @staticmethod\n def _value_prompt(config_metadata):\n if config_metadata[\"setting\"].kind != SettingKind.OPTIONS:\n return (\n click.prompt(\n \"New value\",\n default=\"\",\n show_default=False,\n hide_input=True,\n confirmation_prompt=True,\n )\n if config_metadata[\"setting\"].is_redacted\n else click.prompt(\"New value\", default=\"\", show_default=False)\n )\n\n options_index = {\n str(index + 1): value\n for index, value in enumerate(\n (chs[\"label\"], chs[\"value\"])\n for chs in config_metadata[\"setting\"].options\n )\n }\n\n click.echo()\n for index, value in options_index.items():\n click.echo(f\"{index}. 
{value[0]}\")\n click.echo()\n chosen_index = click.prompt(\n \"Select value\",\n type=click.Choice(list(options_index.keys())),\n show_default=False,\n )\n return options_index[chosen_index][1]\n\n def configure(self, name, index=None, last_index=None, show_set_prompt=True):\n \"\"\"Configure a single setting interactively.\"\"\"\n config_metadata = next(\n (\n config_metadata\n for nme, config_metadata in self.configurable_settings.items()\n if nme == name\n )\n )\n self._print_setting(\n name=name,\n config_metadata=config_metadata,\n index=index,\n last_index=last_index,\n )\n\n action = \"y\"\n if show_set_prompt:\n try:\n click.echo()\n action = click.prompt(\n \"Set this value (Y/n) or exit (e)?\",\n default=\"y\",\n type=click.Choice([\"y\", \"n\", \"e\"], case_sensitive=False),\n )\n except click.Abort:\n action = \"e\"\n\n if action.lower() == \"y\":\n while True:\n click.echo()\n try:\n new_value = self._value_prompt(config_metadata)\n except click.Abort:\n click.echo()\n click.echo(\"Skipping...\")\n click.pause()\n return InteractionStatus.SKIP\n\n try:\n click.echo()\n self.set_value(\n setting_name=tuple(name.split(\".\")),\n value=new_value,\n store=self.store,\n interactive=True,\n )\n click.echo()\n click.pause()\n return InteractionStatus.SKIP\n except Exception as e:\n self.tracker.track_command_event(CliEvent.inflight)\n click.secho(f\"Failed to set value: {e}\", fg=\"red\")\n\n elif action.lower() == \"n\":\n return InteractionStatus.SKIP\n\n elif action.lower() == \"e\":\n return InteractionStatus.EXIT\n\n def configure_all(self):\n \"\"\"Configure all settings.\"\"\"\n numeric_choices = [idx for idx, _, _ in self.setting_choices]\n if not numeric_choices:\n click.secho(\n \"There are no settings to configure. \"\n \"For help, please see https://melta.no#no-plugin-settings-defined\",\n fg=\"yellow\",\n )\n self.tracker.track_command_event(CliEvent.completed)\n return\n\n while True:\n click.clear()\n self._print_home_screen()\n choices = [\"all\", *numeric_choices, \"e\"]\n\n branch = \"all\"\n try:\n click.echo()\n branch = click.prompt(\n \"Loop through all settings (all), select a setting by \"\n f\"number ({min(int(chs) for chs in numeric_choices)} - \"\n f\"{max(int(chs) for chs in numeric_choices)}), or exit (e)?\",\n type=click.Choice(choices, case_sensitive=False),\n default=\"all\",\n show_choices=False,\n )\n except click.Abort:\n click.echo()\n branch = \"e\"\n\n if branch == \"all\":\n for index, name, _ in self.setting_choices:\n click.clear()\n status = InteractionStatus.START\n while status not in {\n InteractionStatus.SKIP,\n InteractionStatus.EXIT,\n }:\n status = self.configure(\n name=name,\n index=index,\n last_index=len(self.setting_choices),\n )\n if status == InteractionStatus.EXIT:\n break\n elif branch.lower() == \"e\":\n self.tracker.track_command_event(CliEvent.completed)\n click.echo()\n return\n else:\n choice_name = next(\n nme for idx, nme, _ in self.setting_choices if idx == branch\n )\n click.clear()\n status = self.configure(\n name=choice_name,\n index=branch,\n last_index=len(self.setting_choices),\n show_set_prompt=False,\n )\n\n def set_value(self, setting_name, value, store, interactive=False):\n \"\"\"Set value helper function.\"\"\"\n settings = self.settings\n path = list(setting_name)\n try:\n value, metadata = settings.set_with_metadata(\n path, value, store=store, session=self.session\n )\n except StoreNotSupportedError as err:\n if interactive:\n self.tracker.track_command_event(CliEvent.inflight)\n else:\n 
self.tracker.track_command_event(CliEvent.aborted)\n raise CliError(\n f\"{settings.label.capitalize()} setting '{path}' could not be set in {store.label}: {err}\"\n ) from err\n\n name = metadata[\"name\"]\n store = metadata[\"store\"]\n is_redacted = metadata[\"setting\"] and metadata[\"setting\"].is_redacted\n if is_redacted:\n value = REDACTED_VALUE\n click.secho(\n f\"{settings.label.capitalize()} setting '{name}' was set in {store.label}: {value!r}\",\n fg=VALUE_COLOR,\n )\n\n current_value, source = settings.get_with_source(name, session=self.session)\n if source != store:\n if is_redacted:\n current_value = REDACTED_VALUE\n click.secho(\n f\"Current value is still: {current_value!r} (from {source.label})\",\n fg=\"yellow\",\n )\n\n if interactive:\n self.tracker.track_command_event(CliEvent.inflight)\n else:\n self.tracker.track_command_event(CliEvent.completed)\n", "path": "src/meltano/cli/interactive/config.py"}]} |
gh_patches_debug_1452 | rasdani/github-patches | git_diff | nipy__nipype-3634 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENH: add STC partial volume correction to PETPVC interface
### Summary
Partial Volume Correction using Single-target correction (STC) has been added to PETPVC since the Nipype PETPVC interface was created, and it would therefore be ideal if this could be added to the interface as well.
### Actual behavior
The interface should include the 'STC' option for the 'pvc' flag.
### Expected behavior
### How to replicate the behavior
### Script/Workflow details
Please put URL to code or code here (if not too long).
### Platform details:
<!-- Please run the following code from your shell and place the output between the triple ticks, below.
python -c "import nipype; from pprint import pprint; pprint(nipype.get_info())"
-->
```
```
### Execution environment
Choose one
- Container [Tag: ???]
- My python environment inside container [Base Tag: ???]
- My python environment outside container
--- END ISSUE ---
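As a minimal sketch of what the requested option means at the interface level (assuming the usual `traits` behaviour, where assigning a value outside an Enum raises a `TraitError`), the snippet below runs both before and after the change and simply reports which situation it finds:

```python
from traits.api import TraitError

from nipype.interfaces.petpvc import PETPVC

pvc = PETPVC()
try:
    # Accepted only once "STC" is part of pvc_methods; with the current
    # enum definition this assignment fails validation instead.
    pvc.inputs.pvc = "STC"
    print("STC accepted; petpvc would be invoked with '-p STC'")
except TraitError:
    print("'STC' is not yet a valid choice for the pvc trait")
```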
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nipype/interfaces/petpvc.py`
Content:
```
1 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
2 # vi: set ft=python sts=4 ts=4 sw=4 et:
3 """PETPVC is a toolbox for partial volume correction in positron emission tomography."""
4 import os
5
6 from .base import (
7 TraitedSpec,
8 CommandLineInputSpec,
9 CommandLine,
10 File,
11 isdefined,
12 traits,
13 )
14 from ..utils.filemanip import fname_presuffix
15 from ..external.due import BibTeX
16
17 pvc_methods = [
18 "GTM",
19 "IY",
20 "IY+RL",
21 "IY+VC",
22 "LABBE",
23 "LABBE+MTC",
24 "LABBE+MTC+RL",
25 "LABBE+MTC+VC",
26 "LABBE+RBV",
27 "LABBE+RBV+RL",
28 "LABBE+RBV+VC",
29 "MG",
30 "MG+RL",
31 "MG+VC",
32 "MTC",
33 "MTC+RL",
34 "MTC+VC",
35 "RBV",
36 "RBV+RL",
37 "RBV+VC",
38 "RL",
39 "VC",
40 ]
41
42
43 class PETPVCInputSpec(CommandLineInputSpec):
44 in_file = File(desc="PET image file", exists=True, mandatory=True, argstr="-i %s")
45 out_file = File(desc="Output file", genfile=True, hash_files=False, argstr="-o %s")
46 mask_file = File(
47 desc="Mask image file", exists=True, mandatory=True, argstr="-m %s"
48 )
49 pvc = traits.Enum(
50 pvc_methods,
51 mandatory=True,
52 argstr="-p %s",
53 desc="""\
54 Desired PVC method:
55
56 * Geometric transfer matrix -- ``GTM``
57 * Labbe approach -- ``LABBE``
58 * Richardson-Lucy -- ``RL``
59 * Van-Cittert -- ``VC``
60 * Region-based voxel-wise correction -- ``RBV``
61 * RBV with Labbe -- ``LABBE+RBV``
62 * RBV with Van-Cittert -- ``RBV+VC``
63 * RBV with Richardson-Lucy -- ``RBV+RL``
64 * RBV with Labbe and Van-Cittert -- ``LABBE+RBV+VC``
65 * RBV with Labbe and Richardson-Lucy -- ``LABBE+RBV+RL``
66 * Multi-target correction -- ``MTC``
67 * MTC with Labbe -- ``LABBE+MTC``
68 * MTC with Van-Cittert -- ``MTC+VC``
69 * MTC with Richardson-Lucy -- ``MTC+RL``
70 * MTC with Labbe and Van-Cittert -- ``LABBE+MTC+VC``
71 * MTC with Labbe and Richardson-Lucy -- ``LABBE+MTC+RL``
72 * Iterative Yang -- ``IY``
73 * Iterative Yang with Van-Cittert -- ``IY+VC``
74 * Iterative Yang with Richardson-Lucy -- ``IY+RL``
75 * Muller Gartner -- ``MG``
76 * Muller Gartner with Van-Cittert -- ``MG+VC``
77 * Muller Gartner with Richardson-Lucy -- ``MG+RL``
78
79 """,
80 )
81 fwhm_x = traits.Float(
82 desc="The full-width at half maximum in mm along x-axis",
83 mandatory=True,
84 argstr="-x %.4f",
85 )
86 fwhm_y = traits.Float(
87 desc="The full-width at half maximum in mm along y-axis",
88 mandatory=True,
89 argstr="-y %.4f",
90 )
91 fwhm_z = traits.Float(
92 desc="The full-width at half maximum in mm along z-axis",
93 mandatory=True,
94 argstr="-z %.4f",
95 )
96 debug = traits.Bool(
97 desc="Prints debug information",
98 usedefault=True,
99 default_value=False,
100 argstr="-d",
101 )
102 n_iter = traits.Int(
103 desc="Number of iterations", default_value=10, usedefault=True, argstr="-n %d"
104 )
105 n_deconv = traits.Int(
106 desc="Number of deconvolution iterations",
107 default_value=10,
108 usedefault=True,
109 argstr="-k %d",
110 )
111 alpha = traits.Float(
112 desc="Alpha value", default_value=1.5, usedefault=True, argstr="-a %.4f"
113 )
114 stop_crit = traits.Float(
115 desc="Stopping criterion", default_value=0.01, usedefault=True, argstr="-s %.4f"
116 )
117
118
119 class PETPVCOutputSpec(TraitedSpec):
120 out_file = File(desc="Output file")
121
122
123 class PETPVC(CommandLine):
124 """Use PETPVC for partial volume correction of PET images.
125
126 PETPVC ([1]_, [2]_) is a software from the Nuclear Medicine Department
127 of the UCL University Hospital, London, UK.
128
129 Examples
130 --------
131 >>> from ..testing import example_data
132 >>> #TODO get data for PETPVC
133 >>> pvc = PETPVC()
134 >>> pvc.inputs.in_file = 'pet.nii.gz'
135 >>> pvc.inputs.mask_file = 'tissues.nii.gz'
136 >>> pvc.inputs.out_file = 'pet_pvc_rbv.nii.gz'
137 >>> pvc.inputs.pvc = 'RBV'
138 >>> pvc.inputs.fwhm_x = 2.0
139 >>> pvc.inputs.fwhm_y = 2.0
140 >>> pvc.inputs.fwhm_z = 2.0
141 >>> outs = pvc.run() #doctest: +SKIP
142
143 References
144 ----------
145 .. [1] K. Erlandsson, I. Buvat, P. H. Pretorius, B. A. Thomas, and B. F. Hutton,
146 "A review of partial volume correction techniques for emission tomography
147 and their applications in neurology, cardiology and oncology," Phys. Med.
148 Biol., vol. 57, no. 21, p. R119, 2012.
149 .. [2] https://github.com/UCL/PETPVC
150
151 """
152
153 input_spec = PETPVCInputSpec
154 output_spec = PETPVCOutputSpec
155 _cmd = "petpvc"
156
157 _references = [
158 {
159 "entry": BibTeX(
160 "@article{0031-9155-61-22-7975,"
161 "author={Benjamin A Thomas and Vesna Cuplov and Alexandre Bousse and "
162 "Adriana Mendes and Kris Thielemans and Brian F Hutton and Kjell Erlandsson},"
163 "title={PETPVC: a toolbox for performing partial volume correction "
164 "techniques in positron emission tomography},"
165 "journal={Physics in Medicine and Biology},"
166 "volume={61},"
167 "number={22},"
168 "pages={7975},"
169 "url={http://stacks.iop.org/0031-9155/61/i=22/a=7975},"
170 "doi={https://doi.org/10.1088/0031-9155/61/22/7975},"
171 "year={2016},"
172 "}"
173 ),
174 "description": "PETPVC software implementation publication",
175 "tags": ["implementation"],
176 }
177 ]
178
179 def _list_outputs(self):
180 outputs = self.output_spec().get()
181 outputs["out_file"] = self.inputs.out_file
182 if not isdefined(outputs["out_file"]):
183 method_name = self.inputs.pvc.lower()
184 outputs["out_file"] = self._gen_fname(
185 self.inputs.in_file, suffix=f"_{method_name}_pvc"
186 )
187
188 outputs["out_file"] = os.path.abspath(outputs["out_file"])
189 return outputs
190
191 def _gen_fname(
192 self, basename, cwd=None, suffix=None, change_ext=True, ext=".nii.gz"
193 ):
194 """Generate a filename based on the given parameters.
195
196 The filename will take the form: cwd/basename<suffix><ext>.
197 If change_ext is True, it will use the extensions specified in
198 <instance>inputs.output_type.
199
200 Parameters
201 ----------
202 basename : str
203 Filename to base the new filename on.
204 cwd : str
205 Path to prefix to the new filename. (default is os.getcwd())
206 suffix : str
207 Suffix to add to the `basename`. (defaults is '' )
208 change_ext : bool
209 Flag to change the filename extension to the given `ext`.
210 (Default is False)
211
212 Returns
213 -------
214 fname : str
215 New filename based on given parameters.
216
217 """
218 if basename == "":
219 msg = "Unable to generate filename for command %s. " % self.cmd
220 msg += "basename is not set!"
221 raise ValueError(msg)
222 if cwd is None:
223 cwd = os.getcwd()
224 if change_ext:
225 if suffix:
226 suffix = "".join((suffix, ext))
227 else:
228 suffix = ext
229 if suffix is None:
230 suffix = ""
231 fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)
232 return fname
233
234 def _gen_filename(self, name):
235 if name == "out_file":
236 return self._list_outputs()["out_file"]
237 return None
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py
--- a/nipype/interfaces/petpvc.py
+++ b/nipype/interfaces/petpvc.py
@@ -37,6 +37,7 @@
"RBV+VC",
"RL",
"VC",
+ "STC",
]
@@ -75,6 +76,7 @@
* Muller Gartner -- ``MG``
* Muller Gartner with Van-Cittert -- ``MG+VC``
* Muller Gartner with Richardson-Lucy -- ``MG+RL``
+ * Single-target correction -- ``STC``
""",
)
| {"golden_diff": "diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py\n--- a/nipype/interfaces/petpvc.py\n+++ b/nipype/interfaces/petpvc.py\n@@ -37,6 +37,7 @@\n \"RBV+VC\",\n \"RL\",\n \"VC\",\n+ \"STC\",\n ]\n \n \n@@ -75,6 +76,7 @@\n * Muller Gartner -- ``MG``\n * Muller Gartner with Van-Cittert -- ``MG+VC``\n * Muller Gartner with Richardson-Lucy -- ``MG+RL``\n+ * Single-target correction -- ``STC``\n \n \"\"\",\n )\n", "issue": "ENH: add STC partial volume correction to PETPVC interface\n### Summary\r\nPartial Volume Correction using Single-target correction (STC) has been added to PETPVC since the Nipype PETPVC interface was created, and it would therefore be ideal if this could be added to the interface as well.\r\n\r\n### Actual behavior\r\nThe interface should include the 'STC' option for the 'pvc' flag.\r\n\r\n### Expected behavior\r\n\r\n### How to replicate the behavior\r\n\r\n### Script/Workflow details\r\n\r\nPlease put URL to code or code here (if not too long).\r\n\r\n### Platform details:\r\n\r\n<!-- Please run the following code from your shell and place the output between the triple ticks, below.\r\npython -c \"import nipype; from pprint import pprint; pprint(nipype.get_info())\"\r\n-->\r\n\r\n```\r\n\r\n```\r\n\r\n### Execution environment\r\n\r\nChoose one\r\n- Container [Tag: ???]\r\n- My python environment inside container [Base Tag: ???]\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"PETPVC is a toolbox for partial volume correction in positron emission tomography.\"\"\"\nimport os\n\nfrom .base import (\n TraitedSpec,\n CommandLineInputSpec,\n CommandLine,\n File,\n isdefined,\n traits,\n)\nfrom ..utils.filemanip import fname_presuffix\nfrom ..external.due import BibTeX\n\npvc_methods = [\n \"GTM\",\n \"IY\",\n \"IY+RL\",\n \"IY+VC\",\n \"LABBE\",\n \"LABBE+MTC\",\n \"LABBE+MTC+RL\",\n \"LABBE+MTC+VC\",\n \"LABBE+RBV\",\n \"LABBE+RBV+RL\",\n \"LABBE+RBV+VC\",\n \"MG\",\n \"MG+RL\",\n \"MG+VC\",\n \"MTC\",\n \"MTC+RL\",\n \"MTC+VC\",\n \"RBV\",\n \"RBV+RL\",\n \"RBV+VC\",\n \"RL\",\n \"VC\",\n]\n\n\nclass PETPVCInputSpec(CommandLineInputSpec):\n in_file = File(desc=\"PET image file\", exists=True, mandatory=True, argstr=\"-i %s\")\n out_file = File(desc=\"Output file\", genfile=True, hash_files=False, argstr=\"-o %s\")\n mask_file = File(\n desc=\"Mask image file\", exists=True, mandatory=True, argstr=\"-m %s\"\n )\n pvc = traits.Enum(\n pvc_methods,\n mandatory=True,\n argstr=\"-p %s\",\n desc=\"\"\"\\\nDesired PVC method:\n\n * Geometric transfer matrix -- ``GTM``\n * Labbe approach -- ``LABBE``\n * Richardson-Lucy -- ``RL``\n * Van-Cittert -- ``VC``\n * Region-based voxel-wise correction -- ``RBV``\n * RBV with Labbe -- ``LABBE+RBV``\n * RBV with Van-Cittert -- ``RBV+VC``\n * RBV with Richardson-Lucy -- ``RBV+RL``\n * RBV with Labbe and Van-Cittert -- ``LABBE+RBV+VC``\n * RBV with Labbe and Richardson-Lucy -- ``LABBE+RBV+RL``\n * Multi-target correction -- ``MTC``\n * MTC with Labbe -- ``LABBE+MTC``\n * MTC with Van-Cittert -- ``MTC+VC``\n * MTC with Richardson-Lucy -- ``MTC+RL``\n * MTC with Labbe and Van-Cittert -- ``LABBE+MTC+VC``\n * MTC with Labbe and Richardson-Lucy -- ``LABBE+MTC+RL``\n * Iterative Yang -- ``IY``\n * Iterative Yang with Van-Cittert -- ``IY+VC``\n * Iterative Yang with Richardson-Lucy -- ``IY+RL``\n * Muller Gartner -- ``MG``\n * Muller Gartner with Van-Cittert -- 
``MG+VC``\n * Muller Gartner with Richardson-Lucy -- ``MG+RL``\n\n\"\"\",\n )\n fwhm_x = traits.Float(\n desc=\"The full-width at half maximum in mm along x-axis\",\n mandatory=True,\n argstr=\"-x %.4f\",\n )\n fwhm_y = traits.Float(\n desc=\"The full-width at half maximum in mm along y-axis\",\n mandatory=True,\n argstr=\"-y %.4f\",\n )\n fwhm_z = traits.Float(\n desc=\"The full-width at half maximum in mm along z-axis\",\n mandatory=True,\n argstr=\"-z %.4f\",\n )\n debug = traits.Bool(\n desc=\"Prints debug information\",\n usedefault=True,\n default_value=False,\n argstr=\"-d\",\n )\n n_iter = traits.Int(\n desc=\"Number of iterations\", default_value=10, usedefault=True, argstr=\"-n %d\"\n )\n n_deconv = traits.Int(\n desc=\"Number of deconvolution iterations\",\n default_value=10,\n usedefault=True,\n argstr=\"-k %d\",\n )\n alpha = traits.Float(\n desc=\"Alpha value\", default_value=1.5, usedefault=True, argstr=\"-a %.4f\"\n )\n stop_crit = traits.Float(\n desc=\"Stopping criterion\", default_value=0.01, usedefault=True, argstr=\"-s %.4f\"\n )\n\n\nclass PETPVCOutputSpec(TraitedSpec):\n out_file = File(desc=\"Output file\")\n\n\nclass PETPVC(CommandLine):\n \"\"\"Use PETPVC for partial volume correction of PET images.\n\n PETPVC ([1]_, [2]_) is a software from the Nuclear Medicine Department\n of the UCL University Hospital, London, UK.\n\n Examples\n --------\n >>> from ..testing import example_data\n >>> #TODO get data for PETPVC\n >>> pvc = PETPVC()\n >>> pvc.inputs.in_file = 'pet.nii.gz'\n >>> pvc.inputs.mask_file = 'tissues.nii.gz'\n >>> pvc.inputs.out_file = 'pet_pvc_rbv.nii.gz'\n >>> pvc.inputs.pvc = 'RBV'\n >>> pvc.inputs.fwhm_x = 2.0\n >>> pvc.inputs.fwhm_y = 2.0\n >>> pvc.inputs.fwhm_z = 2.0\n >>> outs = pvc.run() #doctest: +SKIP\n\n References\n ----------\n .. [1] K. Erlandsson, I. Buvat, P. H. Pretorius, B. A. Thomas, and B. F. Hutton,\n \"A review of partial volume correction techniques for emission tomography\n and their applications in neurology, cardiology and oncology,\" Phys. Med.\n Biol., vol. 57, no. 21, p. R119, 2012.\n .. 
[2] https://github.com/UCL/PETPVC\n\n \"\"\"\n\n input_spec = PETPVCInputSpec\n output_spec = PETPVCOutputSpec\n _cmd = \"petpvc\"\n\n _references = [\n {\n \"entry\": BibTeX(\n \"@article{0031-9155-61-22-7975,\"\n \"author={Benjamin A Thomas and Vesna Cuplov and Alexandre Bousse and \"\n \"Adriana Mendes and Kris Thielemans and Brian F Hutton and Kjell Erlandsson},\"\n \"title={PETPVC: a toolbox for performing partial volume correction \"\n \"techniques in positron emission tomography},\"\n \"journal={Physics in Medicine and Biology},\"\n \"volume={61},\"\n \"number={22},\"\n \"pages={7975},\"\n \"url={http://stacks.iop.org/0031-9155/61/i=22/a=7975},\"\n \"doi={https://doi.org/10.1088/0031-9155/61/22/7975},\"\n \"year={2016},\"\n \"}\"\n ),\n \"description\": \"PETPVC software implementation publication\",\n \"tags\": [\"implementation\"],\n }\n ]\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = self.inputs.out_file\n if not isdefined(outputs[\"out_file\"]):\n method_name = self.inputs.pvc.lower()\n outputs[\"out_file\"] = self._gen_fname(\n self.inputs.in_file, suffix=f\"_{method_name}_pvc\"\n )\n\n outputs[\"out_file\"] = os.path.abspath(outputs[\"out_file\"])\n return outputs\n\n def _gen_fname(\n self, basename, cwd=None, suffix=None, change_ext=True, ext=\".nii.gz\"\n ):\n \"\"\"Generate a filename based on the given parameters.\n\n The filename will take the form: cwd/basename<suffix><ext>.\n If change_ext is True, it will use the extensions specified in\n <instance>inputs.output_type.\n\n Parameters\n ----------\n basename : str\n Filename to base the new filename on.\n cwd : str\n Path to prefix to the new filename. (default is os.getcwd())\n suffix : str\n Suffix to add to the `basename`. (defaults is '' )\n change_ext : bool\n Flag to change the filename extension to the given `ext`.\n (Default is False)\n\n Returns\n -------\n fname : str\n New filename based on given parameters.\n\n \"\"\"\n if basename == \"\":\n msg = \"Unable to generate filename for command %s. 
\" % self.cmd\n msg += \"basename is not set!\"\n raise ValueError(msg)\n if cwd is None:\n cwd = os.getcwd()\n if change_ext:\n if suffix:\n suffix = \"\".join((suffix, ext))\n else:\n suffix = ext\n if suffix is None:\n suffix = \"\"\n fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)\n return fname\n\n def _gen_filename(self, name):\n if name == \"out_file\":\n return self._list_outputs()[\"out_file\"]\n return None\n", "path": "nipype/interfaces/petpvc.py"}], "after_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"PETPVC is a toolbox for partial volume correction in positron emission tomography.\"\"\"\nimport os\n\nfrom .base import (\n TraitedSpec,\n CommandLineInputSpec,\n CommandLine,\n File,\n isdefined,\n traits,\n)\nfrom ..utils.filemanip import fname_presuffix\nfrom ..external.due import BibTeX\n\npvc_methods = [\n \"GTM\",\n \"IY\",\n \"IY+RL\",\n \"IY+VC\",\n \"LABBE\",\n \"LABBE+MTC\",\n \"LABBE+MTC+RL\",\n \"LABBE+MTC+VC\",\n \"LABBE+RBV\",\n \"LABBE+RBV+RL\",\n \"LABBE+RBV+VC\",\n \"MG\",\n \"MG+RL\",\n \"MG+VC\",\n \"MTC\",\n \"MTC+RL\",\n \"MTC+VC\",\n \"RBV\",\n \"RBV+RL\",\n \"RBV+VC\",\n \"RL\",\n \"VC\",\n \"STC\",\n]\n\n\nclass PETPVCInputSpec(CommandLineInputSpec):\n in_file = File(desc=\"PET image file\", exists=True, mandatory=True, argstr=\"-i %s\")\n out_file = File(desc=\"Output file\", genfile=True, hash_files=False, argstr=\"-o %s\")\n mask_file = File(\n desc=\"Mask image file\", exists=True, mandatory=True, argstr=\"-m %s\"\n )\n pvc = traits.Enum(\n pvc_methods,\n mandatory=True,\n argstr=\"-p %s\",\n desc=\"\"\"\\\nDesired PVC method:\n\n * Geometric transfer matrix -- ``GTM``\n * Labbe approach -- ``LABBE``\n * Richardson-Lucy -- ``RL``\n * Van-Cittert -- ``VC``\n * Region-based voxel-wise correction -- ``RBV``\n * RBV with Labbe -- ``LABBE+RBV``\n * RBV with Van-Cittert -- ``RBV+VC``\n * RBV with Richardson-Lucy -- ``RBV+RL``\n * RBV with Labbe and Van-Cittert -- ``LABBE+RBV+VC``\n * RBV with Labbe and Richardson-Lucy -- ``LABBE+RBV+RL``\n * Multi-target correction -- ``MTC``\n * MTC with Labbe -- ``LABBE+MTC``\n * MTC with Van-Cittert -- ``MTC+VC``\n * MTC with Richardson-Lucy -- ``MTC+RL``\n * MTC with Labbe and Van-Cittert -- ``LABBE+MTC+VC``\n * MTC with Labbe and Richardson-Lucy -- ``LABBE+MTC+RL``\n * Iterative Yang -- ``IY``\n * Iterative Yang with Van-Cittert -- ``IY+VC``\n * Iterative Yang with Richardson-Lucy -- ``IY+RL``\n * Muller Gartner -- ``MG``\n * Muller Gartner with Van-Cittert -- ``MG+VC``\n * Muller Gartner with Richardson-Lucy -- ``MG+RL``\n * Single-target correction -- ``STC``\n\n\"\"\",\n )\n fwhm_x = traits.Float(\n desc=\"The full-width at half maximum in mm along x-axis\",\n mandatory=True,\n argstr=\"-x %.4f\",\n )\n fwhm_y = traits.Float(\n desc=\"The full-width at half maximum in mm along y-axis\",\n mandatory=True,\n argstr=\"-y %.4f\",\n )\n fwhm_z = traits.Float(\n desc=\"The full-width at half maximum in mm along z-axis\",\n mandatory=True,\n argstr=\"-z %.4f\",\n )\n debug = traits.Bool(\n desc=\"Prints debug information\",\n usedefault=True,\n default_value=False,\n argstr=\"-d\",\n )\n n_iter = traits.Int(\n desc=\"Number of iterations\", default_value=10, usedefault=True, argstr=\"-n %d\"\n )\n n_deconv = traits.Int(\n desc=\"Number of deconvolution iterations\",\n default_value=10,\n usedefault=True,\n argstr=\"-k %d\",\n )\n alpha = traits.Float(\n desc=\"Alpha value\", default_value=1.5, 
usedefault=True, argstr=\"-a %.4f\"\n )\n stop_crit = traits.Float(\n desc=\"Stopping criterion\", default_value=0.01, usedefault=True, argstr=\"-s %.4f\"\n )\n\n\nclass PETPVCOutputSpec(TraitedSpec):\n out_file = File(desc=\"Output file\")\n\n\nclass PETPVC(CommandLine):\n \"\"\"Use PETPVC for partial volume correction of PET images.\n\n PETPVC ([1]_, [2]_) is a software from the Nuclear Medicine Department\n of the UCL University Hospital, London, UK.\n\n Examples\n --------\n >>> from ..testing import example_data\n >>> #TODO get data for PETPVC\n >>> pvc = PETPVC()\n >>> pvc.inputs.in_file = 'pet.nii.gz'\n >>> pvc.inputs.mask_file = 'tissues.nii.gz'\n >>> pvc.inputs.out_file = 'pet_pvc_rbv.nii.gz'\n >>> pvc.inputs.pvc = 'RBV'\n >>> pvc.inputs.fwhm_x = 2.0\n >>> pvc.inputs.fwhm_y = 2.0\n >>> pvc.inputs.fwhm_z = 2.0\n >>> outs = pvc.run() #doctest: +SKIP\n\n References\n ----------\n .. [1] K. Erlandsson, I. Buvat, P. H. Pretorius, B. A. Thomas, and B. F. Hutton,\n \"A review of partial volume correction techniques for emission tomography\n and their applications in neurology, cardiology and oncology,\" Phys. Med.\n Biol., vol. 57, no. 21, p. R119, 2012.\n .. [2] https://github.com/UCL/PETPVC\n\n \"\"\"\n\n input_spec = PETPVCInputSpec\n output_spec = PETPVCOutputSpec\n _cmd = \"petpvc\"\n\n _references = [\n {\n \"entry\": BibTeX(\n \"@article{0031-9155-61-22-7975,\"\n \"author={Benjamin A Thomas and Vesna Cuplov and Alexandre Bousse and \"\n \"Adriana Mendes and Kris Thielemans and Brian F Hutton and Kjell Erlandsson},\"\n \"title={PETPVC: a toolbox for performing partial volume correction \"\n \"techniques in positron emission tomography},\"\n \"journal={Physics in Medicine and Biology},\"\n \"volume={61},\"\n \"number={22},\"\n \"pages={7975},\"\n \"url={http://stacks.iop.org/0031-9155/61/i=22/a=7975},\"\n \"doi={https://doi.org/10.1088/0031-9155/61/22/7975},\"\n \"year={2016},\"\n \"}\"\n ),\n \"description\": \"PETPVC software implementation publication\",\n \"tags\": [\"implementation\"],\n }\n ]\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = self.inputs.out_file\n if not isdefined(outputs[\"out_file\"]):\n method_name = self.inputs.pvc.lower()\n outputs[\"out_file\"] = self._gen_fname(\n self.inputs.in_file, suffix=f\"_{method_name}_pvc\"\n )\n\n outputs[\"out_file\"] = os.path.abspath(outputs[\"out_file\"])\n return outputs\n\n def _gen_fname(\n self, basename, cwd=None, suffix=None, change_ext=True, ext=\".nii.gz\"\n ):\n \"\"\"Generate a filename based on the given parameters.\n\n The filename will take the form: cwd/basename<suffix><ext>.\n If change_ext is True, it will use the extensions specified in\n <instance>inputs.output_type.\n\n Parameters\n ----------\n basename : str\n Filename to base the new filename on.\n cwd : str\n Path to prefix to the new filename. (default is os.getcwd())\n suffix : str\n Suffix to add to the `basename`. (defaults is '' )\n change_ext : bool\n Flag to change the filename extension to the given `ext`.\n (Default is False)\n\n Returns\n -------\n fname : str\n New filename based on given parameters.\n\n \"\"\"\n if basename == \"\":\n msg = \"Unable to generate filename for command %s. 
\" % self.cmd\n msg += \"basename is not set!\"\n raise ValueError(msg)\n if cwd is None:\n cwd = os.getcwd()\n if change_ext:\n if suffix:\n suffix = \"\".join((suffix, ext))\n else:\n suffix = ext\n if suffix is None:\n suffix = \"\"\n fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)\n return fname\n\n def _gen_filename(self, name):\n if name == \"out_file\":\n return self._list_outputs()[\"out_file\"]\n return None\n", "path": "nipype/interfaces/petpvc.py"}]} |
gh_patches_debug_1453 | rasdani/github-patches | git_diff | Pyomo__pyomo-797 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gams writer, splitting lines with > 80,000 characters
If a line is longer than 80,000 characters, it is split at the last space within the first 80,000 characters (function 'split_long_line' of 'gams_writer.py'). This mostly works, but it sometimes leads to an error when that space is followed by a '*' (multiplication symbol).
--- END ISSUE ---
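To make the failure mode concrete, the sketch below mirrors the strategy of 'split_long_line' (shown in the file below) with a tiny limit. The GAMS detail involved is that, by default, an asterisk in the first column of a line marks a comment, which is the most likely reason a split landing just before a '*' breaks the model:

```python
def split_at_last_space(line, limit=25):
    """Mimic split_long_line's strategy, with a small limit for illustration."""
    new_lines = ''
    while len(line) > limit:
        i = line.rfind(' ', 0, limit + 1)  # last space within the first limit characters
        if i < 0:
            raise RuntimeError("no space found to split on")
        new_lines += line[:i] + '\n'
        line = line[i + 1:]  # the delimiting space is dropped here
    return new_lines + line

print(split_at_last_space("obj.. z =e= 111111111111 *x + 2*y;"))
# obj.. z =e= 111111111111
# *x + 2*y;
#
# The continuation line now begins with '*', so GAMS reads it as a comment
# and the tail of the equation is lost.
```

Whether the right remedy is to keep the space at the start of the continuation line or to search for a safer split point is left to the requested patch.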
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/repn/plugins/gams_writer.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 #
12 # Problem Writer for GAMS Format Files
13 #
14
15 from six import StringIO, string_types, iteritems
16 from six.moves import xrange
17
18 from pyutilib.misc import PauseGC
19
20 from pyomo.core.expr import current as EXPR
21 from pyomo.core.expr.numvalue import (
22 is_fixed, value, as_numeric, native_types, native_numeric_types)
23 from pyomo.core.base import (
24 SymbolMap, ShortNameLabeler, NumericLabeler, Block, Constraint, Expression,
25 Objective, Var, Param, minimize, Suffix, SortComponents)
26 from pyomo.core.base.component import ActiveComponent
27 from pyomo.core.kernel.base import ICategorizedObject
28 from pyomo.opt import ProblemFormat
29 from pyomo.opt.base import AbstractProblemWriter, WriterFactory
30 from pyomo.repn.util import valid_expr_ctypes_minlp, \
31 valid_active_ctypes_minlp
32
33 import logging
34
35 logger = logging.getLogger('pyomo.core')
36
37 #
38 # A visitor pattern that creates a string for an expression
39 # that is compatible with the GAMS syntax.
40 #
41 class ToGamsVisitor(EXPR.ExpressionValueVisitor):
42
43 def __init__(self, smap, treechecker):
44 super(ToGamsVisitor, self).__init__()
45 self.smap = smap
46 self.treechecker = treechecker
47
48 def visit(self, node, values):
49 """ Visit nodes that have been expanded """
50 tmp = []
51 for i,val in enumerate(values):
52 arg = node._args_[i]
53
54 if arg is None:
55 tmp.append('Undefined')
56 elif arg.__class__ in native_numeric_types:
57 if arg < 0:
58 # Wrap negative values in parens to avoid double operator
59 tmp.append("(%s)" % val)
60 else:
61 tmp.append(val)
62 elif arg.__class__ in native_types:
63 tmp.append("'{0}'".format(val))
64 elif arg.is_variable_type():
65 if arg.is_fixed():
66 # bind fixed var values in parens to avoid double negatives
67 tmp.append("(%s)" % val)
68 else:
69 tmp.append(val)
70 elif (arg.is_expression_type() and
71 node._precedence() < arg._precedence()):
72 tmp.append("({0})".format(val))
73 else:
74 tmp.append(val)
75
76 if node.__class__ is EXPR.PowExpression:
77 # If the exponent is a positive integer, use the power() function.
78 # Otherwise, use the ** operator.
79 exponent = node.arg(1)
80 if (exponent.__class__ in native_numeric_types and
81 exponent == int(exponent)):
82 return "power({0}, {1})".format(tmp[0], tmp[1])
83 else:
84 return "{0} ** {1}".format(tmp[0], tmp[1])
85 else:
86 return node._to_string(tmp, None, self.smap, True)
87
88 def visiting_potential_leaf(self, node):
89 """
90 Visiting a potential leaf.
91
92 Return True if the node is not expanded.
93 """
94 if node is None:
95 return True, None
96
97 if node.__class__ in native_types:
98 return True, str(node)
99
100 if node.is_expression_type():
101 # we will descend into this, so type checking will happen later
102 if node.is_component_type():
103 self.treechecker(node)
104 return False, None
105
106 if node.is_component_type():
107 if self.ctype(node) not in valid_expr_ctypes_minlp:
108 # Make sure all components in active constraints
109 # are basic ctypes we know how to deal with.
110 raise RuntimeError(
111 "Unallowable component '%s' of type %s found in an active "
112 "constraint or objective.\nThe GAMS writer cannot export "
113 "expressions with this component type."
114 % (node.name, self.ctype(node).__name__))
115 if self.ctype(node) is not Var:
116 # For these, make sure it's on the right model. We can check
117 # Vars later since they don't disappear from the expressions
118 self.treechecker(node)
119
120 if node.is_variable_type():
121 if node.fixed:
122 return True, str(value(node))
123 label = self.smap.getSymbol(node)
124 return True, label
125
126 return True, str(value(node))
127
128 def ctype(self, comp):
129 if isinstance(comp, ICategorizedObject):
130 return comp.ctype
131 else:
132 return comp.type()
133
134
135 def expression_to_string(expr, treechecker, labeler=None, smap=None):
136 if labeler is not None:
137 if smap is None:
138 smap = SymbolMap()
139 smap.default_labeler = labeler
140 visitor = ToGamsVisitor(smap, treechecker)
141 return visitor.dfs_postorder_stack(expr)
142
143
144 class Categorizer(object):
145 """Class for representing categorized variables.
146
147 Given a list of variable names and a symbol map, categorizes the variable
148 names into the categories: binary, ints, positive and reals.
149
150 """
151
152 def __init__(self, var_list, symbol_map):
153 self.binary = []
154 self.ints = []
155 self.positive = []
156 self.reals = []
157
158 # categorize variables
159 for var in var_list:
160 v = symbol_map.getObject(var)
161 if v.is_binary():
162 self.binary.append(var)
163 elif v.is_integer():
164 if (v.has_lb() and (value(v.lb) >= 0)) and \
165 (v.has_ub() and (value(v.ub) <= 1)):
166 self.binary.append(var)
167 else:
168 self.ints.append(var)
169 elif value(v.lb) == 0:
170 self.positive.append(var)
171 else:
172 self.reals.append(var)
173
174 def __iter__(self):
175 """Iterate over all variables.
176
177 Yield a tuple containing the variables category and its name.
178 """
179 for category in ['binary', 'ints', 'positive', 'reals']:
180 var_list = getattr(self, category)
181 for var_name in var_list:
182 yield category, var_name
183
184
185 class StorageTreeChecker(object):
186 def __init__(self, model):
187 # blocks are hashable so we can use a normal set
188 self.tree = {model}
189 self.model = model
190 # add everything above the model
191 pb = self.parent_block(model)
192 while pb is not None:
193 self.tree.add(pb)
194 pb = self.parent_block(pb)
195
196 def __call__(self, comp, exception_flag=True):
197 if comp is self.model:
198 return True
199
200 # walk up tree until there are no more parents
201 seen = set()
202 pb = self.parent_block(comp)
203 while pb is not None:
204 if pb in self.tree:
205 self.tree.update(seen)
206 return True
207 seen.add(pb)
208 pb = self.parent_block(pb)
209
210 if exception_flag:
211 self.raise_error(comp)
212 else:
213 return False
214
215 def parent_block(self, comp):
216 if isinstance(comp, ICategorizedObject):
217 parent = comp.parent
218 while (parent is not None) and \
219 (not parent._is_heterogeneous_container):
220 parent = parent.parent
221 return parent
222 else:
223 return comp.parent_block()
224
225 def raise_error(self, comp):
226 raise RuntimeError(
227 "GAMS writer: found component '%s' not on same model tree.\n"
228 "All components must have the same parent model." % comp.name)
229
230
231 def split_long_line(line):
232 """
233 GAMS has an 80,000 character limit for lines, so split as many
234 times as needed so as to not have illegal lines.
235 """
236 new_lines = ''
237 while len(line) > 80000:
238 i = 80000
239 while line[i] != ' ':
240 # Walk backwards to find closest space,
241 # where it is safe to split to a new line
242 if i < 0:
243 raise RuntimeError(
244 "Found an 80,000+ character string with no spaces")
245 i -= 1
246 new_lines += line[:i] + '\n'
247 line = line[i + 1:]
248 new_lines += line
249 return new_lines
250
251
252 def _get_bound(exp):
253 if exp is None:
254 return None
255 if is_fixed(exp):
256 return value(exp)
257 raise ValueError("non-fixed bound or weight: " + str(exp))
258
259
260 @WriterFactory.register('gams', 'Generate the corresponding GAMS file')
261 class ProblemWriter_gams(AbstractProblemWriter):
262
263 def __init__(self):
264 AbstractProblemWriter.__init__(self, ProblemFormat.gams)
265
266 def __call__(self,
267 model,
268 output_filename,
269 solver_capability,
270 io_options):
271 """
272 Write a model in the GAMS modeling language format.
273
274 Keyword Arguments
275 -----------------
276 output_filename: str
277 Name of file to write GAMS model to. Optionally pass a file-like
278 stream and the model will be written to that instead.
279 io_options: dict
280 - warmstart=True
281 Warmstart by initializing model's variables to their values.
282 - symbolic_solver_labels=False
283 Use full Pyomo component names rather than
284 shortened symbols (slower, but useful for debugging).
285 - labeler=None
286 Custom labeler. Incompatible with symbolic_solver_labels.
287 - solver=None
288 If None, GAMS will use default solver for model type.
289 - mtype=None
290 Model type. If None, will chose from lp, nlp, mip, and minlp.
291 - add_options=None
292 List of additional lines to write directly
293 into model file before the solve statement.
294 For model attributes, <model name> is GAMS_MODEL.
295 - skip_trivial_constraints=False
296 Skip writing constraints whose body section is fixed.
297 - file_determinism=1
298 | How much effort do we want to put into ensuring the
299 | GAMS file is written deterministically for a Pyomo model:
300 | 0 : None
301 | 1 : sort keys of indexed components (default)
302 | 2 : sort keys AND sort names (over declaration order)
303 - put_results=None
304 Filename for optionally writing solution values and
305 marginals to (put_results).dat, and solver statuses
306 to (put_results + 'stat').dat.
307 """
308
309 # Make sure not to modify the user's dictionary,
310 # they may be reusing it outside of this call
311 io_options = dict(io_options)
312
313 # Use full Pyomo component names rather than
314 # shortened symbols (slower, but useful for debugging).
315 symbolic_solver_labels = io_options.pop("symbolic_solver_labels", False)
316
317 # Custom labeler option. Incompatible with symbolic_solver_labels.
318 labeler = io_options.pop("labeler", None)
319
320 # If None, GAMS will use default solver for model type.
321 solver = io_options.pop("solver", None)
322
323 # If None, will chose from lp, nlp, mip, and minlp.
324 mtype = io_options.pop("mtype", None)
325
326 # Lines to add before solve statement.
327 add_options = io_options.pop("add_options", None)
328
329 # Skip writing constraints whose body section is
330 # fixed (i.e., no variables)
331 skip_trivial_constraints = \
332 io_options.pop("skip_trivial_constraints", False)
333
334 # How much effort do we want to put into ensuring the
335 # GAMS file is written deterministically for a Pyomo model:
336 # 0 : None
337 # 1 : sort keys of indexed components (default)
338 # 2 : sort keys AND sort names (over declaration order)
339 file_determinism = io_options.pop("file_determinism", 1)
340 sorter_map = {0:SortComponents.unsorted,
341 1:SortComponents.deterministic,
342 2:SortComponents.sortBoth}
343 sort = sorter_map[file_determinism]
344
345 # Warmstart by initializing model's variables to their values.
346 warmstart = io_options.pop("warmstart", True)
347
348 # Filename for optionally writing solution values and marginals
349 # Set to True by GAMSSolver
350 put_results = io_options.pop("put_results", None)
351
352 if len(io_options):
353 raise ValueError(
354 "GAMS writer passed unrecognized io_options:\n\t" +
355 "\n\t".join("%s = %s"
356 % (k,v) for k,v in iteritems(io_options)))
357
358 if solver is not None and solver.upper() not in valid_solvers:
359 raise ValueError(
360 "GAMS writer passed unrecognized solver: %s" % solver)
361
362 if mtype is not None:
363 valid_mtypes = set([
364 'lp', 'qcp', 'nlp', 'dnlp', 'rmip', 'mip', 'rmiqcp', 'rminlp',
365 'miqcp', 'minlp', 'rmpec', 'mpec', 'mcp', 'cns', 'emp'])
366 if mtype.lower() not in valid_mtypes:
367 raise ValueError("GAMS writer passed unrecognized "
368 "model type: %s" % mtype)
369 if (solver is not None and
370 mtype.upper() not in valid_solvers[solver.upper()]):
371 raise ValueError("GAMS writer passed solver (%s) "
372 "unsuitable for given model type (%s)"
373 % (solver, mtype))
374
375 if output_filename is None:
376 output_filename = model.name + ".gms"
377
378 if symbolic_solver_labels and (labeler is not None):
379 raise ValueError("GAMS writer: Using both the "
380 "'symbolic_solver_labels' and 'labeler' "
381 "I/O options is forbidden")
382
383 if symbolic_solver_labels:
384 var_labeler = con_labeler = ShortNameLabeler(63, '_')
385 elif labeler is None:
386 var_labeler = NumericLabeler('x')
387 con_labeler = NumericLabeler('c')
388 else:
389 var_labeler = con_labeler = labeler
390
391 var_list = []
392
393 def var_recorder(obj):
394 ans = var_labeler(obj)
395 try:
396 if obj.is_variable_type():
397 var_list.append(ans)
398 except:
399 pass
400 return ans
401
402 def var_label(obj):
403 #if obj.is_fixed():
404 # return str(value(obj))
405 return symbolMap.getSymbol(obj, var_recorder)
406
407 symbolMap = SymbolMap(var_label)
408
409 # when sorting, there are a non-trivial number of
410 # temporary objects created. these all yield
411 # non-circular references, so disable GC - the
412 # overhead is non-trivial, and because references
413 # are non-circular, everything will be collected
414 # immediately anyway.
415 with PauseGC() as pgc:
416 try:
417 if isinstance(output_filename, string_types):
418 output_file = open(output_filename, "w")
419 else:
420 # Support passing of stream such as a StringIO
421 # on which to write the model file
422 output_file = output_filename
423 self._write_model(
424 model=model,
425 output_file=output_file,
426 solver_capability=solver_capability,
427 var_list=var_list,
428 var_label=var_label,
429 symbolMap=symbolMap,
430 con_labeler=con_labeler,
431 sort=sort,
432 skip_trivial_constraints=skip_trivial_constraints,
433 warmstart=warmstart,
434 solver=solver,
435 mtype=mtype,
436 add_options=add_options,
437 put_results=put_results
438 )
439 finally:
440 if isinstance(output_filename, string_types):
441 output_file.close()
442
443 return output_filename, symbolMap
444
445 def _write_model(self,
446 model,
447 output_file,
448 solver_capability,
449 var_list,
450 var_label,
451 symbolMap,
452 con_labeler,
453 sort,
454 skip_trivial_constraints,
455 warmstart,
456 solver,
457 mtype,
458 add_options,
459 put_results):
460 constraint_names = []
461 ConstraintIO = StringIO()
462 linear = True
463 linear_degree = set([0,1])
464
465 # Make sure there are no strange ActiveComponents. The expression
466 # walker will handle strange things in constraints later.
467 model_ctypes = model.collect_ctypes(active=True)
468 invalids = set()
469 for t in (model_ctypes - valid_active_ctypes_minlp):
470 if issubclass(t, ActiveComponent):
471 invalids.add(t)
472 if len(invalids):
473 invalids = [t.__name__ for t in invalids]
474 raise RuntimeError(
475 "Unallowable active component(s) %s.\nThe GAMS writer cannot "
476 "export models with this component type." %
477 ", ".join(invalids))
478
479 tc = StorageTreeChecker(model)
480
481 # Walk through the model and generate the constraint definition
482 # for all active constraints. Any Vars / Expressions that are
483 # encountered will be added to the var_list due to the labeler
484 # defined above.
485 for con in model.component_data_objects(Constraint,
486 active=True,
487 sort=sort):
488
489 if not con.has_lb() and not con.has_ub():
490 assert not con.equality
491 continue # non-binding, so skip
492
493 con_body = as_numeric(con.body)
494 if skip_trivial_constraints and con_body.is_fixed():
495 continue
496 if linear:
497 if con_body.polynomial_degree() not in linear_degree:
498 linear = False
499
500 cName = symbolMap.getSymbol(con, con_labeler)
501 if con.equality:
502 constraint_names.append('%s' % cName)
503 ConstraintIO.write('%s.. %s =e= %s ;\n' % (
504 constraint_names[-1],
505 expression_to_string(con_body, tc, smap=symbolMap),
506 _get_bound(con.upper)
507 ))
508 else:
509 if con.has_lb():
510 constraint_names.append('%s_lo' % cName)
511 ConstraintIO.write('%s.. %s =l= %s ;\n' % (
512 constraint_names[-1],
513 _get_bound(con.lower),
514 expression_to_string(con_body, tc, smap=symbolMap)
515 ))
516 if con.has_ub():
517 constraint_names.append('%s_hi' % cName)
518 ConstraintIO.write('%s.. %s =l= %s ;\n' % (
519 constraint_names[-1],
520 expression_to_string(con_body, tc, smap=symbolMap),
521 _get_bound(con.upper)
522 ))
523
524 obj = list(model.component_data_objects(Objective,
525 active=True,
526 sort=sort))
527 if len(obj) != 1:
528 raise RuntimeError(
529 "GAMS writer requires exactly one active objective (found %s)"
530 % (len(obj)))
531 obj = obj[0]
532 if linear:
533 if obj.expr.polynomial_degree() not in linear_degree:
534 linear = False
535 oName = symbolMap.getSymbol(obj, con_labeler)
536 constraint_names.append(oName)
537 ConstraintIO.write('%s.. GAMS_OBJECTIVE =e= %s ;\n' % (
538 oName,
539 expression_to_string(obj.expr, tc, smap=symbolMap)
540 ))
541
542 # Categorize the variables that we found
543 categorized_vars = Categorizer(var_list, symbolMap)
544
545 # Write the GAMS model
546 # $offdigit ignores extra precise digits instead of erroring
547 output_file.write("$offdigit\n\n")
548 output_file.write("EQUATIONS\n\t")
549 output_file.write("\n\t".join(constraint_names))
550 if categorized_vars.binary:
551 output_file.write(";\n\nBINARY VARIABLES\n\t")
552 output_file.write("\n\t".join(categorized_vars.binary))
553 if categorized_vars.ints:
554 output_file.write(";\n\nINTEGER VARIABLES")
555 output_file.write("\n\t")
556 output_file.write("\n\t".join(categorized_vars.ints))
557 if categorized_vars.positive:
558 output_file.write(";\n\nPOSITIVE VARIABLES\n\t")
559 output_file.write("\n\t".join(categorized_vars.positive))
560 output_file.write(";\n\nVARIABLES\n\tGAMS_OBJECTIVE\n\t")
561 output_file.write("\n\t".join(categorized_vars.reals))
562 output_file.write(";\n\n")
563
564 for line in ConstraintIO.getvalue().splitlines():
565 if len(line) > 80000:
566 line = split_long_line(line)
567 output_file.write(line + "\n")
568
569 output_file.write("\n")
570
571 warn_int_bounds = False
572 for category, var_name in categorized_vars:
573 var = symbolMap.getObject(var_name)
574 tc(var)
575 if category == 'positive':
576 if var.has_ub():
577 output_file.write("%s.up = %s;\n" %
578 (var_name, _get_bound(var.ub)))
579 elif category == 'ints':
580 if not var.has_lb():
581 warn_int_bounds = True
582 # GAMS doesn't allow -INF lower bound for ints
583 logger.warning("Lower bound for integer variable %s set "
584 "to -1.0E+100." % var.name)
585 output_file.write("%s.lo = -1.0E+100;\n" % (var_name))
586 elif value(var.lb) != 0:
587 output_file.write("%s.lo = %s;\n" %
588 (var_name, _get_bound(var.lb)))
589 if not var.has_ub():
590 warn_int_bounds = True
591 # GAMS has an option value called IntVarUp that is the
592 # default upper integer bound, which it applies if the
593 # integer's upper bound is INF. This option maxes out at
594 # 2147483647, so we can go higher by setting the bound.
595 logger.warning("Upper bound for integer variable %s set "
596 "to +1.0E+100." % var.name)
597 output_file.write("%s.up = +1.0E+100;\n" % (var_name))
598 else:
599 output_file.write("%s.up = %s;\n" %
600 (var_name, _get_bound(var.ub)))
601 elif category == 'binary':
602 if var.has_lb() and value(var.lb) != 0:
603 output_file.write("%s.lo = %s;\n" %
604 (var_name, _get_bound(var.lb)))
605 if var.has_ub() and value(var.ub) != 1:
606 output_file.write("%s.up = %s;\n" %
607 (var_name, _get_bound(var.ub)))
608 elif category == 'reals':
609 if var.has_lb():
610 output_file.write("%s.lo = %s;\n" %
611 (var_name, _get_bound(var.lb)))
612 if var.has_ub():
613 output_file.write("%s.up = %s;\n" %
614 (var_name, _get_bound(var.ub)))
615 else:
616 raise KeyError('Category %s not supported' % category)
617 if warmstart and var.value is not None:
618 output_file.write("%s.l = %s;\n" % (var_name, var.value))
619
620 if warn_int_bounds:
621 logger.warning(
622 "GAMS requires finite bounds for integer variables. 1.0E100 "
623 "is as extreme as GAMS will define, and should be enough to "
624 "appear unbounded. If the solver cannot handle this bound, "
625 "explicitly set a smaller bound on the pyomo model, or try a "
626 "different GAMS solver.")
627
628 model_name = "GAMS_MODEL"
629 output_file.write("\nMODEL %s /all/ ;\n" % model_name)
630
631 if mtype is None:
632 mtype = ('lp','nlp','mip','minlp')[
633 (0 if linear else 1) +
634 (2 if (categorized_vars.binary or categorized_vars.ints)
635 else 0)]
636
637 if solver is not None:
638 if mtype.upper() not in valid_solvers[solver.upper()]:
639 raise ValueError("GAMS writer passed solver (%s) "
640 "unsuitable for model type (%s)"
641 % (solver, mtype))
642 output_file.write("option %s=%s;\n" % (mtype, solver))
643
644 if add_options is not None:
645 output_file.write("\n* START USER ADDITIONAL OPTIONS\n")
646 for line in add_options:
647 output_file.write('\n' + line)
648 output_file.write("\n\n* END USER ADDITIONAL OPTIONS\n\n")
649
650 output_file.write(
651 "SOLVE %s USING %s %simizing GAMS_OBJECTIVE;\n\n"
652 % ( model_name,
653 mtype,
654 'min' if obj.sense == minimize else 'max'))
655
656 # Set variables to store certain statuses and attributes
657 stat_vars = ['MODELSTAT', 'SOLVESTAT', 'OBJEST', 'OBJVAL', 'NUMVAR',
658 'NUMEQU', 'NUMDVAR', 'NUMNZ', 'ETSOLVE']
659 output_file.write("Scalars MODELSTAT 'model status', "
660 "SOLVESTAT 'solve status';\n")
661 output_file.write("MODELSTAT = %s.modelstat;\n" % model_name)
662 output_file.write("SOLVESTAT = %s.solvestat;\n\n" % model_name)
663
664 output_file.write("Scalar OBJEST 'best objective', "
665 "OBJVAL 'objective value';\n")
666 output_file.write("OBJEST = %s.objest;\n" % model_name)
667 output_file.write("OBJVAL = %s.objval;\n\n" % model_name)
668
669 output_file.write("Scalar NUMVAR 'number of variables';\n")
670 output_file.write("NUMVAR = %s.numvar\n\n" % model_name)
671
672 output_file.write("Scalar NUMEQU 'number of equations';\n")
673 output_file.write("NUMEQU = %s.numequ\n\n" % model_name)
674
675 output_file.write("Scalar NUMDVAR 'number of discrete variables';\n")
676 output_file.write("NUMDVAR = %s.numdvar\n\n" % model_name)
677
678 output_file.write("Scalar NUMNZ 'number of nonzeros';\n")
679 output_file.write("NUMNZ = %s.numnz\n\n" % model_name)
680
681 output_file.write("Scalar ETSOLVE 'time to execute solve statement';\n")
682 output_file.write("ETSOLVE = %s.etsolve\n\n" % model_name)
683
684 if put_results is not None:
685 results = put_results + '.dat'
686 output_file.write("\nfile results /'%s'/;" % results)
687 output_file.write("\nresults.nd=15;")
688 output_file.write("\nresults.nw=21;")
689 output_file.write("\nput results;")
690 output_file.write("\nput 'SYMBOL : LEVEL : MARGINAL' /;")
691 for var in var_list:
692 output_file.write("\nput %s %s.l %s.m /;" % (var, var, var))
693 for con in constraint_names:
694 output_file.write("\nput %s %s.l %s.m /;" % (con, con, con))
695 output_file.write("\nput GAMS_OBJECTIVE GAMS_OBJECTIVE.l "
696 "GAMS_OBJECTIVE.m;\n")
697
698 statresults = put_results + 'stat.dat'
699 output_file.write("\nfile statresults /'%s'/;" % statresults)
700 output_file.write("\nstatresults.nd=15;")
701 output_file.write("\nstatresults.nw=21;")
702 output_file.write("\nput statresults;")
703 output_file.write("\nput 'SYMBOL : VALUE' /;")
704 for stat in stat_vars:
705 output_file.write("\nput '%s' %s /;\n" % (stat, stat))
706
707
708 valid_solvers = {
709 'ALPHAECP': {'MINLP','MIQCP'},
710 'AMPL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},
711 'ANTIGONE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
712 'BARON': {'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
713 'BDMLP': {'LP','MIP','RMIP'},
714 'BDMLPD': {'LP','RMIP'},
715 'BENCH': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
716 'BONMIN': {'MINLP','MIQCP'},
717 'BONMINH': {'MINLP','MIQCP'},
718 'CBC': {'LP','MIP','RMIP'},
719 'COINBONMIN': {'MINLP','MIQCP'},
720 'COINCBC': {'LP','MIP','RMIP'},
721 'COINCOUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
722 'COINIPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
723 'COINOS': {'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
724 'COINSCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
725 'CONOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
726 'CONOPT3': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
727 'CONOPT4': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
728 'CONOPTD': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
729 'CONVERT': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
730 'CONVERTD': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},
731 'COUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
732 'CPLEX': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},
733 'CPLEXD': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},
734 'CPOPTIMIZER': {'MIP','MINLP','MIQCP'},
735 'DE': {'EMP'},
736 'DECIS': {'EMP'},
737 'DECISC': {'LP'},
738 'DECISM': {'LP'},
739 'DICOPT': {'MINLP','MIQCP'},
740 'DICOPTD': {'MINLP','MIQCP'},
741 'EXAMINER': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
742 'EXAMINER2': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
743 'GAMSCHK': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
744 'GLOMIQO': {'QCP','MIQCP','RMIQCP'},
745 'GUROBI': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},
746 'GUSS': {'LP', 'MIP', 'NLP', 'MCP', 'CNS', 'DNLP', 'MINLP', 'QCP', 'MIQCP'},
747 'IPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
748 'IPOPTH': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
749 'JAMS': {'EMP'},
750 'KESTREL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},
751 'KNITRO': {'LP','RMIP','NLP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
752 'LGO': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},
753 'LGOD': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},
754 'LINDO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},
755 'LINDOGLOBAL': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
756 'LINGO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP'},
757 'LOCALSOLVER': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
758 'LOGMIP': {'EMP'},
759 'LS': {'LP','RMIP'},
760 'MILES': {'MCP'},
761 'MILESE': {'MCP'},
762 'MINOS': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
763 'MINOS5': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
764 'MINOS55': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
765 'MOSEK': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','QCP','MIQCP','RMIQCP'},
766 'MPECDUMP': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},
767 'MPSGE': {},
768 'MSNLP': {'NLP','DNLP','RMINLP','QCP','RMIQCP'},
769 'NLPEC': {'MCP','MPEC','RMPEC'},
770 'OQNLP': {'NLP', 'DNLP', 'MINLP', 'QCP', 'MIQCP'},
771 'OS': {'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
772 'OSICPLEX': {'LP','MIP','RMIP'},
773 'OSIGUROBI': {'LP','MIP','RMIP'},
774 'OSIMOSEK': {'LP','MIP','RMIP'},
775 'OSISOPLEX': {'LP','RMIP'},
776 'OSIXPRESS': {'LP','MIP','RMIP'},
777 'PATH': {'MCP','CNS'},
778 'PATHC': {'MCP','CNS'},
779 'PATHNLP': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},
780 'PYOMO': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},
781 'QUADMINOS': {'LP'},
782 'SBB': {'MINLP','MIQCP'},
783 'SCENSOLVER': {'LP','MIP','RMIP','NLP','MCP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
784 'SCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},
785 'SNOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},
786 'SOPLEX': {'LP','RMIP'},
787 'XA': {'LP','MIP','RMIP'},
788 'XPRESS': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'}
789 }
790
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyomo/repn/plugins/gams_writer.py b/pyomo/repn/plugins/gams_writer.py
--- a/pyomo/repn/plugins/gams_writer.py
+++ b/pyomo/repn/plugins/gams_writer.py
@@ -244,7 +244,9 @@
"Found an 80,000+ character string with no spaces")
i -= 1
new_lines += line[:i] + '\n'
- line = line[i + 1:]
+ # the space will be the first character in the next line,
+ # so that the line doesn't start with the comment character '*'
+ line = line[i:]
new_lines += line
return new_lines
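
The one-line change above works because GAMS treats `*` in the first column as a comment marker: when the old code dropped the space it split on, the continuation line began with whatever character followed that space, which could be `*`. A minimal sketch of the difference, with the 80,000-character limit shrunk to 10 and the error handling of `split_long_line` left out; `mini_split` and the constraint string are made up for illustration:

```python
def mini_split(line, limit=10, keep_space=False):
    # Scaled-down version of split_long_line: walk back from `limit`
    # to the nearest space and break the line there.
    out = ''
    while len(line) > limit:
        i = limit
        while line[i] != ' ':
            i -= 1
        out += line[:i] + '\n'
        # Pre-patch slicing drops the space; post-patch slicing keeps it
        # as the first character of the continuation line.
        line = line[i:] if keep_space else line[i + 1:]
    return out + line

expr = 'c1.. x1 *x2 + x3 =e= 1 ;'
print(mini_split(expr))                   # a continuation line starts with '*'
print(mini_split(expr, keep_space=True))  # continuation lines start with ' '
```

With the old slicing the second chunk is `*x2 + x3`, which GAMS would read as a comment line; with the patched slicing it is ` *x2 + x3`, still part of the equation.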
| {"golden_diff": "diff --git a/pyomo/repn/plugins/gams_writer.py b/pyomo/repn/plugins/gams_writer.py\n--- a/pyomo/repn/plugins/gams_writer.py\n+++ b/pyomo/repn/plugins/gams_writer.py\n@@ -244,7 +244,9 @@\n \"Found an 80,000+ character string with no spaces\")\n i -= 1\n new_lines += line[:i] + '\\n'\n- line = line[i + 1:]\n+ # the space will be the first character in the next line,\n+ # so that the line doesn't start with the comment character '*'\n+ line = line[i:]\n new_lines += line\n return new_lines\n", "issue": "gams writer, splitting lines with characters > 80,000\nif line is > 80,000 the line is splitted at the last space within the fist 80,000 characters '(function 'split_long_line' of 'gams_writer.py' This mostly works but sometimes leads to an error if the space is followed by an '*' (multiply symbol). \r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n#\n# Problem Writer for GAMS Format Files\n#\n\nfrom six import StringIO, string_types, iteritems\nfrom six.moves import xrange\n\nfrom pyutilib.misc import PauseGC\n\nfrom pyomo.core.expr import current as EXPR\nfrom pyomo.core.expr.numvalue import (\n is_fixed, value, as_numeric, native_types, native_numeric_types)\nfrom pyomo.core.base import (\n SymbolMap, ShortNameLabeler, NumericLabeler, Block, Constraint, Expression,\n Objective, Var, Param, minimize, Suffix, SortComponents)\nfrom pyomo.core.base.component import ActiveComponent\nfrom pyomo.core.kernel.base import ICategorizedObject\nfrom pyomo.opt import ProblemFormat\nfrom pyomo.opt.base import AbstractProblemWriter, WriterFactory\nfrom pyomo.repn.util import valid_expr_ctypes_minlp, \\\n valid_active_ctypes_minlp\n\nimport logging\n\nlogger = logging.getLogger('pyomo.core')\n\n#\n# A visitor pattern that creates a string for an expression\n# that is compatible with the GAMS syntax.\n#\nclass ToGamsVisitor(EXPR.ExpressionValueVisitor):\n\n def __init__(self, smap, treechecker):\n super(ToGamsVisitor, self).__init__()\n self.smap = smap\n self.treechecker = treechecker\n\n def visit(self, node, values):\n \"\"\" Visit nodes that have been expanded \"\"\"\n tmp = []\n for i,val in enumerate(values):\n arg = node._args_[i]\n\n if arg is None:\n tmp.append('Undefined')\n elif arg.__class__ in native_numeric_types:\n if arg < 0:\n # Wrap negative values in parens to avoid double operator\n tmp.append(\"(%s)\" % val)\n else:\n tmp.append(val)\n elif arg.__class__ in native_types:\n tmp.append(\"'{0}'\".format(val))\n elif arg.is_variable_type():\n if arg.is_fixed():\n # bind fixed var values in parens to avoid double negatives\n tmp.append(\"(%s)\" % val)\n else:\n tmp.append(val)\n elif (arg.is_expression_type() and\n node._precedence() < arg._precedence()):\n tmp.append(\"({0})\".format(val))\n else:\n tmp.append(val)\n\n if node.__class__ is EXPR.PowExpression:\n # If the exponent is a positive integer, use the power() function.\n # Otherwise, use the ** operator.\n exponent = node.arg(1)\n if (exponent.__class__ in native_numeric_types and\n 
exponent == int(exponent)):\n return \"power({0}, {1})\".format(tmp[0], tmp[1])\n else:\n return \"{0} ** {1}\".format(tmp[0], tmp[1])\n else:\n return node._to_string(tmp, None, self.smap, True)\n\n def visiting_potential_leaf(self, node):\n \"\"\"\n Visiting a potential leaf.\n\n Return True if the node is not expanded.\n \"\"\"\n if node is None:\n return True, None\n\n if node.__class__ in native_types:\n return True, str(node)\n\n if node.is_expression_type():\n # we will descend into this, so type checking will happen later\n if node.is_component_type():\n self.treechecker(node)\n return False, None\n\n if node.is_component_type():\n if self.ctype(node) not in valid_expr_ctypes_minlp:\n # Make sure all components in active constraints\n # are basic ctypes we know how to deal with.\n raise RuntimeError(\n \"Unallowable component '%s' of type %s found in an active \"\n \"constraint or objective.\\nThe GAMS writer cannot export \"\n \"expressions with this component type.\"\n % (node.name, self.ctype(node).__name__))\n if self.ctype(node) is not Var:\n # For these, make sure it's on the right model. We can check\n # Vars later since they don't disappear from the expressions\n self.treechecker(node)\n\n if node.is_variable_type():\n if node.fixed:\n return True, str(value(node))\n label = self.smap.getSymbol(node)\n return True, label\n\n return True, str(value(node))\n\n def ctype(self, comp):\n if isinstance(comp, ICategorizedObject):\n return comp.ctype\n else:\n return comp.type()\n\n\ndef expression_to_string(expr, treechecker, labeler=None, smap=None):\n if labeler is not None:\n if smap is None:\n smap = SymbolMap()\n smap.default_labeler = labeler\n visitor = ToGamsVisitor(smap, treechecker)\n return visitor.dfs_postorder_stack(expr)\n\n\nclass Categorizer(object):\n \"\"\"Class for representing categorized variables.\n\n Given a list of variable names and a symbol map, categorizes the variable\n names into the categories: binary, ints, positive and reals.\n\n \"\"\"\n\n def __init__(self, var_list, symbol_map):\n self.binary = []\n self.ints = []\n self.positive = []\n self.reals = []\n\n # categorize variables\n for var in var_list:\n v = symbol_map.getObject(var)\n if v.is_binary():\n self.binary.append(var)\n elif v.is_integer():\n if (v.has_lb() and (value(v.lb) >= 0)) and \\\n (v.has_ub() and (value(v.ub) <= 1)):\n self.binary.append(var)\n else:\n self.ints.append(var)\n elif value(v.lb) == 0:\n self.positive.append(var)\n else:\n self.reals.append(var)\n\n def __iter__(self):\n \"\"\"Iterate over all variables.\n\n Yield a tuple containing the variables category and its name.\n \"\"\"\n for category in ['binary', 'ints', 'positive', 'reals']:\n var_list = getattr(self, category)\n for var_name in var_list:\n yield category, var_name\n\n\nclass StorageTreeChecker(object):\n def __init__(self, model):\n # blocks are hashable so we can use a normal set\n self.tree = {model}\n self.model = model\n # add everything above the model\n pb = self.parent_block(model)\n while pb is not None:\n self.tree.add(pb)\n pb = self.parent_block(pb)\n\n def __call__(self, comp, exception_flag=True):\n if comp is self.model:\n return True\n\n # walk up tree until there are no more parents\n seen = set()\n pb = self.parent_block(comp)\n while pb is not None:\n if pb in self.tree:\n self.tree.update(seen)\n return True\n seen.add(pb)\n pb = self.parent_block(pb)\n\n if exception_flag:\n self.raise_error(comp)\n else:\n return False\n\n def parent_block(self, comp):\n if isinstance(comp, 
ICategorizedObject):\n parent = comp.parent\n while (parent is not None) and \\\n (not parent._is_heterogeneous_container):\n parent = parent.parent\n return parent\n else:\n return comp.parent_block()\n\n def raise_error(self, comp):\n raise RuntimeError(\n \"GAMS writer: found component '%s' not on same model tree.\\n\"\n \"All components must have the same parent model.\" % comp.name)\n\n\ndef split_long_line(line):\n \"\"\"\n GAMS has an 80,000 character limit for lines, so split as many\n times as needed so as to not have illegal lines.\n \"\"\"\n new_lines = ''\n while len(line) > 80000:\n i = 80000\n while line[i] != ' ':\n # Walk backwards to find closest space,\n # where it is safe to split to a new line\n if i < 0:\n raise RuntimeError(\n \"Found an 80,000+ character string with no spaces\")\n i -= 1\n new_lines += line[:i] + '\\n'\n line = line[i + 1:]\n new_lines += line\n return new_lines\n\n\ndef _get_bound(exp):\n if exp is None:\n return None\n if is_fixed(exp):\n return value(exp)\n raise ValueError(\"non-fixed bound or weight: \" + str(exp))\n\n\[email protected]('gams', 'Generate the corresponding GAMS file')\nclass ProblemWriter_gams(AbstractProblemWriter):\n\n def __init__(self):\n AbstractProblemWriter.__init__(self, ProblemFormat.gams)\n\n def __call__(self,\n model,\n output_filename,\n solver_capability,\n io_options):\n \"\"\"\n Write a model in the GAMS modeling language format.\n\n Keyword Arguments\n -----------------\n output_filename: str\n Name of file to write GAMS model to. Optionally pass a file-like\n stream and the model will be written to that instead.\n io_options: dict\n - warmstart=True\n Warmstart by initializing model's variables to their values.\n - symbolic_solver_labels=False\n Use full Pyomo component names rather than\n shortened symbols (slower, but useful for debugging).\n - labeler=None\n Custom labeler. Incompatible with symbolic_solver_labels.\n - solver=None\n If None, GAMS will use default solver for model type.\n - mtype=None\n Model type. If None, will chose from lp, nlp, mip, and minlp.\n - add_options=None\n List of additional lines to write directly\n into model file before the solve statement.\n For model attributes, <model name> is GAMS_MODEL.\n - skip_trivial_constraints=False\n Skip writing constraints whose body section is fixed.\n - file_determinism=1\n | How much effort do we want to put into ensuring the\n | GAMS file is written deterministically for a Pyomo model:\n | 0 : None\n | 1 : sort keys of indexed components (default)\n | 2 : sort keys AND sort names (over declaration order)\n - put_results=None\n Filename for optionally writing solution values and\n marginals to (put_results).dat, and solver statuses\n to (put_results + 'stat').dat.\n \"\"\"\n\n # Make sure not to modify the user's dictionary,\n # they may be reusing it outside of this call\n io_options = dict(io_options)\n\n # Use full Pyomo component names rather than\n # shortened symbols (slower, but useful for debugging).\n symbolic_solver_labels = io_options.pop(\"symbolic_solver_labels\", False)\n\n # Custom labeler option. 
Incompatible with symbolic_solver_labels.\n labeler = io_options.pop(\"labeler\", None)\n\n # If None, GAMS will use default solver for model type.\n solver = io_options.pop(\"solver\", None)\n\n # If None, will chose from lp, nlp, mip, and minlp.\n mtype = io_options.pop(\"mtype\", None)\n\n # Lines to add before solve statement.\n add_options = io_options.pop(\"add_options\", None)\n\n # Skip writing constraints whose body section is\n # fixed (i.e., no variables)\n skip_trivial_constraints = \\\n io_options.pop(\"skip_trivial_constraints\", False)\n\n # How much effort do we want to put into ensuring the\n # GAMS file is written deterministically for a Pyomo model:\n # 0 : None\n # 1 : sort keys of indexed components (default)\n # 2 : sort keys AND sort names (over declaration order)\n file_determinism = io_options.pop(\"file_determinism\", 1)\n sorter_map = {0:SortComponents.unsorted,\n 1:SortComponents.deterministic,\n 2:SortComponents.sortBoth}\n sort = sorter_map[file_determinism]\n\n # Warmstart by initializing model's variables to their values.\n warmstart = io_options.pop(\"warmstart\", True)\n\n # Filename for optionally writing solution values and marginals\n # Set to True by GAMSSolver\n put_results = io_options.pop(\"put_results\", None)\n\n if len(io_options):\n raise ValueError(\n \"GAMS writer passed unrecognized io_options:\\n\\t\" +\n \"\\n\\t\".join(\"%s = %s\"\n % (k,v) for k,v in iteritems(io_options)))\n\n if solver is not None and solver.upper() not in valid_solvers:\n raise ValueError(\n \"GAMS writer passed unrecognized solver: %s\" % solver)\n\n if mtype is not None:\n valid_mtypes = set([\n 'lp', 'qcp', 'nlp', 'dnlp', 'rmip', 'mip', 'rmiqcp', 'rminlp',\n 'miqcp', 'minlp', 'rmpec', 'mpec', 'mcp', 'cns', 'emp'])\n if mtype.lower() not in valid_mtypes:\n raise ValueError(\"GAMS writer passed unrecognized \"\n \"model type: %s\" % mtype)\n if (solver is not None and\n mtype.upper() not in valid_solvers[solver.upper()]):\n raise ValueError(\"GAMS writer passed solver (%s) \"\n \"unsuitable for given model type (%s)\"\n % (solver, mtype))\n\n if output_filename is None:\n output_filename = model.name + \".gms\"\n\n if symbolic_solver_labels and (labeler is not None):\n raise ValueError(\"GAMS writer: Using both the \"\n \"'symbolic_solver_labels' and 'labeler' \"\n \"I/O options is forbidden\")\n\n if symbolic_solver_labels:\n var_labeler = con_labeler = ShortNameLabeler(63, '_')\n elif labeler is None:\n var_labeler = NumericLabeler('x')\n con_labeler = NumericLabeler('c')\n else:\n var_labeler = con_labeler = labeler\n\n var_list = []\n\n def var_recorder(obj):\n ans = var_labeler(obj)\n try:\n if obj.is_variable_type():\n var_list.append(ans)\n except:\n pass\n return ans\n\n def var_label(obj):\n #if obj.is_fixed():\n # return str(value(obj))\n return symbolMap.getSymbol(obj, var_recorder)\n\n symbolMap = SymbolMap(var_label)\n\n # when sorting, there are a non-trivial number of\n # temporary objects created. 
these all yield\n # non-circular references, so disable GC - the\n # overhead is non-trivial, and because references\n # are non-circular, everything will be collected\n # immediately anyway.\n with PauseGC() as pgc:\n try:\n if isinstance(output_filename, string_types):\n output_file = open(output_filename, \"w\")\n else:\n # Support passing of stream such as a StringIO\n # on which to write the model file\n output_file = output_filename\n self._write_model(\n model=model,\n output_file=output_file,\n solver_capability=solver_capability,\n var_list=var_list,\n var_label=var_label,\n symbolMap=symbolMap,\n con_labeler=con_labeler,\n sort=sort,\n skip_trivial_constraints=skip_trivial_constraints,\n warmstart=warmstart,\n solver=solver,\n mtype=mtype,\n add_options=add_options,\n put_results=put_results\n )\n finally:\n if isinstance(output_filename, string_types):\n output_file.close()\n\n return output_filename, symbolMap\n\n def _write_model(self,\n model,\n output_file,\n solver_capability,\n var_list,\n var_label,\n symbolMap,\n con_labeler,\n sort,\n skip_trivial_constraints,\n warmstart,\n solver,\n mtype,\n add_options,\n put_results):\n constraint_names = []\n ConstraintIO = StringIO()\n linear = True\n linear_degree = set([0,1])\n\n # Make sure there are no strange ActiveComponents. The expression\n # walker will handle strange things in constraints later.\n model_ctypes = model.collect_ctypes(active=True)\n invalids = set()\n for t in (model_ctypes - valid_active_ctypes_minlp):\n if issubclass(t, ActiveComponent):\n invalids.add(t)\n if len(invalids):\n invalids = [t.__name__ for t in invalids]\n raise RuntimeError(\n \"Unallowable active component(s) %s.\\nThe GAMS writer cannot \"\n \"export models with this component type.\" %\n \", \".join(invalids))\n\n tc = StorageTreeChecker(model)\n\n # Walk through the model and generate the constraint definition\n # for all active constraints. Any Vars / Expressions that are\n # encountered will be added to the var_list due to the labeler\n # defined above.\n for con in model.component_data_objects(Constraint,\n active=True,\n sort=sort):\n\n if not con.has_lb() and not con.has_ub():\n assert not con.equality\n continue # non-binding, so skip\n\n con_body = as_numeric(con.body)\n if skip_trivial_constraints and con_body.is_fixed():\n continue\n if linear:\n if con_body.polynomial_degree() not in linear_degree:\n linear = False\n\n cName = symbolMap.getSymbol(con, con_labeler)\n if con.equality:\n constraint_names.append('%s' % cName)\n ConstraintIO.write('%s.. %s =e= %s ;\\n' % (\n constraint_names[-1],\n expression_to_string(con_body, tc, smap=symbolMap),\n _get_bound(con.upper)\n ))\n else:\n if con.has_lb():\n constraint_names.append('%s_lo' % cName)\n ConstraintIO.write('%s.. %s =l= %s ;\\n' % (\n constraint_names[-1],\n _get_bound(con.lower),\n expression_to_string(con_body, tc, smap=symbolMap)\n ))\n if con.has_ub():\n constraint_names.append('%s_hi' % cName)\n ConstraintIO.write('%s.. %s =l= %s ;\\n' % (\n constraint_names[-1],\n expression_to_string(con_body, tc, smap=symbolMap),\n _get_bound(con.upper)\n ))\n\n obj = list(model.component_data_objects(Objective,\n active=True,\n sort=sort))\n if len(obj) != 1:\n raise RuntimeError(\n \"GAMS writer requires exactly one active objective (found %s)\"\n % (len(obj)))\n obj = obj[0]\n if linear:\n if obj.expr.polynomial_degree() not in linear_degree:\n linear = False\n oName = symbolMap.getSymbol(obj, con_labeler)\n constraint_names.append(oName)\n ConstraintIO.write('%s.. 
GAMS_OBJECTIVE =e= %s ;\\n' % (\n oName,\n expression_to_string(obj.expr, tc, smap=symbolMap)\n ))\n\n # Categorize the variables that we found\n categorized_vars = Categorizer(var_list, symbolMap)\n\n # Write the GAMS model\n # $offdigit ignores extra precise digits instead of erroring\n output_file.write(\"$offdigit\\n\\n\")\n output_file.write(\"EQUATIONS\\n\\t\")\n output_file.write(\"\\n\\t\".join(constraint_names))\n if categorized_vars.binary:\n output_file.write(\";\\n\\nBINARY VARIABLES\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.binary))\n if categorized_vars.ints:\n output_file.write(\";\\n\\nINTEGER VARIABLES\")\n output_file.write(\"\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.ints))\n if categorized_vars.positive:\n output_file.write(\";\\n\\nPOSITIVE VARIABLES\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.positive))\n output_file.write(\";\\n\\nVARIABLES\\n\\tGAMS_OBJECTIVE\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.reals))\n output_file.write(\";\\n\\n\")\n\n for line in ConstraintIO.getvalue().splitlines():\n if len(line) > 80000:\n line = split_long_line(line)\n output_file.write(line + \"\\n\")\n\n output_file.write(\"\\n\")\n\n warn_int_bounds = False\n for category, var_name in categorized_vars:\n var = symbolMap.getObject(var_name)\n tc(var)\n if category == 'positive':\n if var.has_ub():\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'ints':\n if not var.has_lb():\n warn_int_bounds = True\n # GAMS doesn't allow -INF lower bound for ints\n logger.warning(\"Lower bound for integer variable %s set \"\n \"to -1.0E+100.\" % var.name)\n output_file.write(\"%s.lo = -1.0E+100;\\n\" % (var_name))\n elif value(var.lb) != 0:\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if not var.has_ub():\n warn_int_bounds = True\n # GAMS has an option value called IntVarUp that is the\n # default upper integer bound, which it applies if the\n # integer's upper bound is INF. This option maxes out at\n # 2147483647, so we can go higher by setting the bound.\n logger.warning(\"Upper bound for integer variable %s set \"\n \"to +1.0E+100.\" % var.name)\n output_file.write(\"%s.up = +1.0E+100;\\n\" % (var_name))\n else:\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'binary':\n if var.has_lb() and value(var.lb) != 0:\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if var.has_ub() and value(var.ub) != 1:\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'reals':\n if var.has_lb():\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if var.has_ub():\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n else:\n raise KeyError('Category %s not supported' % category)\n if warmstart and var.value is not None:\n output_file.write(\"%s.l = %s;\\n\" % (var_name, var.value))\n\n if warn_int_bounds:\n logger.warning(\n \"GAMS requires finite bounds for integer variables. 1.0E100 \"\n \"is as extreme as GAMS will define, and should be enough to \"\n \"appear unbounded. 
If the solver cannot handle this bound, \"\n \"explicitly set a smaller bound on the pyomo model, or try a \"\n \"different GAMS solver.\")\n\n model_name = \"GAMS_MODEL\"\n output_file.write(\"\\nMODEL %s /all/ ;\\n\" % model_name)\n\n if mtype is None:\n mtype = ('lp','nlp','mip','minlp')[\n (0 if linear else 1) +\n (2 if (categorized_vars.binary or categorized_vars.ints)\n else 0)]\n\n if solver is not None:\n if mtype.upper() not in valid_solvers[solver.upper()]:\n raise ValueError(\"GAMS writer passed solver (%s) \"\n \"unsuitable for model type (%s)\"\n % (solver, mtype))\n output_file.write(\"option %s=%s;\\n\" % (mtype, solver))\n\n if add_options is not None:\n output_file.write(\"\\n* START USER ADDITIONAL OPTIONS\\n\")\n for line in add_options:\n output_file.write('\\n' + line)\n output_file.write(\"\\n\\n* END USER ADDITIONAL OPTIONS\\n\\n\")\n\n output_file.write(\n \"SOLVE %s USING %s %simizing GAMS_OBJECTIVE;\\n\\n\"\n % ( model_name,\n mtype,\n 'min' if obj.sense == minimize else 'max'))\n\n # Set variables to store certain statuses and attributes\n stat_vars = ['MODELSTAT', 'SOLVESTAT', 'OBJEST', 'OBJVAL', 'NUMVAR',\n 'NUMEQU', 'NUMDVAR', 'NUMNZ', 'ETSOLVE']\n output_file.write(\"Scalars MODELSTAT 'model status', \"\n \"SOLVESTAT 'solve status';\\n\")\n output_file.write(\"MODELSTAT = %s.modelstat;\\n\" % model_name)\n output_file.write(\"SOLVESTAT = %s.solvestat;\\n\\n\" % model_name)\n\n output_file.write(\"Scalar OBJEST 'best objective', \"\n \"OBJVAL 'objective value';\\n\")\n output_file.write(\"OBJEST = %s.objest;\\n\" % model_name)\n output_file.write(\"OBJVAL = %s.objval;\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMVAR 'number of variables';\\n\")\n output_file.write(\"NUMVAR = %s.numvar\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMEQU 'number of equations';\\n\")\n output_file.write(\"NUMEQU = %s.numequ\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMDVAR 'number of discrete variables';\\n\")\n output_file.write(\"NUMDVAR = %s.numdvar\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMNZ 'number of nonzeros';\\n\")\n output_file.write(\"NUMNZ = %s.numnz\\n\\n\" % model_name)\n\n output_file.write(\"Scalar ETSOLVE 'time to execute solve statement';\\n\")\n output_file.write(\"ETSOLVE = %s.etsolve\\n\\n\" % model_name)\n\n if put_results is not None:\n results = put_results + '.dat'\n output_file.write(\"\\nfile results /'%s'/;\" % results)\n output_file.write(\"\\nresults.nd=15;\")\n output_file.write(\"\\nresults.nw=21;\")\n output_file.write(\"\\nput results;\")\n output_file.write(\"\\nput 'SYMBOL : LEVEL : MARGINAL' /;\")\n for var in var_list:\n output_file.write(\"\\nput %s %s.l %s.m /;\" % (var, var, var))\n for con in constraint_names:\n output_file.write(\"\\nput %s %s.l %s.m /;\" % (con, con, con))\n output_file.write(\"\\nput GAMS_OBJECTIVE GAMS_OBJECTIVE.l \"\n \"GAMS_OBJECTIVE.m;\\n\")\n\n statresults = put_results + 'stat.dat'\n output_file.write(\"\\nfile statresults /'%s'/;\" % statresults)\n output_file.write(\"\\nstatresults.nd=15;\")\n output_file.write(\"\\nstatresults.nw=21;\")\n output_file.write(\"\\nput statresults;\")\n output_file.write(\"\\nput 'SYMBOL : VALUE' /;\")\n for stat in stat_vars:\n output_file.write(\"\\nput '%s' %s /;\\n\" % (stat, stat))\n\n\nvalid_solvers = {\n'ALPHAECP': {'MINLP','MIQCP'},\n'AMPL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'ANTIGONE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BARON': 
{'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BDMLP': {'LP','MIP','RMIP'},\n'BDMLPD': {'LP','RMIP'},\n'BENCH': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BONMIN': {'MINLP','MIQCP'},\n'BONMINH': {'MINLP','MIQCP'},\n'CBC': {'LP','MIP','RMIP'},\n'COINBONMIN': {'MINLP','MIQCP'},\n'COINCBC': {'LP','MIP','RMIP'},\n'COINCOUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'COINIPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'COINOS': {'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'COINSCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CONOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPT3': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPT4': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPTD': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONVERT': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CONVERTD': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'COUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CPLEX': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'CPLEXD': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'CPOPTIMIZER': {'MIP','MINLP','MIQCP'},\n'DE': {'EMP'},\n'DECIS': {'EMP'},\n'DECISC': {'LP'},\n'DECISM': {'LP'},\n'DICOPT': {'MINLP','MIQCP'},\n'DICOPTD': {'MINLP','MIQCP'},\n'EXAMINER': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'EXAMINER2': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'GAMSCHK': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'GLOMIQO': {'QCP','MIQCP','RMIQCP'},\n'GUROBI': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'GUSS': {'LP', 'MIP', 'NLP', 'MCP', 'CNS', 'DNLP', 'MINLP', 'QCP', 'MIQCP'},\n'IPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'IPOPTH': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'JAMS': {'EMP'},\n'KESTREL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'KNITRO': {'LP','RMIP','NLP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LGO': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'LGOD': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'LINDO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'LINDOGLOBAL': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LINGO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP'},\n'LOCALSOLVER': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LOGMIP': {'EMP'},\n'LS': {'LP','RMIP'},\n'MILES': {'MCP'},\n'MILESE': {'MCP'},\n'MINOS': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MINOS5': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MINOS55': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MOSEK': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','QCP','MIQCP','RMIQCP'},\n'MPECDUMP': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'MPSGE': {},\n'MSNLP': {'NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'NLPEC': {'MCP','MPEC','RMPEC'},\n'OQNLP': {'NLP', 'DNLP', 'MINLP', 'QCP', 'MIQCP'},\n'OS': 
{'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'OSICPLEX': {'LP','MIP','RMIP'},\n'OSIGUROBI': {'LP','MIP','RMIP'},\n'OSIMOSEK': {'LP','MIP','RMIP'},\n'OSISOPLEX': {'LP','RMIP'},\n'OSIXPRESS': {'LP','MIP','RMIP'},\n'PATH': {'MCP','CNS'},\n'PATHC': {'MCP','CNS'},\n'PATHNLP': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'PYOMO': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'QUADMINOS': {'LP'},\n'SBB': {'MINLP','MIQCP'},\n'SCENSOLVER': {'LP','MIP','RMIP','NLP','MCP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'SCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'SNOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'SOPLEX': {'LP','RMIP'},\n'XA': {'LP','MIP','RMIP'},\n'XPRESS': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'}\n}\n", "path": "pyomo/repn/plugins/gams_writer.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n#\n# Problem Writer for GAMS Format Files\n#\n\nfrom six import StringIO, string_types, iteritems\nfrom six.moves import xrange\n\nfrom pyutilib.misc import PauseGC\n\nfrom pyomo.core.expr import current as EXPR\nfrom pyomo.core.expr.numvalue import (\n is_fixed, value, as_numeric, native_types, native_numeric_types)\nfrom pyomo.core.base import (\n SymbolMap, ShortNameLabeler, NumericLabeler, Block, Constraint, Expression,\n Objective, Var, Param, minimize, Suffix, SortComponents)\nfrom pyomo.core.base.component import ActiveComponent\nfrom pyomo.core.kernel.base import ICategorizedObject\nfrom pyomo.opt import ProblemFormat\nfrom pyomo.opt.base import AbstractProblemWriter, WriterFactory\nfrom pyomo.repn.util import valid_expr_ctypes_minlp, \\\n valid_active_ctypes_minlp\n\nimport logging\n\nlogger = logging.getLogger('pyomo.core')\n\n#\n# A visitor pattern that creates a string for an expression\n# that is compatible with the GAMS syntax.\n#\nclass ToGamsVisitor(EXPR.ExpressionValueVisitor):\n\n def __init__(self, smap, treechecker):\n super(ToGamsVisitor, self).__init__()\n self.smap = smap\n self.treechecker = treechecker\n\n def visit(self, node, values):\n \"\"\" Visit nodes that have been expanded \"\"\"\n tmp = []\n for i,val in enumerate(values):\n arg = node._args_[i]\n\n if arg is None:\n tmp.append('Undefined')\n elif arg.__class__ in native_numeric_types:\n if arg < 0:\n # Wrap negative values in parens to avoid double operator\n tmp.append(\"(%s)\" % val)\n else:\n tmp.append(val)\n elif arg.__class__ in native_types:\n tmp.append(\"'{0}'\".format(val))\n elif arg.is_variable_type():\n if arg.is_fixed():\n # bind fixed var values in parens to avoid double negatives\n tmp.append(\"(%s)\" % val)\n else:\n tmp.append(val)\n elif (arg.is_expression_type() and\n node._precedence() < arg._precedence()):\n tmp.append(\"({0})\".format(val))\n else:\n tmp.append(val)\n\n if node.__class__ is EXPR.PowExpression:\n # If the exponent is a positive integer, use the power() function.\n # Otherwise, use the ** operator.\n exponent = node.arg(1)\n if 
(exponent.__class__ in native_numeric_types and\n exponent == int(exponent)):\n return \"power({0}, {1})\".format(tmp[0], tmp[1])\n else:\n return \"{0} ** {1}\".format(tmp[0], tmp[1])\n else:\n return node._to_string(tmp, None, self.smap, True)\n\n def visiting_potential_leaf(self, node):\n \"\"\"\n Visiting a potential leaf.\n\n Return True if the node is not expanded.\n \"\"\"\n if node is None:\n return True, None\n\n if node.__class__ in native_types:\n return True, str(node)\n\n if node.is_expression_type():\n # we will descend into this, so type checking will happen later\n if node.is_component_type():\n self.treechecker(node)\n return False, None\n\n if node.is_component_type():\n if self.ctype(node) not in valid_expr_ctypes_minlp:\n # Make sure all components in active constraints\n # are basic ctypes we know how to deal with.\n raise RuntimeError(\n \"Unallowable component '%s' of type %s found in an active \"\n \"constraint or objective.\\nThe GAMS writer cannot export \"\n \"expressions with this component type.\"\n % (node.name, self.ctype(node).__name__))\n if self.ctype(node) is not Var:\n # For these, make sure it's on the right model. We can check\n # Vars later since they don't disappear from the expressions\n self.treechecker(node)\n\n if node.is_variable_type():\n if node.fixed:\n return True, str(value(node))\n label = self.smap.getSymbol(node)\n return True, label\n\n return True, str(value(node))\n\n def ctype(self, comp):\n if isinstance(comp, ICategorizedObject):\n return comp.ctype\n else:\n return comp.type()\n\n\ndef expression_to_string(expr, treechecker, labeler=None, smap=None):\n if labeler is not None:\n if smap is None:\n smap = SymbolMap()\n smap.default_labeler = labeler\n visitor = ToGamsVisitor(smap, treechecker)\n return visitor.dfs_postorder_stack(expr)\n\n\nclass Categorizer(object):\n \"\"\"Class for representing categorized variables.\n\n Given a list of variable names and a symbol map, categorizes the variable\n names into the categories: binary, ints, positive and reals.\n\n \"\"\"\n\n def __init__(self, var_list, symbol_map):\n self.binary = []\n self.ints = []\n self.positive = []\n self.reals = []\n\n # categorize variables\n for var in var_list:\n v = symbol_map.getObject(var)\n if v.is_binary():\n self.binary.append(var)\n elif v.is_integer():\n if (v.has_lb() and (value(v.lb) >= 0)) and \\\n (v.has_ub() and (value(v.ub) <= 1)):\n self.binary.append(var)\n else:\n self.ints.append(var)\n elif value(v.lb) == 0:\n self.positive.append(var)\n else:\n self.reals.append(var)\n\n def __iter__(self):\n \"\"\"Iterate over all variables.\n\n Yield a tuple containing the variables category and its name.\n \"\"\"\n for category in ['binary', 'ints', 'positive', 'reals']:\n var_list = getattr(self, category)\n for var_name in var_list:\n yield category, var_name\n\n\nclass StorageTreeChecker(object):\n def __init__(self, model):\n # blocks are hashable so we can use a normal set\n self.tree = {model}\n self.model = model\n # add everything above the model\n pb = self.parent_block(model)\n while pb is not None:\n self.tree.add(pb)\n pb = self.parent_block(pb)\n\n def __call__(self, comp, exception_flag=True):\n if comp is self.model:\n return True\n\n # walk up tree until there are no more parents\n seen = set()\n pb = self.parent_block(comp)\n while pb is not None:\n if pb in self.tree:\n self.tree.update(seen)\n return True\n seen.add(pb)\n pb = self.parent_block(pb)\n\n if exception_flag:\n self.raise_error(comp)\n else:\n return False\n\n def 
parent_block(self, comp):\n if isinstance(comp, ICategorizedObject):\n parent = comp.parent\n while (parent is not None) and \\\n (not parent._is_heterogeneous_container):\n parent = parent.parent\n return parent\n else:\n return comp.parent_block()\n\n def raise_error(self, comp):\n raise RuntimeError(\n \"GAMS writer: found component '%s' not on same model tree.\\n\"\n \"All components must have the same parent model.\" % comp.name)\n\n\ndef split_long_line(line):\n \"\"\"\n GAMS has an 80,000 character limit for lines, so split as many\n times as needed so as to not have illegal lines.\n \"\"\"\n new_lines = ''\n while len(line) > 80000:\n i = 80000\n while line[i] != ' ':\n # Walk backwards to find closest space,\n # where it is safe to split to a new line\n if i < 0:\n raise RuntimeError(\n \"Found an 80,000+ character string with no spaces\")\n i -= 1\n new_lines += line[:i] + '\\n'\n # the space will be the first character in the next line,\n # so that the line doesn't start with the comment character '*'\n line = line[i:]\n new_lines += line\n return new_lines\n\n\ndef _get_bound(exp):\n if exp is None:\n return None\n if is_fixed(exp):\n return value(exp)\n raise ValueError(\"non-fixed bound or weight: \" + str(exp))\n\n\[email protected]('gams', 'Generate the corresponding GAMS file')\nclass ProblemWriter_gams(AbstractProblemWriter):\n\n def __init__(self):\n AbstractProblemWriter.__init__(self, ProblemFormat.gams)\n\n def __call__(self,\n model,\n output_filename,\n solver_capability,\n io_options):\n \"\"\"\n Write a model in the GAMS modeling language format.\n\n Keyword Arguments\n -----------------\n output_filename: str\n Name of file to write GAMS model to. Optionally pass a file-like\n stream and the model will be written to that instead.\n io_options: dict\n - warmstart=True\n Warmstart by initializing model's variables to their values.\n - symbolic_solver_labels=False\n Use full Pyomo component names rather than\n shortened symbols (slower, but useful for debugging).\n - labeler=None\n Custom labeler. Incompatible with symbolic_solver_labels.\n - solver=None\n If None, GAMS will use default solver for model type.\n - mtype=None\n Model type. If None, will chose from lp, nlp, mip, and minlp.\n - add_options=None\n List of additional lines to write directly\n into model file before the solve statement.\n For model attributes, <model name> is GAMS_MODEL.\n - skip_trivial_constraints=False\n Skip writing constraints whose body section is fixed.\n - file_determinism=1\n | How much effort do we want to put into ensuring the\n | GAMS file is written deterministically for a Pyomo model:\n | 0 : None\n | 1 : sort keys of indexed components (default)\n | 2 : sort keys AND sort names (over declaration order)\n - put_results=None\n Filename for optionally writing solution values and\n marginals to (put_results).dat, and solver statuses\n to (put_results + 'stat').dat.\n \"\"\"\n\n # Make sure not to modify the user's dictionary,\n # they may be reusing it outside of this call\n io_options = dict(io_options)\n\n # Use full Pyomo component names rather than\n # shortened symbols (slower, but useful for debugging).\n symbolic_solver_labels = io_options.pop(\"symbolic_solver_labels\", False)\n\n # Custom labeler option. 
Incompatible with symbolic_solver_labels.\n labeler = io_options.pop(\"labeler\", None)\n\n # If None, GAMS will use default solver for model type.\n solver = io_options.pop(\"solver\", None)\n\n # If None, will chose from lp, nlp, mip, and minlp.\n mtype = io_options.pop(\"mtype\", None)\n\n # Lines to add before solve statement.\n add_options = io_options.pop(\"add_options\", None)\n\n # Skip writing constraints whose body section is\n # fixed (i.e., no variables)\n skip_trivial_constraints = \\\n io_options.pop(\"skip_trivial_constraints\", False)\n\n # How much effort do we want to put into ensuring the\n # GAMS file is written deterministically for a Pyomo model:\n # 0 : None\n # 1 : sort keys of indexed components (default)\n # 2 : sort keys AND sort names (over declaration order)\n file_determinism = io_options.pop(\"file_determinism\", 1)\n sorter_map = {0:SortComponents.unsorted,\n 1:SortComponents.deterministic,\n 2:SortComponents.sortBoth}\n sort = sorter_map[file_determinism]\n\n # Warmstart by initializing model's variables to their values.\n warmstart = io_options.pop(\"warmstart\", True)\n\n # Filename for optionally writing solution values and marginals\n # Set to True by GAMSSolver\n put_results = io_options.pop(\"put_results\", None)\n\n if len(io_options):\n raise ValueError(\n \"GAMS writer passed unrecognized io_options:\\n\\t\" +\n \"\\n\\t\".join(\"%s = %s\"\n % (k,v) for k,v in iteritems(io_options)))\n\n if solver is not None and solver.upper() not in valid_solvers:\n raise ValueError(\n \"GAMS writer passed unrecognized solver: %s\" % solver)\n\n if mtype is not None:\n valid_mtypes = set([\n 'lp', 'qcp', 'nlp', 'dnlp', 'rmip', 'mip', 'rmiqcp', 'rminlp',\n 'miqcp', 'minlp', 'rmpec', 'mpec', 'mcp', 'cns', 'emp'])\n if mtype.lower() not in valid_mtypes:\n raise ValueError(\"GAMS writer passed unrecognized \"\n \"model type: %s\" % mtype)\n if (solver is not None and\n mtype.upper() not in valid_solvers[solver.upper()]):\n raise ValueError(\"GAMS writer passed solver (%s) \"\n \"unsuitable for given model type (%s)\"\n % (solver, mtype))\n\n if output_filename is None:\n output_filename = model.name + \".gms\"\n\n if symbolic_solver_labels and (labeler is not None):\n raise ValueError(\"GAMS writer: Using both the \"\n \"'symbolic_solver_labels' and 'labeler' \"\n \"I/O options is forbidden\")\n\n if symbolic_solver_labels:\n var_labeler = con_labeler = ShortNameLabeler(63, '_')\n elif labeler is None:\n var_labeler = NumericLabeler('x')\n con_labeler = NumericLabeler('c')\n else:\n var_labeler = con_labeler = labeler\n\n var_list = []\n\n def var_recorder(obj):\n ans = var_labeler(obj)\n try:\n if obj.is_variable_type():\n var_list.append(ans)\n except:\n pass\n return ans\n\n def var_label(obj):\n #if obj.is_fixed():\n # return str(value(obj))\n return symbolMap.getSymbol(obj, var_recorder)\n\n symbolMap = SymbolMap(var_label)\n\n # when sorting, there are a non-trivial number of\n # temporary objects created. 
these all yield\n # non-circular references, so disable GC - the\n # overhead is non-trivial, and because references\n # are non-circular, everything will be collected\n # immediately anyway.\n with PauseGC() as pgc:\n try:\n if isinstance(output_filename, string_types):\n output_file = open(output_filename, \"w\")\n else:\n # Support passing of stream such as a StringIO\n # on which to write the model file\n output_file = output_filename\n self._write_model(\n model=model,\n output_file=output_file,\n solver_capability=solver_capability,\n var_list=var_list,\n var_label=var_label,\n symbolMap=symbolMap,\n con_labeler=con_labeler,\n sort=sort,\n skip_trivial_constraints=skip_trivial_constraints,\n warmstart=warmstart,\n solver=solver,\n mtype=mtype,\n add_options=add_options,\n put_results=put_results\n )\n finally:\n if isinstance(output_filename, string_types):\n output_file.close()\n\n return output_filename, symbolMap\n\n def _write_model(self,\n model,\n output_file,\n solver_capability,\n var_list,\n var_label,\n symbolMap,\n con_labeler,\n sort,\n skip_trivial_constraints,\n warmstart,\n solver,\n mtype,\n add_options,\n put_results):\n constraint_names = []\n ConstraintIO = StringIO()\n linear = True\n linear_degree = set([0,1])\n\n # Make sure there are no strange ActiveComponents. The expression\n # walker will handle strange things in constraints later.\n model_ctypes = model.collect_ctypes(active=True)\n invalids = set()\n for t in (model_ctypes - valid_active_ctypes_minlp):\n if issubclass(t, ActiveComponent):\n invalids.add(t)\n if len(invalids):\n invalids = [t.__name__ for t in invalids]\n raise RuntimeError(\n \"Unallowable active component(s) %s.\\nThe GAMS writer cannot \"\n \"export models with this component type.\" %\n \", \".join(invalids))\n\n tc = StorageTreeChecker(model)\n\n # Walk through the model and generate the constraint definition\n # for all active constraints. Any Vars / Expressions that are\n # encountered will be added to the var_list due to the labeler\n # defined above.\n for con in model.component_data_objects(Constraint,\n active=True,\n sort=sort):\n\n if not con.has_lb() and not con.has_ub():\n assert not con.equality\n continue # non-binding, so skip\n\n con_body = as_numeric(con.body)\n if skip_trivial_constraints and con_body.is_fixed():\n continue\n if linear:\n if con_body.polynomial_degree() not in linear_degree:\n linear = False\n\n cName = symbolMap.getSymbol(con, con_labeler)\n if con.equality:\n constraint_names.append('%s' % cName)\n ConstraintIO.write('%s.. %s =e= %s ;\\n' % (\n constraint_names[-1],\n expression_to_string(con_body, tc, smap=symbolMap),\n _get_bound(con.upper)\n ))\n else:\n if con.has_lb():\n constraint_names.append('%s_lo' % cName)\n ConstraintIO.write('%s.. %s =l= %s ;\\n' % (\n constraint_names[-1],\n _get_bound(con.lower),\n expression_to_string(con_body, tc, smap=symbolMap)\n ))\n if con.has_ub():\n constraint_names.append('%s_hi' % cName)\n ConstraintIO.write('%s.. %s =l= %s ;\\n' % (\n constraint_names[-1],\n expression_to_string(con_body, tc, smap=symbolMap),\n _get_bound(con.upper)\n ))\n\n obj = list(model.component_data_objects(Objective,\n active=True,\n sort=sort))\n if len(obj) != 1:\n raise RuntimeError(\n \"GAMS writer requires exactly one active objective (found %s)\"\n % (len(obj)))\n obj = obj[0]\n if linear:\n if obj.expr.polynomial_degree() not in linear_degree:\n linear = False\n oName = symbolMap.getSymbol(obj, con_labeler)\n constraint_names.append(oName)\n ConstraintIO.write('%s.. 
GAMS_OBJECTIVE =e= %s ;\\n' % (\n oName,\n expression_to_string(obj.expr, tc, smap=symbolMap)\n ))\n\n # Categorize the variables that we found\n categorized_vars = Categorizer(var_list, symbolMap)\n\n # Write the GAMS model\n # $offdigit ignores extra precise digits instead of erroring\n output_file.write(\"$offdigit\\n\\n\")\n output_file.write(\"EQUATIONS\\n\\t\")\n output_file.write(\"\\n\\t\".join(constraint_names))\n if categorized_vars.binary:\n output_file.write(\";\\n\\nBINARY VARIABLES\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.binary))\n if categorized_vars.ints:\n output_file.write(\";\\n\\nINTEGER VARIABLES\")\n output_file.write(\"\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.ints))\n if categorized_vars.positive:\n output_file.write(\";\\n\\nPOSITIVE VARIABLES\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.positive))\n output_file.write(\";\\n\\nVARIABLES\\n\\tGAMS_OBJECTIVE\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.reals))\n output_file.write(\";\\n\\n\")\n\n for line in ConstraintIO.getvalue().splitlines():\n if len(line) > 80000:\n line = split_long_line(line)\n output_file.write(line + \"\\n\")\n\n output_file.write(\"\\n\")\n\n warn_int_bounds = False\n for category, var_name in categorized_vars:\n var = symbolMap.getObject(var_name)\n tc(var)\n if category == 'positive':\n if var.has_ub():\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'ints':\n if not var.has_lb():\n warn_int_bounds = True\n # GAMS doesn't allow -INF lower bound for ints\n logger.warning(\"Lower bound for integer variable %s set \"\n \"to -1.0E+100.\" % var.name)\n output_file.write(\"%s.lo = -1.0E+100;\\n\" % (var_name))\n elif value(var.lb) != 0:\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if not var.has_ub():\n warn_int_bounds = True\n # GAMS has an option value called IntVarUp that is the\n # default upper integer bound, which it applies if the\n # integer's upper bound is INF. This option maxes out at\n # 2147483647, so we can go higher by setting the bound.\n logger.warning(\"Upper bound for integer variable %s set \"\n \"to +1.0E+100.\" % var.name)\n output_file.write(\"%s.up = +1.0E+100;\\n\" % (var_name))\n else:\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'binary':\n if var.has_lb() and value(var.lb) != 0:\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if var.has_ub() and value(var.ub) != 1:\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'reals':\n if var.has_lb():\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if var.has_ub():\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n else:\n raise KeyError('Category %s not supported' % category)\n if warmstart and var.value is not None:\n output_file.write(\"%s.l = %s;\\n\" % (var_name, var.value))\n\n if warn_int_bounds:\n logger.warning(\n \"GAMS requires finite bounds for integer variables. 1.0E100 \"\n \"is as extreme as GAMS will define, and should be enough to \"\n \"appear unbounded. 
If the solver cannot handle this bound, \"\n \"explicitly set a smaller bound on the pyomo model, or try a \"\n \"different GAMS solver.\")\n\n model_name = \"GAMS_MODEL\"\n output_file.write(\"\\nMODEL %s /all/ ;\\n\" % model_name)\n\n if mtype is None:\n mtype = ('lp','nlp','mip','minlp')[\n (0 if linear else 1) +\n (2 if (categorized_vars.binary or categorized_vars.ints)\n else 0)]\n\n if solver is not None:\n if mtype.upper() not in valid_solvers[solver.upper()]:\n raise ValueError(\"GAMS writer passed solver (%s) \"\n \"unsuitable for model type (%s)\"\n % (solver, mtype))\n output_file.write(\"option %s=%s;\\n\" % (mtype, solver))\n\n if add_options is not None:\n output_file.write(\"\\n* START USER ADDITIONAL OPTIONS\\n\")\n for line in add_options:\n output_file.write('\\n' + line)\n output_file.write(\"\\n\\n* END USER ADDITIONAL OPTIONS\\n\\n\")\n\n output_file.write(\n \"SOLVE %s USING %s %simizing GAMS_OBJECTIVE;\\n\\n\"\n % ( model_name,\n mtype,\n 'min' if obj.sense == minimize else 'max'))\n\n # Set variables to store certain statuses and attributes\n stat_vars = ['MODELSTAT', 'SOLVESTAT', 'OBJEST', 'OBJVAL', 'NUMVAR',\n 'NUMEQU', 'NUMDVAR', 'NUMNZ', 'ETSOLVE']\n output_file.write(\"Scalars MODELSTAT 'model status', \"\n \"SOLVESTAT 'solve status';\\n\")\n output_file.write(\"MODELSTAT = %s.modelstat;\\n\" % model_name)\n output_file.write(\"SOLVESTAT = %s.solvestat;\\n\\n\" % model_name)\n\n output_file.write(\"Scalar OBJEST 'best objective', \"\n \"OBJVAL 'objective value';\\n\")\n output_file.write(\"OBJEST = %s.objest;\\n\" % model_name)\n output_file.write(\"OBJVAL = %s.objval;\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMVAR 'number of variables';\\n\")\n output_file.write(\"NUMVAR = %s.numvar\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMEQU 'number of equations';\\n\")\n output_file.write(\"NUMEQU = %s.numequ\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMDVAR 'number of discrete variables';\\n\")\n output_file.write(\"NUMDVAR = %s.numdvar\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMNZ 'number of nonzeros';\\n\")\n output_file.write(\"NUMNZ = %s.numnz\\n\\n\" % model_name)\n\n output_file.write(\"Scalar ETSOLVE 'time to execute solve statement';\\n\")\n output_file.write(\"ETSOLVE = %s.etsolve\\n\\n\" % model_name)\n\n if put_results is not None:\n results = put_results + '.dat'\n output_file.write(\"\\nfile results /'%s'/;\" % results)\n output_file.write(\"\\nresults.nd=15;\")\n output_file.write(\"\\nresults.nw=21;\")\n output_file.write(\"\\nput results;\")\n output_file.write(\"\\nput 'SYMBOL : LEVEL : MARGINAL' /;\")\n for var in var_list:\n output_file.write(\"\\nput %s %s.l %s.m /;\" % (var, var, var))\n for con in constraint_names:\n output_file.write(\"\\nput %s %s.l %s.m /;\" % (con, con, con))\n output_file.write(\"\\nput GAMS_OBJECTIVE GAMS_OBJECTIVE.l \"\n \"GAMS_OBJECTIVE.m;\\n\")\n\n statresults = put_results + 'stat.dat'\n output_file.write(\"\\nfile statresults /'%s'/;\" % statresults)\n output_file.write(\"\\nstatresults.nd=15;\")\n output_file.write(\"\\nstatresults.nw=21;\")\n output_file.write(\"\\nput statresults;\")\n output_file.write(\"\\nput 'SYMBOL : VALUE' /;\")\n for stat in stat_vars:\n output_file.write(\"\\nput '%s' %s /;\\n\" % (stat, stat))\n\n\nvalid_solvers = {\n'ALPHAECP': {'MINLP','MIQCP'},\n'AMPL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'ANTIGONE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BARON': 
{'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BDMLP': {'LP','MIP','RMIP'},\n'BDMLPD': {'LP','RMIP'},\n'BENCH': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BONMIN': {'MINLP','MIQCP'},\n'BONMINH': {'MINLP','MIQCP'},\n'CBC': {'LP','MIP','RMIP'},\n'COINBONMIN': {'MINLP','MIQCP'},\n'COINCBC': {'LP','MIP','RMIP'},\n'COINCOUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'COINIPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'COINOS': {'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'COINSCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CONOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPT3': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPT4': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPTD': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONVERT': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CONVERTD': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'COUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CPLEX': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'CPLEXD': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'CPOPTIMIZER': {'MIP','MINLP','MIQCP'},\n'DE': {'EMP'},\n'DECIS': {'EMP'},\n'DECISC': {'LP'},\n'DECISM': {'LP'},\n'DICOPT': {'MINLP','MIQCP'},\n'DICOPTD': {'MINLP','MIQCP'},\n'EXAMINER': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'EXAMINER2': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'GAMSCHK': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'GLOMIQO': {'QCP','MIQCP','RMIQCP'},\n'GUROBI': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'GUSS': {'LP', 'MIP', 'NLP', 'MCP', 'CNS', 'DNLP', 'MINLP', 'QCP', 'MIQCP'},\n'IPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'IPOPTH': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'JAMS': {'EMP'},\n'KESTREL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'KNITRO': {'LP','RMIP','NLP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LGO': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'LGOD': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'LINDO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'LINDOGLOBAL': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LINGO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP'},\n'LOCALSOLVER': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LOGMIP': {'EMP'},\n'LS': {'LP','RMIP'},\n'MILES': {'MCP'},\n'MILESE': {'MCP'},\n'MINOS': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MINOS5': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MINOS55': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MOSEK': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','QCP','MIQCP','RMIQCP'},\n'MPECDUMP': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'MPSGE': {},\n'MSNLP': {'NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'NLPEC': {'MCP','MPEC','RMPEC'},\n'OQNLP': {'NLP', 'DNLP', 'MINLP', 'QCP', 'MIQCP'},\n'OS': 
{'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'OSICPLEX': {'LP','MIP','RMIP'},\n'OSIGUROBI': {'LP','MIP','RMIP'},\n'OSIMOSEK': {'LP','MIP','RMIP'},\n'OSISOPLEX': {'LP','RMIP'},\n'OSIXPRESS': {'LP','MIP','RMIP'},\n'PATH': {'MCP','CNS'},\n'PATHC': {'MCP','CNS'},\n'PATHNLP': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'PYOMO': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'QUADMINOS': {'LP'},\n'SBB': {'MINLP','MIQCP'},\n'SCENSOLVER': {'LP','MIP','RMIP','NLP','MCP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'SCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'SNOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'SOPLEX': {'LP','RMIP'},\n'XA': {'LP','MIP','RMIP'},\n'XPRESS': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'}\n}\n", "path": "pyomo/repn/plugins/gams_writer.py"}]} |
gh_patches_debug_1454 | rasdani/github-patches | git_diff | fossasia__open-event-server-6182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Coorganizer and owners can't download tickets, invoices
Current config only allows the organizer role to download the tickets
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/models/user.py`
Content:
```
1 import random
2 from datetime import datetime
3
4 import humanize
5 import pytz
6 from flask import url_for
7 from flask_scrypt import generate_password_hash, generate_random_salt
8 from sqlalchemy import event, desc
9 from sqlalchemy.ext.hybrid import hybrid_property
10 from sqlalchemy.sql import func
11 from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
12
13 from app.api.helpers.db import get_count
14 from app.models import db
15 from app.models.base import SoftDeletionModel
16 from app.models.custom_system_role import UserSystemRole, CustomSysRole
17 from app.models.helpers.versioning import clean_up_string, clean_html
18 from app.models.notification import Notification
19 from app.models.panel_permission import PanelPermission
20 from app.models.permission import Permission
21 from app.models.role import Role
22 from app.models.service import Service
23 from app.models.session import Session
24 from app.models.speaker import Speaker
25 from app.models.user_permission import UserPermission
26 from app.models.users_events_role import UsersEventsRoles as UER
27
28 # System-wide
29 ADMIN = 'admin'
30 SUPERADMIN = 'super_admin'
31
32 MARKETER = 'Marketer'
33 SALES_ADMIN = 'Sales Admin'
34
35 SYS_ROLES_LIST = [
36 ADMIN,
37 SUPERADMIN,
38 ]
39
40 # Event-specific
41 OWNER = 'owner'
42 ORGANIZER = 'organizer'
43 COORGANIZER = 'coorganizer'
44 TRACK_ORGANIZER = 'track_organizer'
45 MODERATOR = 'moderator'
46 ATTENDEE = 'attendee'
47 REGISTRAR = 'registrar'
48
49
50 class User(SoftDeletionModel):
51 """User model class"""
52 __tablename__ = 'users'
53
54 id = db.Column(db.Integer, primary_key=True, autoincrement=True)
55 _email = db.Column(db.String(120), unique=True, nullable=False)
56 _password = db.Column(db.String(128), nullable=False)
57 facebook_id = db.Column(db.BigInteger, unique=True, nullable=True, name='facebook_id')
58 facebook_login_hash = db.Column(db.String, nullable=True)
59 reset_password = db.Column(db.String(128))
60 salt = db.Column(db.String(128))
61 avatar_url = db.Column(db.String)
62 tokens = db.Column(db.Text)
63 first_name = db.Column(db.String, nullable=True)
64 last_name = db.Column(db.String, nullable=True)
65 details = db.Column(db.String)
66 contact = db.Column(db.String)
67 facebook_url = db.Column(db.String)
68 twitter_url = db.Column(db.String)
69 instagram_url = db.Column(db.String)
70 google_plus_url = db.Column(db.String)
71 original_image_url = db.Column(db.String, nullable=True, default=None)
72 thumbnail_image_url = db.Column(db.String)
73 small_image_url = db.Column(db.String)
74 icon_image_url = db.Column(db.String)
75 is_super_admin = db.Column(db.Boolean, default=False)
76 is_admin = db.Column(db.Boolean, default=False)
77 is_sales_admin = db.Column(db.Boolean, default=False)
78 is_marketer = db.Column(db.Boolean, default=False)
79 is_verified = db.Column(db.Boolean, default=False)
80 was_registered_with_order = db.Column(db.Boolean, default=False)
81 last_accessed_at = db.Column(db.DateTime(timezone=True))
82 created_at = db.Column(db.DateTime(timezone=True), default=func.now())
83 # Event Invoice Details
84 billing_contact_name = db.Column(db.String)
85 billing_phone = db.Column(db.String)
86 billing_state = db.Column(db.String)
87 billing_country = db.Column(db.String)
88 billing_tax_info = db.Column(db.String)
89 company = db.Column(db.String)
90 billing_address = db.Column(db.String)
91 billing_city = db.Column(db.String)
92 billing_zip_code = db.Column(db.String)
93 billing_additional_info = db.Column(db.String)
94
95 # relationships
96 speaker = db.relationship('Speaker', backref="user")
97 favourite_events = db.relationship('UserFavouriteEvent', backref="user")
98 session = db.relationship('Session', backref="user")
99 feedback = db.relationship('Feedback', backref="user")
100 access_codes = db.relationship('AccessCode', backref="user")
101 discount_codes = db.relationship('DiscountCode', backref="user")
102 marketer_events = db.relationship(
103 'Event',
104 viewonly=True,
105 secondary='join(UserSystemRole, CustomSysRole,'
106 ' and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == "Marketer"))',
107 primaryjoin='UserSystemRole.user_id == User.id',
108 secondaryjoin='Event.id == UserSystemRole.event_id'
109 )
110 sales_admin_events = db.relationship(
111 'Event',
112 viewonly=True,
113 secondary='join(UserSystemRole, CustomSysRole,'
114 ' and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == "Sales Admin"))',
115 primaryjoin='UserSystemRole.user_id == User.id',
116 secondaryjoin='Event.id == UserSystemRole.event_id')
117
118 @hybrid_property
119 def password(self):
120 """
121 Hybrid property for password
122 :return:
123 """
124 return self._password
125
126 @password.setter
127 def password(self, password):
128 """
129 Setter for _password, saves hashed password, salt and reset_password string
130 :param password:
131 :return:
132 """
133 salt = str(generate_random_salt(), 'utf-8')
134 self._password = str(generate_password_hash(password, salt), 'utf-8')
135 hash_ = random.getrandbits(128)
136 self.reset_password = str(hash_)
137 self.salt = salt
138
139 @hybrid_property
140 def email(self):
141 """
142 Hybrid property for email
143 :return:
144 """
145 return self._email
146
147 @email.setter
148 def email(self, email):
149 """
150 Setter for _email,
151 set user to 'not verified' if email is updated
152 :param email:
153 :return:
154 """
155 if self._email != email:
156 self._email = email
157 self.is_verified = False
158
159 # User Permissions
160 def can_publish_event(self):
161 """
162 Checks if User can publish an event
163 """
164 perm = UserPermission.query.filter_by(name='publish_event').first()
165 if not perm:
166 return self.is_verified
167
168 if self.is_verified is False:
169 return perm.unverified_user
170
171 return True
172
173 def can_create_event(self):
174 """
175 Checks if User can create an event
176 """
177 perm = UserPermission.query.filter_by(name='create_event').first()
178 if not perm:
179 return self.is_verified
180
181 if self.is_verified is False:
182 return perm.unverified_user
183
184 return True
185
186 def has_role(self, event_id):
187 """
188 Checks if user has any of the Roles at an Event.
189 Exclude Attendee Role.
190 """
191 attendee_role = Role.query.filter_by(name=ATTENDEE).first()
192 uer = UER.query.filter(UER.user == self, UER.event_id == event_id,
193 UER.role != attendee_role).first()
194 if uer is None:
195 return False
196 else:
197 return True
198
199 def _is_role(self, role_name, event_id=None):
200 """
201 Checks if a user has a particular Role at an Event.
202 """
203 role = Role.query.filter_by(name=role_name).first()
204 if event_id:
205 uer = UER.query.filter_by(user=self,
206 event_id=event_id,
207 role=role).first()
208 else:
209 uer = UER.query.filter_by(user=self,
210 role=role).first()
211 if not uer:
212 return False
213 else:
214 return True
215
216 def is_owner(self, event_id):
217 return self._is_role(OWNER, event_id)
218
219 def is_organizer(self, event_id):
220 # type: (object) -> object
221 return self._is_role(ORGANIZER, event_id)
222
223 def is_coorganizer(self, event_id):
224 return self._is_role(COORGANIZER, event_id)
225
226 def is_track_organizer(self, event_id):
227 return self._is_role(TRACK_ORGANIZER, event_id)
228
229 def is_moderator(self, event_id):
230 return self._is_role(MODERATOR, event_id)
231
232 def is_registrar(self, event_id):
233 return self._is_role(REGISTRAR, event_id)
234
235 def is_attendee(self, event_id):
236 return self._is_role(ATTENDEE, event_id)
237
238 def has_event_access(self, event_id):
239 return self._is_role(OWNER, event_id) or self._is_role(ORGANIZER, event_id) or \
240 self._is_role(COORGANIZER, event_id)
241
242 @hybrid_property
243 def is_user_owner(self):
244 return self._is_role(OWNER)
245
246 @hybrid_property
247 def is_user_organizer(self):
248 # type: (object) -> object
249 return self._is_role(ORGANIZER)
250
251 @hybrid_property
252 def is_user_coorganizer(self):
253 return self._is_role(COORGANIZER)
254
255 @hybrid_property
256 def is_user_track_organizer(self):
257 return self._is_role(TRACK_ORGANIZER)
258
259 @hybrid_property
260 def is_user_moderator(self):
261 return self._is_role(MODERATOR)
262
263 @hybrid_property
264 def is_user_registrar(self):
265 return self._is_role(REGISTRAR)
266
267 @hybrid_property
268 def is_user_attendee(self):
269 return self._is_role(ATTENDEE)
270
271 def _has_perm(self, operation, service_class, event_id):
272 # Operation names and their corresponding permission in `Permissions`
273 operations = {
274 'create': 'can_create',
275 'read': 'can_read',
276 'update': 'can_update',
277 'delete': 'can_delete',
278 }
279 if operation not in list(operations.keys()):
280 raise ValueError('No such operation defined')
281
282 try:
283 service_name = service_class.get_service_name()
284 except AttributeError:
285 # If `service_class` does not have `get_service_name()`
286 return False
287
288 if self.is_super_admin:
289 return True
290
291 service = Service.query.filter_by(name=service_name).first()
292
293 uer_querylist = UER.query.filter_by(user=self,
294 event_id=event_id)
295 for uer in uer_querylist:
296 role = uer.role
297 perm = Permission.query.filter_by(role=role,
298 service=service).first()
299 if getattr(perm, operations[operation]):
300 return True
301
302 return False
303
304 def can_create(self, service_class, event_id):
305 return self._has_perm('create', service_class, event_id)
306
307 def can_read(self, service_class, event_id):
308 return self._has_perm('read', service_class, event_id)
309
310 def can_update(self, service_class, event_id):
311 return self._has_perm('update', service_class, event_id)
312
313 def can_delete(self, service_class, event_id):
314 return self._has_perm('delete', service_class, event_id)
315
316 def is_speaker_at_session(self, session_id):
317 try:
318 session = Session.query.filter(Session.speakers.any(Speaker.user_id == self.id)).filter(
319 Session.id == session_id).one()
320 if session:
321 return True
322 else:
323 return False
324 except MultipleResultsFound:
325 return False
326 except NoResultFound:
327 return False
328
329 def is_speaker_at_event(self, event_id):
330 try:
331 session = Session.query.filter(Session.speakers.any(Speaker.user_id == self.id)).filter(
332 Session.event_id == event_id).first()
333 if session:
334 return True
335 else:
336 return False
337 except MultipleResultsFound:
338 return False
339 except NoResultFound:
340 return False
341
342 # Flask-Login integration
343 def is_authenticated(self):
344 return True
345
346 def is_active(self):
347 return True
348
349 def is_anonymous(self):
350 return False
351
352 def get_id(self):
353 return self.id
354
355 def is_correct_password(self, password):
356 salt = self.salt
357 password = str(generate_password_hash(password, salt), 'utf-8')
358 if password == self._password:
359 return True
360 return False
361
362 @property
363 def is_staff(self):
364 return self.is_super_admin or self.is_admin
365
366 def is_sys_role(self, role_id):
367 """
368 Check if a user has a Custom System Role assigned.
369 `role_id` is id of a `CustomSysRole` instance.
370 """
371 role = UserSystemRole.query.filter_by(user=self, role_id=role_id).first()
372 return bool(role)
373
374 def first_access_panel(self):
375 """
376 Check if the user is assigned a Custom Role or not
377 This checks if there is an entry containing the current user in the `user_system_roles` table
378 returns panel name if exists otherwise false
379 """
380 custom_role = UserSystemRole.query.filter_by(user=self).first()
381 if not custom_role:
382 return False
383 perm = PanelPermission.query.filter(PanelPermission.custom_system_roles.any(id=custom_role.role_id)).first()
384 if not perm:
385 return False
386 return perm.panel_name
387
388 def can_download_tickets(self, order):
389 permissible_users = [holder.id for holder in order.ticket_holders] + [order.user.id]
390 if self.is_staff or self.is_organizer(order.event.id) or self.id in permissible_users:
391 return True
392 return False
393
394 def can_access_panel(self, panel_name):
395 """
396 Check if user can access an Admin Panel
397 """
398 if self.is_staff:
399 return True
400
401 custom_sys_roles = UserSystemRole.query.filter_by(user=self)
402 for custom_role in custom_sys_roles:
403 if custom_role.role.can_access(panel_name):
404 return True
405
406 return False
407
408 def get_unread_notif_count(self):
409 return get_count(Notification.query.filter_by(user=self, is_read=False))
410
411 def get_unread_notifs(self):
412 """
413 Get unread notifications with titles, humanized receiving time
414 and Mark-as-read links.
415 """
416 notifs = []
417 unread_notifs = Notification.query.filter_by(user=self, is_read=False).order_by(
418 desc(Notification.received_at))
419 for notif in unread_notifs:
420 notifs.append({
421 'title': notif.title,
422 'received_at': humanize.naturaltime(datetime.now(pytz.utc) - notif.received_at),
423 'mark_read': url_for('notifications.mark_as_read', notification_id=notif.id)
424 })
425
426 return notifs
427
428 # update last access time
429 def update_lat(self):
430 self.last_accessed_at = datetime.now(pytz.utc)
431
432 @property
433 def fullname(self):
434 firstname = self.first_name if self.first_name else ''
435 lastname = self.last_name if self.last_name else ''
436 if firstname and lastname:
437 return '{} {}'.format(firstname, lastname)
438 else:
439 return ''
440
441 def __repr__(self):
442 return '<User %r>' % self.email
443
444 def __str__(self):
445 return self.__repr__()
446
447 def __setattr__(self, name, value):
448 if name == 'details':
449 super(User, self).__setattr__(name, clean_html(clean_up_string(value)))
450 else:
451 super(User, self).__setattr__(name, value)
452
453
454 @event.listens_for(User, 'init')
455 def receive_init(target, args, kwargs):
456 target.signup_at = datetime.now(pytz.utc)
457
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/models/user.py b/app/models/user.py
--- a/app/models/user.py
+++ b/app/models/user.py
@@ -387,7 +387,7 @@
def can_download_tickets(self, order):
permissible_users = [holder.id for holder in order.ticket_holders] + [order.user.id]
- if self.is_staff or self.is_organizer(order.event.id) or self.id in permissible_users:
+ if self.is_staff or self.has_event_access(order.event.id) or self.id in permissible_users:
return True
return False
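The one-line swap works because `has_event_access`, already defined on the model above, is just the disjunction of the owner, organizer, and coorganizer checks. The toy below is a self-contained illustration of how that widens the check — `FakeUser` is a stand-in invented for this sketch, not the real SQLAlchemy model:

```python
# Toy stand-in, not the real User model: it mimics only the role helpers
# that can_download_tickets consults, to show which roles pass the old
# check (is_organizer) versus the patched check (has_event_access).
class FakeUser:
    is_staff = False

    def __init__(self, role):
        self.role = role

    def is_organizer(self, event_id):
        return self.role == "organizer"

    def has_event_access(self, event_id):
        return self.role in {"owner", "organizer", "coorganizer"}


for role in ("owner", "organizer", "coorganizer", "attendee"):
    user = FakeUser(role)
    before = user.is_staff or user.is_organizer(42)       # pre-patch check
    after = user.is_staff or user.has_event_access(42)    # post-patch check
    print(role, "before:", before, "after:", after)
# Only the owner and coorganizer gain access under the patched check,
# which matches what the issue reports as missing.
```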
| {"golden_diff": "diff --git a/app/models/user.py b/app/models/user.py\n--- a/app/models/user.py\n+++ b/app/models/user.py\n@@ -387,7 +387,7 @@\n \n def can_download_tickets(self, order):\n permissible_users = [holder.id for holder in order.ticket_holders] + [order.user.id]\n- if self.is_staff or self.is_organizer(order.event.id) or self.id in permissible_users:\n+ if self.is_staff or self.has_event_access(order.event.id) or self.id in permissible_users:\n return True\n return False\n", "issue": "Coorganizer and owners can't download tickets, invoices\nCurrent config only allows the organizer role to download the tickets\n", "before_files": [{"content": "import random\nfrom datetime import datetime\n\nimport humanize\nimport pytz\nfrom flask import url_for\nfrom flask_scrypt import generate_password_hash, generate_random_salt\nfrom sqlalchemy import event, desc\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom app.api.helpers.db import get_count\nfrom app.models import db\nfrom app.models.base import SoftDeletionModel\nfrom app.models.custom_system_role import UserSystemRole, CustomSysRole\nfrom app.models.helpers.versioning import clean_up_string, clean_html\nfrom app.models.notification import Notification\nfrom app.models.panel_permission import PanelPermission\nfrom app.models.permission import Permission\nfrom app.models.role import Role\nfrom app.models.service import Service\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.user_permission import UserPermission\nfrom app.models.users_events_role import UsersEventsRoles as UER\n\n# System-wide\nADMIN = 'admin'\nSUPERADMIN = 'super_admin'\n\nMARKETER = 'Marketer'\nSALES_ADMIN = 'Sales Admin'\n\nSYS_ROLES_LIST = [\n ADMIN,\n SUPERADMIN,\n]\n\n# Event-specific\nOWNER = 'owner'\nORGANIZER = 'organizer'\nCOORGANIZER = 'coorganizer'\nTRACK_ORGANIZER = 'track_organizer'\nMODERATOR = 'moderator'\nATTENDEE = 'attendee'\nREGISTRAR = 'registrar'\n\n\nclass User(SoftDeletionModel):\n \"\"\"User model class\"\"\"\n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n _email = db.Column(db.String(120), unique=True, nullable=False)\n _password = db.Column(db.String(128), nullable=False)\n facebook_id = db.Column(db.BigInteger, unique=True, nullable=True, name='facebook_id')\n facebook_login_hash = db.Column(db.String, nullable=True)\n reset_password = db.Column(db.String(128))\n salt = db.Column(db.String(128))\n avatar_url = db.Column(db.String)\n tokens = db.Column(db.Text)\n first_name = db.Column(db.String, nullable=True)\n last_name = db.Column(db.String, nullable=True)\n details = db.Column(db.String)\n contact = db.Column(db.String)\n facebook_url = db.Column(db.String)\n twitter_url = db.Column(db.String)\n instagram_url = db.Column(db.String)\n google_plus_url = db.Column(db.String)\n original_image_url = db.Column(db.String, nullable=True, default=None)\n thumbnail_image_url = db.Column(db.String)\n small_image_url = db.Column(db.String)\n icon_image_url = db.Column(db.String)\n is_super_admin = db.Column(db.Boolean, default=False)\n is_admin = db.Column(db.Boolean, default=False)\n is_sales_admin = db.Column(db.Boolean, default=False)\n is_marketer = db.Column(db.Boolean, default=False)\n is_verified = db.Column(db.Boolean, default=False)\n was_registered_with_order = db.Column(db.Boolean, default=False)\n last_accessed_at = 
db.Column(db.DateTime(timezone=True))\n created_at = db.Column(db.DateTime(timezone=True), default=func.now())\n # Event Invoice Details\n billing_contact_name = db.Column(db.String)\n billing_phone = db.Column(db.String)\n billing_state = db.Column(db.String)\n billing_country = db.Column(db.String)\n billing_tax_info = db.Column(db.String)\n company = db.Column(db.String)\n billing_address = db.Column(db.String)\n billing_city = db.Column(db.String)\n billing_zip_code = db.Column(db.String)\n billing_additional_info = db.Column(db.String)\n\n # relationships\n speaker = db.relationship('Speaker', backref=\"user\")\n favourite_events = db.relationship('UserFavouriteEvent', backref=\"user\")\n session = db.relationship('Session', backref=\"user\")\n feedback = db.relationship('Feedback', backref=\"user\")\n access_codes = db.relationship('AccessCode', backref=\"user\")\n discount_codes = db.relationship('DiscountCode', backref=\"user\")\n marketer_events = db.relationship(\n 'Event',\n viewonly=True,\n secondary='join(UserSystemRole, CustomSysRole,'\n ' and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == \"Marketer\"))',\n primaryjoin='UserSystemRole.user_id == User.id',\n secondaryjoin='Event.id == UserSystemRole.event_id'\n )\n sales_admin_events = db.relationship(\n 'Event',\n viewonly=True,\n secondary='join(UserSystemRole, CustomSysRole,'\n ' and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == \"Sales Admin\"))',\n primaryjoin='UserSystemRole.user_id == User.id',\n secondaryjoin='Event.id == UserSystemRole.event_id')\n\n @hybrid_property\n def password(self):\n \"\"\"\n Hybrid property for password\n :return:\n \"\"\"\n return self._password\n\n @password.setter\n def password(self, password):\n \"\"\"\n Setter for _password, saves hashed password, salt and reset_password string\n :param password:\n :return:\n \"\"\"\n salt = str(generate_random_salt(), 'utf-8')\n self._password = str(generate_password_hash(password, salt), 'utf-8')\n hash_ = random.getrandbits(128)\n self.reset_password = str(hash_)\n self.salt = salt\n\n @hybrid_property\n def email(self):\n \"\"\"\n Hybrid property for email\n :return:\n \"\"\"\n return self._email\n\n @email.setter\n def email(self, email):\n \"\"\"\n Setter for _email,\n set user to 'not verified' if email is updated\n :param email:\n :return:\n \"\"\"\n if self._email != email:\n self._email = email\n self.is_verified = False\n\n # User Permissions\n def can_publish_event(self):\n \"\"\"\n Checks if User can publish an event\n \"\"\"\n perm = UserPermission.query.filter_by(name='publish_event').first()\n if not perm:\n return self.is_verified\n\n if self.is_verified is False:\n return perm.unverified_user\n\n return True\n\n def can_create_event(self):\n \"\"\"\n Checks if User can create an event\n \"\"\"\n perm = UserPermission.query.filter_by(name='create_event').first()\n if not perm:\n return self.is_verified\n\n if self.is_verified is False:\n return perm.unverified_user\n\n return True\n\n def has_role(self, event_id):\n \"\"\"\n Checks if user has any of the Roles at an Event.\n Exclude Attendee Role.\n \"\"\"\n attendee_role = Role.query.filter_by(name=ATTENDEE).first()\n uer = UER.query.filter(UER.user == self, UER.event_id == event_id,\n UER.role != attendee_role).first()\n if uer is None:\n return False\n else:\n return True\n\n def _is_role(self, role_name, event_id=None):\n \"\"\"\n Checks if a user has a particular Role at an Event.\n \"\"\"\n role = 
Role.query.filter_by(name=role_name).first()\n if event_id:\n uer = UER.query.filter_by(user=self,\n event_id=event_id,\n role=role).first()\n else:\n uer = UER.query.filter_by(user=self,\n role=role).first()\n if not uer:\n return False\n else:\n return True\n\n def is_owner(self, event_id):\n return self._is_role(OWNER, event_id)\n\n def is_organizer(self, event_id):\n # type: (object) -> object\n return self._is_role(ORGANIZER, event_id)\n\n def is_coorganizer(self, event_id):\n return self._is_role(COORGANIZER, event_id)\n\n def is_track_organizer(self, event_id):\n return self._is_role(TRACK_ORGANIZER, event_id)\n\n def is_moderator(self, event_id):\n return self._is_role(MODERATOR, event_id)\n\n def is_registrar(self, event_id):\n return self._is_role(REGISTRAR, event_id)\n\n def is_attendee(self, event_id):\n return self._is_role(ATTENDEE, event_id)\n\n def has_event_access(self, event_id):\n return self._is_role(OWNER, event_id) or self._is_role(ORGANIZER, event_id) or \\\n self._is_role(COORGANIZER, event_id)\n\n @hybrid_property\n def is_user_owner(self):\n return self._is_role(OWNER)\n\n @hybrid_property\n def is_user_organizer(self):\n # type: (object) -> object\n return self._is_role(ORGANIZER)\n\n @hybrid_property\n def is_user_coorganizer(self):\n return self._is_role(COORGANIZER)\n\n @hybrid_property\n def is_user_track_organizer(self):\n return self._is_role(TRACK_ORGANIZER)\n\n @hybrid_property\n def is_user_moderator(self):\n return self._is_role(MODERATOR)\n\n @hybrid_property\n def is_user_registrar(self):\n return self._is_role(REGISTRAR)\n\n @hybrid_property\n def is_user_attendee(self):\n return self._is_role(ATTENDEE)\n\n def _has_perm(self, operation, service_class, event_id):\n # Operation names and their corresponding permission in `Permissions`\n operations = {\n 'create': 'can_create',\n 'read': 'can_read',\n 'update': 'can_update',\n 'delete': 'can_delete',\n }\n if operation not in list(operations.keys()):\n raise ValueError('No such operation defined')\n\n try:\n service_name = service_class.get_service_name()\n except AttributeError:\n # If `service_class` does not have `get_service_name()`\n return False\n\n if self.is_super_admin:\n return True\n\n service = Service.query.filter_by(name=service_name).first()\n\n uer_querylist = UER.query.filter_by(user=self,\n event_id=event_id)\n for uer in uer_querylist:\n role = uer.role\n perm = Permission.query.filter_by(role=role,\n service=service).first()\n if getattr(perm, operations[operation]):\n return True\n\n return False\n\n def can_create(self, service_class, event_id):\n return self._has_perm('create', service_class, event_id)\n\n def can_read(self, service_class, event_id):\n return self._has_perm('read', service_class, event_id)\n\n def can_update(self, service_class, event_id):\n return self._has_perm('update', service_class, event_id)\n\n def can_delete(self, service_class, event_id):\n return self._has_perm('delete', service_class, event_id)\n\n def is_speaker_at_session(self, session_id):\n try:\n session = Session.query.filter(Session.speakers.any(Speaker.user_id == self.id)).filter(\n Session.id == session_id).one()\n if session:\n return True\n else:\n return False\n except MultipleResultsFound:\n return False\n except NoResultFound:\n return False\n\n def is_speaker_at_event(self, event_id):\n try:\n session = Session.query.filter(Session.speakers.any(Speaker.user_id == self.id)).filter(\n Session.event_id == event_id).first()\n if session:\n return True\n else:\n return False\n except 
MultipleResultsFound:\n return False\n except NoResultFound:\n return False\n\n # Flask-Login integration\n def is_authenticated(self):\n return True\n\n def is_active(self):\n return True\n\n def is_anonymous(self):\n return False\n\n def get_id(self):\n return self.id\n\n def is_correct_password(self, password):\n salt = self.salt\n password = str(generate_password_hash(password, salt), 'utf-8')\n if password == self._password:\n return True\n return False\n\n @property\n def is_staff(self):\n return self.is_super_admin or self.is_admin\n\n def is_sys_role(self, role_id):\n \"\"\"\n Check if a user has a Custom System Role assigned.\n `role_id` is id of a `CustomSysRole` instance.\n \"\"\"\n role = UserSystemRole.query.filter_by(user=self, role_id=role_id).first()\n return bool(role)\n\n def first_access_panel(self):\n \"\"\"\n Check if the user is assigned a Custom Role or not\n This checks if there is an entry containing the current user in the `user_system_roles` table\n returns panel name if exists otherwise false\n \"\"\"\n custom_role = UserSystemRole.query.filter_by(user=self).first()\n if not custom_role:\n return False\n perm = PanelPermission.query.filter(PanelPermission.custom_system_roles.any(id=custom_role.role_id)).first()\n if not perm:\n return False\n return perm.panel_name\n\n def can_download_tickets(self, order):\n permissible_users = [holder.id for holder in order.ticket_holders] + [order.user.id]\n if self.is_staff or self.is_organizer(order.event.id) or self.id in permissible_users:\n return True\n return False\n\n def can_access_panel(self, panel_name):\n \"\"\"\n Check if user can access an Admin Panel\n \"\"\"\n if self.is_staff:\n return True\n\n custom_sys_roles = UserSystemRole.query.filter_by(user=self)\n for custom_role in custom_sys_roles:\n if custom_role.role.can_access(panel_name):\n return True\n\n return False\n\n def get_unread_notif_count(self):\n return get_count(Notification.query.filter_by(user=self, is_read=False))\n\n def get_unread_notifs(self):\n \"\"\"\n Get unread notifications with titles, humanized receiving time\n and Mark-as-read links.\n \"\"\"\n notifs = []\n unread_notifs = Notification.query.filter_by(user=self, is_read=False).order_by(\n desc(Notification.received_at))\n for notif in unread_notifs:\n notifs.append({\n 'title': notif.title,\n 'received_at': humanize.naturaltime(datetime.now(pytz.utc) - notif.received_at),\n 'mark_read': url_for('notifications.mark_as_read', notification_id=notif.id)\n })\n\n return notifs\n\n # update last access time\n def update_lat(self):\n self.last_accessed_at = datetime.now(pytz.utc)\n\n @property\n def fullname(self):\n firstname = self.first_name if self.first_name else ''\n lastname = self.last_name if self.last_name else ''\n if firstname and lastname:\n return '{} {}'.format(firstname, lastname)\n else:\n return ''\n\n def __repr__(self):\n return '<User %r>' % self.email\n\n def __str__(self):\n return self.__repr__()\n\n def __setattr__(self, name, value):\n if name == 'details':\n super(User, self).__setattr__(name, clean_html(clean_up_string(value)))\n else:\n super(User, self).__setattr__(name, value)\n\n\[email protected]_for(User, 'init')\ndef receive_init(target, args, kwargs):\n target.signup_at = datetime.now(pytz.utc)\n", "path": "app/models/user.py"}], "after_files": [{"content": "import random\nfrom datetime import datetime\n\nimport humanize\nimport pytz\nfrom flask import url_for\nfrom flask_scrypt import generate_password_hash, generate_random_salt\nfrom sqlalchemy import 
event, desc\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom app.api.helpers.db import get_count\nfrom app.models import db\nfrom app.models.base import SoftDeletionModel\nfrom app.models.custom_system_role import UserSystemRole, CustomSysRole\nfrom app.models.helpers.versioning import clean_up_string, clean_html\nfrom app.models.notification import Notification\nfrom app.models.panel_permission import PanelPermission\nfrom app.models.permission import Permission\nfrom app.models.role import Role\nfrom app.models.service import Service\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.user_permission import UserPermission\nfrom app.models.users_events_role import UsersEventsRoles as UER\n\n# System-wide\nADMIN = 'admin'\nSUPERADMIN = 'super_admin'\n\nMARKETER = 'Marketer'\nSALES_ADMIN = 'Sales Admin'\n\nSYS_ROLES_LIST = [\n ADMIN,\n SUPERADMIN,\n]\n\n# Event-specific\nOWNER = 'owner'\nORGANIZER = 'organizer'\nCOORGANIZER = 'coorganizer'\nTRACK_ORGANIZER = 'track_organizer'\nMODERATOR = 'moderator'\nATTENDEE = 'attendee'\nREGISTRAR = 'registrar'\n\n\nclass User(SoftDeletionModel):\n \"\"\"User model class\"\"\"\n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n _email = db.Column(db.String(120), unique=True, nullable=False)\n _password = db.Column(db.String(128), nullable=False)\n facebook_id = db.Column(db.BigInteger, unique=True, nullable=True, name='facebook_id')\n facebook_login_hash = db.Column(db.String, nullable=True)\n reset_password = db.Column(db.String(128))\n salt = db.Column(db.String(128))\n avatar_url = db.Column(db.String)\n tokens = db.Column(db.Text)\n first_name = db.Column(db.String, nullable=True)\n last_name = db.Column(db.String, nullable=True)\n details = db.Column(db.String)\n contact = db.Column(db.String)\n facebook_url = db.Column(db.String)\n twitter_url = db.Column(db.String)\n instagram_url = db.Column(db.String)\n google_plus_url = db.Column(db.String)\n original_image_url = db.Column(db.String, nullable=True, default=None)\n thumbnail_image_url = db.Column(db.String)\n small_image_url = db.Column(db.String)\n icon_image_url = db.Column(db.String)\n is_super_admin = db.Column(db.Boolean, default=False)\n is_admin = db.Column(db.Boolean, default=False)\n is_sales_admin = db.Column(db.Boolean, default=False)\n is_marketer = db.Column(db.Boolean, default=False)\n is_verified = db.Column(db.Boolean, default=False)\n was_registered_with_order = db.Column(db.Boolean, default=False)\n last_accessed_at = db.Column(db.DateTime(timezone=True))\n created_at = db.Column(db.DateTime(timezone=True), default=func.now())\n # Event Invoice Details\n billing_contact_name = db.Column(db.String)\n billing_phone = db.Column(db.String)\n billing_state = db.Column(db.String)\n billing_country = db.Column(db.String)\n billing_tax_info = db.Column(db.String)\n company = db.Column(db.String)\n billing_address = db.Column(db.String)\n billing_city = db.Column(db.String)\n billing_zip_code = db.Column(db.String)\n billing_additional_info = db.Column(db.String)\n\n # relationships\n speaker = db.relationship('Speaker', backref=\"user\")\n favourite_events = db.relationship('UserFavouriteEvent', backref=\"user\")\n session = db.relationship('Session', backref=\"user\")\n feedback = db.relationship('Feedback', backref=\"user\")\n access_codes = db.relationship('AccessCode', 
backref=\"user\")\n discount_codes = db.relationship('DiscountCode', backref=\"user\")\n marketer_events = db.relationship(\n 'Event',\n viewonly=True,\n secondary='join(UserSystemRole, CustomSysRole,'\n ' and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == \"Marketer\"))',\n primaryjoin='UserSystemRole.user_id == User.id',\n secondaryjoin='Event.id == UserSystemRole.event_id'\n )\n sales_admin_events = db.relationship(\n 'Event',\n viewonly=True,\n secondary='join(UserSystemRole, CustomSysRole,'\n ' and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == \"Sales Admin\"))',\n primaryjoin='UserSystemRole.user_id == User.id',\n secondaryjoin='Event.id == UserSystemRole.event_id')\n\n @hybrid_property\n def password(self):\n \"\"\"\n Hybrid property for password\n :return:\n \"\"\"\n return self._password\n\n @password.setter\n def password(self, password):\n \"\"\"\n Setter for _password, saves hashed password, salt and reset_password string\n :param password:\n :return:\n \"\"\"\n salt = str(generate_random_salt(), 'utf-8')\n self._password = str(generate_password_hash(password, salt), 'utf-8')\n hash_ = random.getrandbits(128)\n self.reset_password = str(hash_)\n self.salt = salt\n\n @hybrid_property\n def email(self):\n \"\"\"\n Hybrid property for email\n :return:\n \"\"\"\n return self._email\n\n @email.setter\n def email(self, email):\n \"\"\"\n Setter for _email,\n set user to 'not verified' if email is updated\n :param email:\n :return:\n \"\"\"\n if self._email != email:\n self._email = email\n self.is_verified = False\n\n # User Permissions\n def can_publish_event(self):\n \"\"\"\n Checks if User can publish an event\n \"\"\"\n perm = UserPermission.query.filter_by(name='publish_event').first()\n if not perm:\n return self.is_verified\n\n if self.is_verified is False:\n return perm.unverified_user\n\n return True\n\n def can_create_event(self):\n \"\"\"\n Checks if User can create an event\n \"\"\"\n perm = UserPermission.query.filter_by(name='create_event').first()\n if not perm:\n return self.is_verified\n\n if self.is_verified is False:\n return perm.unverified_user\n\n return True\n\n def has_role(self, event_id):\n \"\"\"\n Checks if user has any of the Roles at an Event.\n Exclude Attendee Role.\n \"\"\"\n attendee_role = Role.query.filter_by(name=ATTENDEE).first()\n uer = UER.query.filter(UER.user == self, UER.event_id == event_id,\n UER.role != attendee_role).first()\n if uer is None:\n return False\n else:\n return True\n\n def _is_role(self, role_name, event_id=None):\n \"\"\"\n Checks if a user has a particular Role at an Event.\n \"\"\"\n role = Role.query.filter_by(name=role_name).first()\n if event_id:\n uer = UER.query.filter_by(user=self,\n event_id=event_id,\n role=role).first()\n else:\n uer = UER.query.filter_by(user=self,\n role=role).first()\n if not uer:\n return False\n else:\n return True\n\n def is_owner(self, event_id):\n return self._is_role(OWNER, event_id)\n\n def is_organizer(self, event_id):\n # type: (object) -> object\n return self._is_role(ORGANIZER, event_id)\n\n def is_coorganizer(self, event_id):\n return self._is_role(COORGANIZER, event_id)\n\n def is_track_organizer(self, event_id):\n return self._is_role(TRACK_ORGANIZER, event_id)\n\n def is_moderator(self, event_id):\n return self._is_role(MODERATOR, event_id)\n\n def is_registrar(self, event_id):\n return self._is_role(REGISTRAR, event_id)\n\n def is_attendee(self, event_id):\n return self._is_role(ATTENDEE, event_id)\n\n def has_event_access(self, 
event_id):\n return self._is_role(OWNER, event_id) or self._is_role(ORGANIZER, event_id) or \\\n self._is_role(COORGANIZER, event_id)\n\n @hybrid_property\n def is_user_owner(self):\n return self._is_role(OWNER)\n\n @hybrid_property\n def is_user_organizer(self):\n # type: (object) -> object\n return self._is_role(ORGANIZER)\n\n @hybrid_property\n def is_user_coorganizer(self):\n return self._is_role(COORGANIZER)\n\n @hybrid_property\n def is_user_track_organizer(self):\n return self._is_role(TRACK_ORGANIZER)\n\n @hybrid_property\n def is_user_moderator(self):\n return self._is_role(MODERATOR)\n\n @hybrid_property\n def is_user_registrar(self):\n return self._is_role(REGISTRAR)\n\n @hybrid_property\n def is_user_attendee(self):\n return self._is_role(ATTENDEE)\n\n def _has_perm(self, operation, service_class, event_id):\n # Operation names and their corresponding permission in `Permissions`\n operations = {\n 'create': 'can_create',\n 'read': 'can_read',\n 'update': 'can_update',\n 'delete': 'can_delete',\n }\n if operation not in list(operations.keys()):\n raise ValueError('No such operation defined')\n\n try:\n service_name = service_class.get_service_name()\n except AttributeError:\n # If `service_class` does not have `get_service_name()`\n return False\n\n if self.is_super_admin:\n return True\n\n service = Service.query.filter_by(name=service_name).first()\n\n uer_querylist = UER.query.filter_by(user=self,\n event_id=event_id)\n for uer in uer_querylist:\n role = uer.role\n perm = Permission.query.filter_by(role=role,\n service=service).first()\n if getattr(perm, operations[operation]):\n return True\n\n return False\n\n def can_create(self, service_class, event_id):\n return self._has_perm('create', service_class, event_id)\n\n def can_read(self, service_class, event_id):\n return self._has_perm('read', service_class, event_id)\n\n def can_update(self, service_class, event_id):\n return self._has_perm('update', service_class, event_id)\n\n def can_delete(self, service_class, event_id):\n return self._has_perm('delete', service_class, event_id)\n\n def is_speaker_at_session(self, session_id):\n try:\n session = Session.query.filter(Session.speakers.any(Speaker.user_id == self.id)).filter(\n Session.id == session_id).one()\n if session:\n return True\n else:\n return False\n except MultipleResultsFound:\n return False\n except NoResultFound:\n return False\n\n def is_speaker_at_event(self, event_id):\n try:\n session = Session.query.filter(Session.speakers.any(Speaker.user_id == self.id)).filter(\n Session.event_id == event_id).first()\n if session:\n return True\n else:\n return False\n except MultipleResultsFound:\n return False\n except NoResultFound:\n return False\n\n # Flask-Login integration\n def is_authenticated(self):\n return True\n\n def is_active(self):\n return True\n\n def is_anonymous(self):\n return False\n\n def get_id(self):\n return self.id\n\n def is_correct_password(self, password):\n salt = self.salt\n password = str(generate_password_hash(password, salt), 'utf-8')\n if password == self._password:\n return True\n return False\n\n @property\n def is_staff(self):\n return self.is_super_admin or self.is_admin\n\n def is_sys_role(self, role_id):\n \"\"\"\n Check if a user has a Custom System Role assigned.\n `role_id` is id of a `CustomSysRole` instance.\n \"\"\"\n role = UserSystemRole.query.filter_by(user=self, role_id=role_id).first()\n return bool(role)\n\n def first_access_panel(self):\n \"\"\"\n Check if the user is assigned a Custom Role or not\n This checks 
if there is an entry containing the current user in the `user_system_roles` table\n returns panel name if exists otherwise false\n \"\"\"\n custom_role = UserSystemRole.query.filter_by(user=self).first()\n if not custom_role:\n return False\n perm = PanelPermission.query.filter(PanelPermission.custom_system_roles.any(id=custom_role.role_id)).first()\n if not perm:\n return False\n return perm.panel_name\n\n def can_download_tickets(self, order):\n permissible_users = [holder.id for holder in order.ticket_holders] + [order.user.id]\n if self.is_staff or self.has_event_access(order.event.id) or self.id in permissible_users:\n return True\n return False\n\n def can_access_panel(self, panel_name):\n \"\"\"\n Check if user can access an Admin Panel\n \"\"\"\n if self.is_staff:\n return True\n\n custom_sys_roles = UserSystemRole.query.filter_by(user=self)\n for custom_role in custom_sys_roles:\n if custom_role.role.can_access(panel_name):\n return True\n\n return False\n\n def get_unread_notif_count(self):\n return get_count(Notification.query.filter_by(user=self, is_read=False))\n\n def get_unread_notifs(self):\n \"\"\"\n Get unread notifications with titles, humanized receiving time\n and Mark-as-read links.\n \"\"\"\n notifs = []\n unread_notifs = Notification.query.filter_by(user=self, is_read=False).order_by(\n desc(Notification.received_at))\n for notif in unread_notifs:\n notifs.append({\n 'title': notif.title,\n 'received_at': humanize.naturaltime(datetime.now(pytz.utc) - notif.received_at),\n 'mark_read': url_for('notifications.mark_as_read', notification_id=notif.id)\n })\n\n return notifs\n\n # update last access time\n def update_lat(self):\n self.last_accessed_at = datetime.now(pytz.utc)\n\n @property\n def fullname(self):\n firstname = self.first_name if self.first_name else ''\n lastname = self.last_name if self.last_name else ''\n if firstname and lastname:\n return '{} {}'.format(firstname, lastname)\n else:\n return ''\n\n def __repr__(self):\n return '<User %r>' % self.email\n\n def __str__(self):\n return self.__repr__()\n\n def __setattr__(self, name, value):\n if name == 'details':\n super(User, self).__setattr__(name, clean_html(clean_up_string(value)))\n else:\n super(User, self).__setattr__(name, value)\n\n\[email protected]_for(User, 'init')\ndef receive_init(target, args, kwargs):\n target.signup_at = datetime.now(pytz.utc)\n", "path": "app/models/user.py"}]} |
gh_patches_debug_1455 | rasdani/github-patches | git_diff | zulip__zulip-20678 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support restarting the server without rejecting any requests
In theory, it should be possible with uwsgi and its `master=true` setting to restart the server with 0 requests being rejected due to the service being down (the approach seems to be the obvious thing of queuing requests in the socket until the new processes are up). I tried this briefly with our supervisord and ran into problems where it would just fail to restart, so some investigation is required into how to do this properly.
--- END ISSUE ---
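As a brief aside on the claim above: whether a restart is truly seamless can be checked empirically by hammering the application socket while the restart runs and counting refused connections. The sketch below is illustrative only — the host, port, and timings are placeholders, and it is not part of the Zulip codebase.

```python
# Illustrative probe only -- not part of the Zulip codebase. It counts
# connection failures while the application server is restarted, to check
# the "zero rejected requests" goal described in the issue.
import socket
import time


def count_connection_failures(host: str = "127.0.0.1", port: int = 8080,
                              duration: float = 30.0) -> int:
    """Attempt connections to (host, port) for `duration` seconds and return
    how many attempts were refused or timed out."""
    failures = 0
    deadline = time.monotonic() + duration
    while time.monotonic() < deadline:
        try:
            # If the uwsgi master process keeps the listening socket open,
            # new connections should simply queue in the kernel backlog
            # while the worker processes are being restarted.
            with socket.create_connection((host, port), timeout=2):
                pass
        except OSError:
            failures += 1
        time.sleep(0.05)
    return failures


if __name__ == "__main__":
    print(f"connection failures observed: {count_connection_failures()}")
```

Running it in one terminal while triggering the restart in another gives a direct measure of any window during which requests would have been rejected.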
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/zulip_tools.py`
Content:
```
1 #!/usr/bin/env python3
2 import argparse
3 import configparser
4 import datetime
5 import functools
6 import hashlib
7 import json
8 import logging
9 import os
10 import pwd
11 import random
12 import re
13 import shlex
14 import shutil
15 import subprocess
16 import sys
17 import time
18 import uuid
19 from typing import Any, Dict, List, Sequence, Set
20 from urllib.parse import SplitResult
21
22 DEPLOYMENTS_DIR = "/home/zulip/deployments"
23 LOCK_DIR = os.path.join(DEPLOYMENTS_DIR, "lock")
24 TIMESTAMP_FORMAT = "%Y-%m-%d-%H-%M-%S"
25
26 # Color codes
27 OKBLUE = "\033[94m"
28 OKGREEN = "\033[92m"
29 WARNING = "\033[93m"
30 FAIL = "\033[91m"
31 ENDC = "\033[0m"
32 BLACKONYELLOW = "\x1b[0;30;43m"
33 WHITEONRED = "\x1b[0;37;41m"
34 BOLDRED = "\x1B[1;31m"
35
36 GREEN = "\x1b[32m"
37 YELLOW = "\x1b[33m"
38 BLUE = "\x1b[34m"
39 MAGENTA = "\x1b[35m"
40 CYAN = "\x1b[36m"
41
42
43 def overwrite_symlink(src: str, dst: str) -> None:
44 dir, base = os.path.split(dst)
45 while True:
46 # Note: creating a temporary filename like this is not generally
47 # secure. It’s fine in this case because os.symlink refuses to
48 # overwrite an existing target; we handle the error and try again.
49 tmp = os.path.join(dir, f".{base}.{random.randrange(1 << 40):010x}")
50 try:
51 os.symlink(src, tmp)
52 except FileExistsError:
53 continue
54 break
55 try:
56 os.rename(tmp, dst)
57 except BaseException:
58 os.remove(tmp)
59 raise
60
61
62 def parse_cache_script_args(description: str) -> argparse.Namespace:
63 # Keep this in sync with clean_unused_caches in provision_inner.py
64 parser = argparse.ArgumentParser(description=description)
65
66 parser.add_argument(
67 "--threshold",
68 dest="threshold_days",
69 type=int,
70 default=14,
71 metavar="<days>",
72 help="Any cache which is not in "
73 "use by a deployment not older than threshold days(current "
74 "installation in dev) and older than threshold days will be "
75 "deleted. (defaults to 14)",
76 )
77 parser.add_argument(
78 "--dry-run",
79 action="store_true",
80 help="If specified then script will only print the caches "
81 "that it will delete/keep back. It will not delete any cache.",
82 )
83 parser.add_argument(
84 "--verbose",
85 action="store_true",
86 help="If specified then script will print a detailed report "
87 "of what is being will deleted/kept back.",
88 )
89 parser.add_argument(
90 "--no-print-headings",
91 dest="no_headings",
92 action="store_true",
93 help="If specified then script will not print headings for "
94 "what will be deleted/kept back.",
95 )
96
97 args = parser.parse_args()
98 args.verbose |= args.dry_run # Always print a detailed report in case of dry run.
99 return args
100
101
102 def get_deploy_root() -> str:
103 return os.path.realpath(
104 os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..")),
105 )
106
107
108 def get_deployment_version(extract_path: str) -> str:
109 version = "0.0.0"
110 for item in os.listdir(extract_path):
111 item_path = os.path.join(extract_path, item)
112 if item.startswith("zulip-server") and os.path.isdir(item_path):
113 with open(os.path.join(item_path, "version.py")) as f:
114 result = re.search('ZULIP_VERSION = "(.*)"', f.read())
115 if result:
116 version = result.groups()[0]
117 break
118 return version
119
120
121 def is_invalid_upgrade(current_version: str, new_version: str) -> bool:
122 if new_version > "1.4.3" and current_version <= "1.3.10":
123 return True
124 return False
125
126
127 def get_zulip_pwent() -> pwd.struct_passwd:
128 deploy_root_uid = os.stat(get_deploy_root()).st_uid
129 if deploy_root_uid != 0:
130 return pwd.getpwuid(deploy_root_uid)
131
132 # In the case that permissions got messed up and the deployment
133 # directory is unexpectedly owned by root, we fallback to the
134 # `zulip` user as that's the correct value in production.
135 return pwd.getpwnam("zulip")
136
137
138 def get_postgres_pwent() -> pwd.struct_passwd:
139 try:
140 return pwd.getpwnam("postgres")
141 except KeyError:
142 return get_zulip_pwent()
143
144
145 def su_to_zulip(save_suid: bool = False) -> None:
146 """Warning: su_to_zulip assumes that the zulip checkout is owned by
147 the zulip user (or whatever normal user is running the Zulip
148 installation). It should never be run from the installer or other
149 production contexts before /home/zulip/deployments/current is
150 created."""
151 pwent = get_zulip_pwent()
152 os.setgid(pwent.pw_gid)
153 if save_suid:
154 os.setresuid(pwent.pw_uid, pwent.pw_uid, os.getuid())
155 else:
156 os.setuid(pwent.pw_uid)
157 os.environ["HOME"] = pwent.pw_dir
158
159
160 def make_deploy_path() -> str:
161 timestamp = datetime.datetime.now().strftime(TIMESTAMP_FORMAT)
162 return os.path.join(DEPLOYMENTS_DIR, timestamp)
163
164
165 TEMPLATE_DATABASE_DIR = "test-backend/databases"
166
167
168 def get_dev_uuid_var_path(create_if_missing: bool = False) -> str:
169 zulip_path = get_deploy_root()
170 uuid_path = os.path.join(os.path.realpath(os.path.dirname(zulip_path)), ".zulip-dev-uuid")
171 if os.path.exists(uuid_path):
172 with open(uuid_path) as f:
173 zulip_uuid = f.read().strip()
174 else:
175 if create_if_missing:
176 zulip_uuid = str(uuid.uuid4())
177 # We need root access here, since the path will be under /srv/ in the
178 # development environment.
179 run_as_root(["sh", "-c", 'echo "$1" > "$2"', "-", zulip_uuid, uuid_path])
180 else:
181 raise AssertionError("Missing UUID file; please run tools/provision!")
182
183 result_path = os.path.join(zulip_path, "var", zulip_uuid)
184 os.makedirs(result_path, exist_ok=True)
185 return result_path
186
187
188 def get_deployment_lock(error_rerun_script: str) -> None:
189 start_time = time.time()
190 got_lock = False
191 while time.time() - start_time < 300:
192 try:
193 os.mkdir(LOCK_DIR)
194 got_lock = True
195 break
196 except OSError:
197 print(
198 WARNING
199 + "Another deployment in progress; waiting for lock... "
200 + f"(If no deployment is running, rmdir {LOCK_DIR})"
201 + ENDC,
202 flush=True,
203 )
204 time.sleep(3)
205
206 if not got_lock:
207 print(
208 FAIL
209 + "Deployment already in progress. Please run\n"
210 + f" {error_rerun_script}\n"
211 + "manually when the previous deployment finishes, or run\n"
212 + f" rmdir {LOCK_DIR}\n"
213 + "if the previous deployment crashed."
214 + ENDC
215 )
216 sys.exit(1)
217
218
219 def release_deployment_lock() -> None:
220 shutil.rmtree(LOCK_DIR)
221
222
223 def run(args: Sequence[str], **kwargs: Any) -> None:
224 # Output what we're doing in the `set -x` style
225 print("+ {}".format(" ".join(map(shlex.quote, args))), flush=True)
226
227 try:
228 subprocess.check_call(args, **kwargs)
229 except subprocess.CalledProcessError:
230 print()
231 print(
232 WHITEONRED
233 + "Error running a subcommand of {}: {}".format(
234 sys.argv[0],
235 " ".join(map(shlex.quote, args)),
236 )
237 + ENDC
238 )
239 print(WHITEONRED + "Actual error output for the subcommand is just above this." + ENDC)
240 print()
241 sys.exit(1)
242
243
244 def log_management_command(cmd: Sequence[str], log_path: str) -> None:
245 log_dir = os.path.dirname(log_path)
246 if not os.path.exists(log_dir):
247 os.makedirs(log_dir)
248
249 formatter = logging.Formatter("%(asctime)s: %(message)s")
250 file_handler = logging.FileHandler(log_path)
251 file_handler.setFormatter(formatter)
252 logger = logging.getLogger("zulip.management")
253 logger.addHandler(file_handler)
254 logger.setLevel(logging.INFO)
255
256 logger.info("Ran %s", " ".join(map(shlex.quote, cmd)))
257
258
259 def get_environment() -> str:
260 if os.path.exists(DEPLOYMENTS_DIR):
261 return "prod"
262 return "dev"
263
264
265 def get_recent_deployments(threshold_days: int) -> Set[str]:
266 # Returns a list of deployments not older than threshold days
267 # including `/root/zulip` directory if it exists.
268 recent = set()
269 threshold_date = datetime.datetime.now() - datetime.timedelta(days=threshold_days)
270 for dir_name in os.listdir(DEPLOYMENTS_DIR):
271 target_dir = os.path.join(DEPLOYMENTS_DIR, dir_name)
272 if not os.path.isdir(target_dir):
273 # Skip things like uwsgi sockets, symlinks, etc.
274 continue
275 if not os.path.exists(os.path.join(target_dir, "zerver")):
276 # Skip things like "lock" that aren't actually a deployment directory
277 continue
278 try:
279 date = datetime.datetime.strptime(dir_name, TIMESTAMP_FORMAT)
280 if date >= threshold_date:
281 recent.add(target_dir)
282 except ValueError:
283 # Always include deployments whose name is not in the format of a timestamp.
284 recent.add(target_dir)
285 # If it is a symlink then include the target as well.
286 if os.path.islink(target_dir):
287 recent.add(os.path.realpath(target_dir))
288 if os.path.exists("/root/zulip"):
289 recent.add("/root/zulip")
290 return recent
291
292
293 def get_threshold_timestamp(threshold_days: int) -> int:
294 # Given number of days, this function returns timestamp corresponding
295 # to the time prior to given number of days.
296 threshold = datetime.datetime.now() - datetime.timedelta(days=threshold_days)
297 threshold_timestamp = int(time.mktime(threshold.utctimetuple()))
298 return threshold_timestamp
299
300
301 def get_caches_to_be_purged(
302 caches_dir: str, caches_in_use: Set[str], threshold_days: int
303 ) -> Set[str]:
304 # Given a directory containing caches, a list of caches in use
305 # and threshold days, this function return a list of caches
306 # which can be purged. Remove the cache only if it is:
307 # 1: Not in use by the current installation(in dev as well as in prod).
308 # 2: Not in use by a deployment not older than `threshold_days`(in prod).
309 # 3: Not in use by '/root/zulip'.
310 # 4: Not older than `threshold_days`.
311 caches_to_purge = set()
312 threshold_timestamp = get_threshold_timestamp(threshold_days)
313 for cache_dir_base in os.listdir(caches_dir):
314 cache_dir = os.path.join(caches_dir, cache_dir_base)
315 if cache_dir in caches_in_use:
316 # Never purge a cache which is in use.
317 continue
318 if os.path.getctime(cache_dir) < threshold_timestamp:
319 caches_to_purge.add(cache_dir)
320 return caches_to_purge
321
322
323 def purge_unused_caches(
324 caches_dir: str,
325 caches_in_use: Set[str],
326 cache_type: str,
327 args: argparse.Namespace,
328 ) -> None:
329 all_caches = {os.path.join(caches_dir, cache) for cache in os.listdir(caches_dir)}
330 caches_to_purge = get_caches_to_be_purged(caches_dir, caches_in_use, args.threshold_days)
331 caches_to_keep = all_caches - caches_to_purge
332
333 may_be_perform_purging(
334 caches_to_purge, caches_to_keep, cache_type, args.dry_run, args.verbose, args.no_headings
335 )
336 if args.verbose:
337 print("Done!")
338
339
340 def generate_sha1sum_emoji(zulip_path: str) -> str:
341 sha = hashlib.sha1()
342
343 filenames = [
344 "static/assets/zulip-emoji/zulip.png",
345 "tools/setup/emoji/emoji_map.json",
346 "tools/setup/emoji/build_emoji",
347 "tools/setup/emoji/emoji_setup_utils.py",
348 "tools/setup/emoji/emoji_names.py",
349 ]
350
351 for filename in filenames:
352 file_path = os.path.join(zulip_path, filename)
353 with open(file_path, "rb") as reader:
354 sha.update(reader.read())
355
356 # Take into account the version of `emoji-datasource-google` package
357 # while generating success stamp.
358 PACKAGE_FILE_PATH = os.path.join(zulip_path, "package.json")
359 with open(PACKAGE_FILE_PATH) as fp:
360 parsed_package_file = json.load(fp)
361 dependency_data = parsed_package_file["dependencies"]
362
363 if "emoji-datasource-google" in dependency_data:
364 with open(os.path.join(zulip_path, "yarn.lock")) as fp:
365 (emoji_datasource_version,) = re.findall(
366 r"^emoji-datasource-google@"
367 + re.escape(dependency_data["emoji-datasource-google"])
368 + r':\n version "(.*)"',
369 fp.read(),
370 re.M,
371 )
372 else:
373 emoji_datasource_version = "0"
374 sha.update(emoji_datasource_version.encode())
375
376 return sha.hexdigest()
377
378
379 def may_be_perform_purging(
380 dirs_to_purge: Set[str],
381 dirs_to_keep: Set[str],
382 dir_type: str,
383 dry_run: bool,
384 verbose: bool,
385 no_headings: bool,
386 ) -> None:
387 if dry_run:
388 print("Performing a dry run...")
389 if not no_headings:
390 print(f"Cleaning unused {dir_type}s...")
391
392 for directory in dirs_to_purge:
393 if verbose:
394 print(f"Cleaning unused {dir_type}: {directory}")
395 if not dry_run:
396 run_as_root(["rm", "-rf", directory])
397
398 for directory in dirs_to_keep:
399 if verbose:
400 print(f"Keeping used {dir_type}: {directory}")
401
402
403 @functools.lru_cache(None)
404 def parse_os_release() -> Dict[str, str]:
405 """
406 Example of the useful subset of the data:
407 {
408 'ID': 'ubuntu',
409 'VERSION_ID': '18.04',
410 'NAME': 'Ubuntu',
411 'VERSION': '18.04.3 LTS (Bionic Beaver)',
412 'PRETTY_NAME': 'Ubuntu 18.04.3 LTS',
413 }
414
415 VERSION_CODENAME (e.g. 'bionic') is nice and readable to Ubuntu
416 developers, but we avoid using it, as it is not available on
417 RHEL-based platforms.
418 """
419 distro_info = {} # type: Dict[str, str]
420 with open("/etc/os-release") as fp:
421 for line in fp:
422 line = line.strip()
423 if not line or line.startswith("#"):
424 # The line may be blank or a comment, see:
425 # https://www.freedesktop.org/software/systemd/man/os-release.html
426 continue
427 k, v = line.split("=", 1)
428 [distro_info[k]] = shlex.split(v)
429 return distro_info
430
431
432 @functools.lru_cache(None)
433 def os_families() -> Set[str]:
434 """
435 Known families:
436 debian (includes: debian, ubuntu)
437 ubuntu (includes: ubuntu)
438 fedora (includes: fedora, rhel, centos)
439 rhel (includes: rhel, centos)
440 centos (includes: centos)
441 """
442 distro_info = parse_os_release()
443 return {distro_info["ID"], *distro_info.get("ID_LIKE", "").split()}
444
445
446 def files_and_string_digest(filenames: Sequence[str], extra_strings: Sequence[str]) -> str:
447 # see is_digest_obsolete for more context
448 sha1sum = hashlib.sha1()
449 for fn in filenames:
450 with open(fn, "rb") as file_to_hash:
451 sha1sum.update(file_to_hash.read())
452
453 for extra_string in extra_strings:
454 sha1sum.update(extra_string.encode())
455
456 return sha1sum.hexdigest()
457
458
459 def is_digest_obsolete(
460 hash_name: str, filenames: Sequence[str], extra_strings: Sequence[str] = []
461 ) -> bool:
462 """
463 In order to determine if we need to run some
464 process, we calculate a digest of the important
465 files and strings whose respective contents
466 or values may indicate such a need.
467
468 filenames = files we should hash the contents of
469 extra_strings = strings we should hash directly
470
471 Grep for callers to see examples of how this is used.
472
473 To elaborate on extra_strings, they will typically
474 be things like:
475
476 - package versions (that we import)
477 - settings values (that we stringify with
478 json, deterministically)
479 """
480 last_hash_path = os.path.join(get_dev_uuid_var_path(), hash_name)
481 try:
482 with open(last_hash_path) as f:
483 old_hash = f.read()
484 except FileNotFoundError:
485 # This is normal for a fresh checkout--a missing
486 # digest is an obsolete digest.
487 return True
488
489 new_hash = files_and_string_digest(filenames, extra_strings)
490
491 return new_hash != old_hash
492
493
494 def write_new_digest(
495 hash_name: str, filenames: Sequence[str], extra_strings: Sequence[str] = []
496 ) -> None:
497 hash_path = os.path.join(get_dev_uuid_var_path(), hash_name)
498 new_hash = files_and_string_digest(filenames, extra_strings)
499 with open(hash_path, "w") as f:
500 f.write(new_hash)
501
502 # Be a little verbose here--our callers ensure we
503 # only write new digests when things have changed, and
504 # making this system more transparent to developers
505 # can help them troubleshoot provisioning glitches.
506 print("New digest written to: " + hash_path)
507
508
509 def is_root() -> bool:
510 if "posix" in os.name and os.geteuid() == 0:
511 return True
512 return False
513
514
515 def run_as_root(args: List[str], **kwargs: Any) -> None:
516 sudo_args = kwargs.pop("sudo_args", [])
517 if not is_root():
518 args = ["sudo", *sudo_args, "--", *args]
519 run(args, **kwargs)
520
521
522 def assert_not_running_as_root() -> None:
523 script_name = os.path.abspath(sys.argv[0])
524 if is_root():
525 pwent = get_zulip_pwent()
526 msg = (
527 "{shortname} should not be run as root. Use `su {user}` to switch to the 'zulip'\n"
528 "user before rerunning this, or use \n su {user} -c '{name} ...'\n"
529 "to switch users and run this as a single command."
530 ).format(name=script_name, shortname=os.path.basename(script_name), user=pwent.pw_name)
531 print(msg)
532 sys.exit(1)
533
534
535 def assert_running_as_root(strip_lib_from_paths: bool = False) -> None:
536 script_name = os.path.abspath(sys.argv[0])
537 # Since these Python scripts are run inside a thin shell wrapper,
538 # we need to replace the paths in order to ensure we instruct
539 # users to (re)run the right command.
540 if strip_lib_from_paths:
541 script_name = script_name.replace("scripts/lib/upgrade", "scripts/upgrade")
542 if not is_root():
543 print(f"{script_name} must be run as root.")
544 sys.exit(1)
545
546
547 def get_config(
548 config_file: configparser.RawConfigParser,
549 section: str,
550 key: str,
551 default_value: str = "",
552 ) -> str:
553 if config_file.has_option(section, key):
554 return config_file.get(section, key)
555 return default_value
556
557
558 def get_config_file() -> configparser.RawConfigParser:
559 config_file = configparser.RawConfigParser()
560 config_file.read("/etc/zulip/zulip.conf")
561 return config_file
562
563
564 def get_deploy_options(config_file: configparser.RawConfigParser) -> List[str]:
565 return get_config(config_file, "deployment", "deploy_options", "").strip().split()
566
567
568 def run_psql_as_postgres(
569 config_file: configparser.RawConfigParser,
570 sql_query: str,
571 ) -> None:
572 dbname = get_config(config_file, "postgresql", "database_name", "zulip")
573 subcmd = " ".join(
574 map(
575 shlex.quote,
576 [
577 "psql",
578 "-v",
579 "ON_ERROR_STOP=1",
580 "-d",
581 dbname,
582 "-c",
583 sql_query,
584 ],
585 )
586 )
587 subprocess.check_call(["su", "postgres", "-c", subcmd])
588
589
590 def get_tornado_ports(config_file: configparser.RawConfigParser) -> List[int]:
591 ports = []
592 if config_file.has_section("tornado_sharding"):
593 ports = [int(port) for port in config_file.options("tornado_sharding")]
594 if not ports:
595 ports = [9800]
596 return ports
597
598
599 def get_or_create_dev_uuid_var_path(path: str) -> str:
600 absolute_path = f"{get_dev_uuid_var_path()}/{path}"
601 os.makedirs(absolute_path, exist_ok=True)
602 return absolute_path
603
604
605 def is_vagrant_env_host(path: str) -> bool:
606 return ".vagrant" in os.listdir(path)
607
608
609 def has_application_server(once: bool = False) -> bool:
610 if once:
611 return os.path.exists("/etc/supervisor/conf.d/zulip/zulip-once.conf")
612 return (
613 # Current path
614 os.path.exists("/etc/supervisor/conf.d/zulip/zulip.conf")
615 # Old path, relevant for upgrades
616 or os.path.exists("/etc/supervisor/conf.d/zulip.conf")
617 )
618
619
620 def list_supervisor_processes(*args: str) -> List[str]:
621 worker_status = subprocess.run(
622 ["supervisorctl", "status", *args],
623 universal_newlines=True,
624 stdout=subprocess.PIPE,
625 )
626 # `supercisorctl status` returns 3 if any are stopped, which is
627 # fine here; and exit code 4 is for no such process, which is
628 # handled below.
629 if worker_status.returncode not in (0, 3, 4):
630 worker_status.check_returncode()
631
632 processes = []
633 for status_line in worker_status.stdout.splitlines():
634 if not re.search(r"ERROR \(no such (process|group)\)", status_line):
635 processes.append(status_line.split()[0])
636 return processes
637
638
639 def has_process_fts_updates() -> bool:
640 return (
641 # Current path
642 os.path.exists("/etc/supervisor/conf.d/zulip/zulip_db.conf")
643 # Old path, relevant for upgrades
644 or os.path.exists("/etc/supervisor/conf.d/zulip_db.conf")
645 )
646
647
648 def deport(netloc: str) -> str:
649 """Remove the port from a hostname:port string. Brackets on a literal
650 IPv6 address are included."""
651 r = SplitResult("", netloc, "", "", "")
652 assert r.hostname is not None
653 return "[" + r.hostname + "]" if ":" in r.hostname else r.hostname
654
655
656 def start_arg_parser(action: str, add_help: bool = False) -> argparse.ArgumentParser:
657 parser = argparse.ArgumentParser(add_help=add_help)
658 parser.add_argument("--fill-cache", action="store_true", help="Fill the memcached caches")
659 if action == "restart":
660 parser.add_argument(
661 "--less-graceful",
662 action="store_true",
663 help="Restart with more concern for expediency than minimizing availability interruption",
664 )
665 parser.add_argument(
666 "--skip-tornado",
667 action="store_true",
668 help="Do not restart Tornado processes",
669 )
670 return parser
671
672
673 if __name__ == "__main__":
674 cmd = sys.argv[1]
675 if cmd == "make_deploy_path":
676 print(make_deploy_path())
677 elif cmd == "get_dev_uuid":
678 print(get_dev_uuid_var_path())
679
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/lib/zulip_tools.py b/scripts/lib/zulip_tools.py
--- a/scripts/lib/zulip_tools.py
+++ b/scripts/lib/zulip_tools.py
@@ -623,7 +623,7 @@
universal_newlines=True,
stdout=subprocess.PIPE,
)
- # `supercisorctl status` returns 3 if any are stopped, which is
+ # `supervisorctl status` returns 3 if any are stopped, which is
# fine here; and exit code 4 is for no such process, which is
# handled below.
if worker_status.returncode not in (0, 3, 4):
| {"golden_diff": "diff --git a/scripts/lib/zulip_tools.py b/scripts/lib/zulip_tools.py\n--- a/scripts/lib/zulip_tools.py\n+++ b/scripts/lib/zulip_tools.py\n@@ -623,7 +623,7 @@\n universal_newlines=True,\n stdout=subprocess.PIPE,\n )\n- # `supercisorctl status` returns 3 if any are stopped, which is\n+ # `supervisorctl status` returns 3 if any are stopped, which is\n # fine here; and exit code 4 is for no such process, which is\n # handled below.\n if worker_status.returncode not in (0, 3, 4):\n", "issue": "Support restarting the server without rejecting any requests\nIn theory, it should be possible with uwsgi and its `master=true` setting to restart the server with 0 requests being rejected due to the service being down (the approach seems to be the obvious thing of queuing requests in the socket until the new processes are up). I tried this briefly with our supervisord and ran into problems where it would just fail to restart, so some investigation is required into how to do this properly.\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport argparse\nimport configparser\nimport datetime\nimport functools\nimport hashlib\nimport json\nimport logging\nimport os\nimport pwd\nimport random\nimport re\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport time\nimport uuid\nfrom typing import Any, Dict, List, Sequence, Set\nfrom urllib.parse import SplitResult\n\nDEPLOYMENTS_DIR = \"/home/zulip/deployments\"\nLOCK_DIR = os.path.join(DEPLOYMENTS_DIR, \"lock\")\nTIMESTAMP_FORMAT = \"%Y-%m-%d-%H-%M-%S\"\n\n# Color codes\nOKBLUE = \"\\033[94m\"\nOKGREEN = \"\\033[92m\"\nWARNING = \"\\033[93m\"\nFAIL = \"\\033[91m\"\nENDC = \"\\033[0m\"\nBLACKONYELLOW = \"\\x1b[0;30;43m\"\nWHITEONRED = \"\\x1b[0;37;41m\"\nBOLDRED = \"\\x1B[1;31m\"\n\nGREEN = \"\\x1b[32m\"\nYELLOW = \"\\x1b[33m\"\nBLUE = \"\\x1b[34m\"\nMAGENTA = \"\\x1b[35m\"\nCYAN = \"\\x1b[36m\"\n\n\ndef overwrite_symlink(src: str, dst: str) -> None:\n dir, base = os.path.split(dst)\n while True:\n # Note: creating a temporary filename like this is not generally\n # secure. It\u2019s fine in this case because os.symlink refuses to\n # overwrite an existing target; we handle the error and try again.\n tmp = os.path.join(dir, f\".{base}.{random.randrange(1 << 40):010x}\")\n try:\n os.symlink(src, tmp)\n except FileExistsError:\n continue\n break\n try:\n os.rename(tmp, dst)\n except BaseException:\n os.remove(tmp)\n raise\n\n\ndef parse_cache_script_args(description: str) -> argparse.Namespace:\n # Keep this in sync with clean_unused_caches in provision_inner.py\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n \"--threshold\",\n dest=\"threshold_days\",\n type=int,\n default=14,\n metavar=\"<days>\",\n help=\"Any cache which is not in \"\n \"use by a deployment not older than threshold days(current \"\n \"installation in dev) and older than threshold days will be \"\n \"deleted. (defaults to 14)\",\n )\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"If specified then script will only print the caches \"\n \"that it will delete/keep back. 
It will not delete any cache.\",\n )\n parser.add_argument(\n \"--verbose\",\n action=\"store_true\",\n help=\"If specified then script will print a detailed report \"\n \"of what is being will deleted/kept back.\",\n )\n parser.add_argument(\n \"--no-print-headings\",\n dest=\"no_headings\",\n action=\"store_true\",\n help=\"If specified then script will not print headings for \"\n \"what will be deleted/kept back.\",\n )\n\n args = parser.parse_args()\n args.verbose |= args.dry_run # Always print a detailed report in case of dry run.\n return args\n\n\ndef get_deploy_root() -> str:\n return os.path.realpath(\n os.path.normpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\")),\n )\n\n\ndef get_deployment_version(extract_path: str) -> str:\n version = \"0.0.0\"\n for item in os.listdir(extract_path):\n item_path = os.path.join(extract_path, item)\n if item.startswith(\"zulip-server\") and os.path.isdir(item_path):\n with open(os.path.join(item_path, \"version.py\")) as f:\n result = re.search('ZULIP_VERSION = \"(.*)\"', f.read())\n if result:\n version = result.groups()[0]\n break\n return version\n\n\ndef is_invalid_upgrade(current_version: str, new_version: str) -> bool:\n if new_version > \"1.4.3\" and current_version <= \"1.3.10\":\n return True\n return False\n\n\ndef get_zulip_pwent() -> pwd.struct_passwd:\n deploy_root_uid = os.stat(get_deploy_root()).st_uid\n if deploy_root_uid != 0:\n return pwd.getpwuid(deploy_root_uid)\n\n # In the case that permissions got messed up and the deployment\n # directory is unexpectedly owned by root, we fallback to the\n # `zulip` user as that's the correct value in production.\n return pwd.getpwnam(\"zulip\")\n\n\ndef get_postgres_pwent() -> pwd.struct_passwd:\n try:\n return pwd.getpwnam(\"postgres\")\n except KeyError:\n return get_zulip_pwent()\n\n\ndef su_to_zulip(save_suid: bool = False) -> None:\n \"\"\"Warning: su_to_zulip assumes that the zulip checkout is owned by\n the zulip user (or whatever normal user is running the Zulip\n installation). 
It should never be run from the installer or other\n production contexts before /home/zulip/deployments/current is\n created.\"\"\"\n pwent = get_zulip_pwent()\n os.setgid(pwent.pw_gid)\n if save_suid:\n os.setresuid(pwent.pw_uid, pwent.pw_uid, os.getuid())\n else:\n os.setuid(pwent.pw_uid)\n os.environ[\"HOME\"] = pwent.pw_dir\n\n\ndef make_deploy_path() -> str:\n timestamp = datetime.datetime.now().strftime(TIMESTAMP_FORMAT)\n return os.path.join(DEPLOYMENTS_DIR, timestamp)\n\n\nTEMPLATE_DATABASE_DIR = \"test-backend/databases\"\n\n\ndef get_dev_uuid_var_path(create_if_missing: bool = False) -> str:\n zulip_path = get_deploy_root()\n uuid_path = os.path.join(os.path.realpath(os.path.dirname(zulip_path)), \".zulip-dev-uuid\")\n if os.path.exists(uuid_path):\n with open(uuid_path) as f:\n zulip_uuid = f.read().strip()\n else:\n if create_if_missing:\n zulip_uuid = str(uuid.uuid4())\n # We need root access here, since the path will be under /srv/ in the\n # development environment.\n run_as_root([\"sh\", \"-c\", 'echo \"$1\" > \"$2\"', \"-\", zulip_uuid, uuid_path])\n else:\n raise AssertionError(\"Missing UUID file; please run tools/provision!\")\n\n result_path = os.path.join(zulip_path, \"var\", zulip_uuid)\n os.makedirs(result_path, exist_ok=True)\n return result_path\n\n\ndef get_deployment_lock(error_rerun_script: str) -> None:\n start_time = time.time()\n got_lock = False\n while time.time() - start_time < 300:\n try:\n os.mkdir(LOCK_DIR)\n got_lock = True\n break\n except OSError:\n print(\n WARNING\n + \"Another deployment in progress; waiting for lock... \"\n + f\"(If no deployment is running, rmdir {LOCK_DIR})\"\n + ENDC,\n flush=True,\n )\n time.sleep(3)\n\n if not got_lock:\n print(\n FAIL\n + \"Deployment already in progress. Please run\\n\"\n + f\" {error_rerun_script}\\n\"\n + \"manually when the previous deployment finishes, or run\\n\"\n + f\" rmdir {LOCK_DIR}\\n\"\n + \"if the previous deployment crashed.\"\n + ENDC\n )\n sys.exit(1)\n\n\ndef release_deployment_lock() -> None:\n shutil.rmtree(LOCK_DIR)\n\n\ndef run(args: Sequence[str], **kwargs: Any) -> None:\n # Output what we're doing in the `set -x` style\n print(\"+ {}\".format(\" \".join(map(shlex.quote, args))), flush=True)\n\n try:\n subprocess.check_call(args, **kwargs)\n except subprocess.CalledProcessError:\n print()\n print(\n WHITEONRED\n + \"Error running a subcommand of {}: {}\".format(\n sys.argv[0],\n \" \".join(map(shlex.quote, args)),\n )\n + ENDC\n )\n print(WHITEONRED + \"Actual error output for the subcommand is just above this.\" + ENDC)\n print()\n sys.exit(1)\n\n\ndef log_management_command(cmd: Sequence[str], log_path: str) -> None:\n log_dir = os.path.dirname(log_path)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n formatter = logging.Formatter(\"%(asctime)s: %(message)s\")\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(formatter)\n logger = logging.getLogger(\"zulip.management\")\n logger.addHandler(file_handler)\n logger.setLevel(logging.INFO)\n\n logger.info(\"Ran %s\", \" \".join(map(shlex.quote, cmd)))\n\n\ndef get_environment() -> str:\n if os.path.exists(DEPLOYMENTS_DIR):\n return \"prod\"\n return \"dev\"\n\n\ndef get_recent_deployments(threshold_days: int) -> Set[str]:\n # Returns a list of deployments not older than threshold days\n # including `/root/zulip` directory if it exists.\n recent = set()\n threshold_date = datetime.datetime.now() - datetime.timedelta(days=threshold_days)\n for dir_name in os.listdir(DEPLOYMENTS_DIR):\n target_dir = 
os.path.join(DEPLOYMENTS_DIR, dir_name)\n if not os.path.isdir(target_dir):\n # Skip things like uwsgi sockets, symlinks, etc.\n continue\n if not os.path.exists(os.path.join(target_dir, \"zerver\")):\n # Skip things like \"lock\" that aren't actually a deployment directory\n continue\n try:\n date = datetime.datetime.strptime(dir_name, TIMESTAMP_FORMAT)\n if date >= threshold_date:\n recent.add(target_dir)\n except ValueError:\n # Always include deployments whose name is not in the format of a timestamp.\n recent.add(target_dir)\n # If it is a symlink then include the target as well.\n if os.path.islink(target_dir):\n recent.add(os.path.realpath(target_dir))\n if os.path.exists(\"/root/zulip\"):\n recent.add(\"/root/zulip\")\n return recent\n\n\ndef get_threshold_timestamp(threshold_days: int) -> int:\n # Given number of days, this function returns timestamp corresponding\n # to the time prior to given number of days.\n threshold = datetime.datetime.now() - datetime.timedelta(days=threshold_days)\n threshold_timestamp = int(time.mktime(threshold.utctimetuple()))\n return threshold_timestamp\n\n\ndef get_caches_to_be_purged(\n caches_dir: str, caches_in_use: Set[str], threshold_days: int\n) -> Set[str]:\n # Given a directory containing caches, a list of caches in use\n # and threshold days, this function return a list of caches\n # which can be purged. Remove the cache only if it is:\n # 1: Not in use by the current installation(in dev as well as in prod).\n # 2: Not in use by a deployment not older than `threshold_days`(in prod).\n # 3: Not in use by '/root/zulip'.\n # 4: Not older than `threshold_days`.\n caches_to_purge = set()\n threshold_timestamp = get_threshold_timestamp(threshold_days)\n for cache_dir_base in os.listdir(caches_dir):\n cache_dir = os.path.join(caches_dir, cache_dir_base)\n if cache_dir in caches_in_use:\n # Never purge a cache which is in use.\n continue\n if os.path.getctime(cache_dir) < threshold_timestamp:\n caches_to_purge.add(cache_dir)\n return caches_to_purge\n\n\ndef purge_unused_caches(\n caches_dir: str,\n caches_in_use: Set[str],\n cache_type: str,\n args: argparse.Namespace,\n) -> None:\n all_caches = {os.path.join(caches_dir, cache) for cache in os.listdir(caches_dir)}\n caches_to_purge = get_caches_to_be_purged(caches_dir, caches_in_use, args.threshold_days)\n caches_to_keep = all_caches - caches_to_purge\n\n may_be_perform_purging(\n caches_to_purge, caches_to_keep, cache_type, args.dry_run, args.verbose, args.no_headings\n )\n if args.verbose:\n print(\"Done!\")\n\n\ndef generate_sha1sum_emoji(zulip_path: str) -> str:\n sha = hashlib.sha1()\n\n filenames = [\n \"static/assets/zulip-emoji/zulip.png\",\n \"tools/setup/emoji/emoji_map.json\",\n \"tools/setup/emoji/build_emoji\",\n \"tools/setup/emoji/emoji_setup_utils.py\",\n \"tools/setup/emoji/emoji_names.py\",\n ]\n\n for filename in filenames:\n file_path = os.path.join(zulip_path, filename)\n with open(file_path, \"rb\") as reader:\n sha.update(reader.read())\n\n # Take into account the version of `emoji-datasource-google` package\n # while generating success stamp.\n PACKAGE_FILE_PATH = os.path.join(zulip_path, \"package.json\")\n with open(PACKAGE_FILE_PATH) as fp:\n parsed_package_file = json.load(fp)\n dependency_data = parsed_package_file[\"dependencies\"]\n\n if \"emoji-datasource-google\" in dependency_data:\n with open(os.path.join(zulip_path, \"yarn.lock\")) as fp:\n (emoji_datasource_version,) = re.findall(\n r\"^emoji-datasource-google@\"\n + 
re.escape(dependency_data[\"emoji-datasource-google\"])\n + r':\\n version \"(.*)\"',\n fp.read(),\n re.M,\n )\n else:\n emoji_datasource_version = \"0\"\n sha.update(emoji_datasource_version.encode())\n\n return sha.hexdigest()\n\n\ndef may_be_perform_purging(\n dirs_to_purge: Set[str],\n dirs_to_keep: Set[str],\n dir_type: str,\n dry_run: bool,\n verbose: bool,\n no_headings: bool,\n) -> None:\n if dry_run:\n print(\"Performing a dry run...\")\n if not no_headings:\n print(f\"Cleaning unused {dir_type}s...\")\n\n for directory in dirs_to_purge:\n if verbose:\n print(f\"Cleaning unused {dir_type}: {directory}\")\n if not dry_run:\n run_as_root([\"rm\", \"-rf\", directory])\n\n for directory in dirs_to_keep:\n if verbose:\n print(f\"Keeping used {dir_type}: {directory}\")\n\n\[email protected]_cache(None)\ndef parse_os_release() -> Dict[str, str]:\n \"\"\"\n Example of the useful subset of the data:\n {\n 'ID': 'ubuntu',\n 'VERSION_ID': '18.04',\n 'NAME': 'Ubuntu',\n 'VERSION': '18.04.3 LTS (Bionic Beaver)',\n 'PRETTY_NAME': 'Ubuntu 18.04.3 LTS',\n }\n\n VERSION_CODENAME (e.g. 'bionic') is nice and readable to Ubuntu\n developers, but we avoid using it, as it is not available on\n RHEL-based platforms.\n \"\"\"\n distro_info = {} # type: Dict[str, str]\n with open(\"/etc/os-release\") as fp:\n for line in fp:\n line = line.strip()\n if not line or line.startswith(\"#\"):\n # The line may be blank or a comment, see:\n # https://www.freedesktop.org/software/systemd/man/os-release.html\n continue\n k, v = line.split(\"=\", 1)\n [distro_info[k]] = shlex.split(v)\n return distro_info\n\n\[email protected]_cache(None)\ndef os_families() -> Set[str]:\n \"\"\"\n Known families:\n debian (includes: debian, ubuntu)\n ubuntu (includes: ubuntu)\n fedora (includes: fedora, rhel, centos)\n rhel (includes: rhel, centos)\n centos (includes: centos)\n \"\"\"\n distro_info = parse_os_release()\n return {distro_info[\"ID\"], *distro_info.get(\"ID_LIKE\", \"\").split()}\n\n\ndef files_and_string_digest(filenames: Sequence[str], extra_strings: Sequence[str]) -> str:\n # see is_digest_obsolete for more context\n sha1sum = hashlib.sha1()\n for fn in filenames:\n with open(fn, \"rb\") as file_to_hash:\n sha1sum.update(file_to_hash.read())\n\n for extra_string in extra_strings:\n sha1sum.update(extra_string.encode())\n\n return sha1sum.hexdigest()\n\n\ndef is_digest_obsolete(\n hash_name: str, filenames: Sequence[str], extra_strings: Sequence[str] = []\n) -> bool:\n \"\"\"\n In order to determine if we need to run some\n process, we calculate a digest of the important\n files and strings whose respective contents\n or values may indicate such a need.\n\n filenames = files we should hash the contents of\n extra_strings = strings we should hash directly\n\n Grep for callers to see examples of how this is used.\n\n To elaborate on extra_strings, they will typically\n be things like:\n\n - package versions (that we import)\n - settings values (that we stringify with\n json, deterministically)\n \"\"\"\n last_hash_path = os.path.join(get_dev_uuid_var_path(), hash_name)\n try:\n with open(last_hash_path) as f:\n old_hash = f.read()\n except FileNotFoundError:\n # This is normal for a fresh checkout--a missing\n # digest is an obsolete digest.\n return True\n\n new_hash = files_and_string_digest(filenames, extra_strings)\n\n return new_hash != old_hash\n\n\ndef write_new_digest(\n hash_name: str, filenames: Sequence[str], extra_strings: Sequence[str] = []\n) -> None:\n hash_path = os.path.join(get_dev_uuid_var_path(), 
hash_name)\n new_hash = files_and_string_digest(filenames, extra_strings)\n with open(hash_path, \"w\") as f:\n f.write(new_hash)\n\n # Be a little verbose here--our callers ensure we\n # only write new digests when things have changed, and\n # making this system more transparent to developers\n # can help them troubleshoot provisioning glitches.\n print(\"New digest written to: \" + hash_path)\n\n\ndef is_root() -> bool:\n if \"posix\" in os.name and os.geteuid() == 0:\n return True\n return False\n\n\ndef run_as_root(args: List[str], **kwargs: Any) -> None:\n sudo_args = kwargs.pop(\"sudo_args\", [])\n if not is_root():\n args = [\"sudo\", *sudo_args, \"--\", *args]\n run(args, **kwargs)\n\n\ndef assert_not_running_as_root() -> None:\n script_name = os.path.abspath(sys.argv[0])\n if is_root():\n pwent = get_zulip_pwent()\n msg = (\n \"{shortname} should not be run as root. Use `su {user}` to switch to the 'zulip'\\n\"\n \"user before rerunning this, or use \\n su {user} -c '{name} ...'\\n\"\n \"to switch users and run this as a single command.\"\n ).format(name=script_name, shortname=os.path.basename(script_name), user=pwent.pw_name)\n print(msg)\n sys.exit(1)\n\n\ndef assert_running_as_root(strip_lib_from_paths: bool = False) -> None:\n script_name = os.path.abspath(sys.argv[0])\n # Since these Python scripts are run inside a thin shell wrapper,\n # we need to replace the paths in order to ensure we instruct\n # users to (re)run the right command.\n if strip_lib_from_paths:\n script_name = script_name.replace(\"scripts/lib/upgrade\", \"scripts/upgrade\")\n if not is_root():\n print(f\"{script_name} must be run as root.\")\n sys.exit(1)\n\n\ndef get_config(\n config_file: configparser.RawConfigParser,\n section: str,\n key: str,\n default_value: str = \"\",\n) -> str:\n if config_file.has_option(section, key):\n return config_file.get(section, key)\n return default_value\n\n\ndef get_config_file() -> configparser.RawConfigParser:\n config_file = configparser.RawConfigParser()\n config_file.read(\"/etc/zulip/zulip.conf\")\n return config_file\n\n\ndef get_deploy_options(config_file: configparser.RawConfigParser) -> List[str]:\n return get_config(config_file, \"deployment\", \"deploy_options\", \"\").strip().split()\n\n\ndef run_psql_as_postgres(\n config_file: configparser.RawConfigParser,\n sql_query: str,\n) -> None:\n dbname = get_config(config_file, \"postgresql\", \"database_name\", \"zulip\")\n subcmd = \" \".join(\n map(\n shlex.quote,\n [\n \"psql\",\n \"-v\",\n \"ON_ERROR_STOP=1\",\n \"-d\",\n dbname,\n \"-c\",\n sql_query,\n ],\n )\n )\n subprocess.check_call([\"su\", \"postgres\", \"-c\", subcmd])\n\n\ndef get_tornado_ports(config_file: configparser.RawConfigParser) -> List[int]:\n ports = []\n if config_file.has_section(\"tornado_sharding\"):\n ports = [int(port) for port in config_file.options(\"tornado_sharding\")]\n if not ports:\n ports = [9800]\n return ports\n\n\ndef get_or_create_dev_uuid_var_path(path: str) -> str:\n absolute_path = f\"{get_dev_uuid_var_path()}/{path}\"\n os.makedirs(absolute_path, exist_ok=True)\n return absolute_path\n\n\ndef is_vagrant_env_host(path: str) -> bool:\n return \".vagrant\" in os.listdir(path)\n\n\ndef has_application_server(once: bool = False) -> bool:\n if once:\n return os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip-once.conf\")\n return (\n # Current path\n os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip.conf\")\n # Old path, relevant for upgrades\n or os.path.exists(\"/etc/supervisor/conf.d/zulip.conf\")\n )\n\n\ndef 
list_supervisor_processes(*args: str) -> List[str]:\n worker_status = subprocess.run(\n [\"supervisorctl\", \"status\", *args],\n universal_newlines=True,\n stdout=subprocess.PIPE,\n )\n # `supercisorctl status` returns 3 if any are stopped, which is\n # fine here; and exit code 4 is for no such process, which is\n # handled below.\n if worker_status.returncode not in (0, 3, 4):\n worker_status.check_returncode()\n\n processes = []\n for status_line in worker_status.stdout.splitlines():\n if not re.search(r\"ERROR \\(no such (process|group)\\)\", status_line):\n processes.append(status_line.split()[0])\n return processes\n\n\ndef has_process_fts_updates() -> bool:\n return (\n # Current path\n os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip_db.conf\")\n # Old path, relevant for upgrades\n or os.path.exists(\"/etc/supervisor/conf.d/zulip_db.conf\")\n )\n\n\ndef deport(netloc: str) -> str:\n \"\"\"Remove the port from a hostname:port string. Brackets on a literal\n IPv6 address are included.\"\"\"\n r = SplitResult(\"\", netloc, \"\", \"\", \"\")\n assert r.hostname is not None\n return \"[\" + r.hostname + \"]\" if \":\" in r.hostname else r.hostname\n\n\ndef start_arg_parser(action: str, add_help: bool = False) -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(add_help=add_help)\n parser.add_argument(\"--fill-cache\", action=\"store_true\", help=\"Fill the memcached caches\")\n if action == \"restart\":\n parser.add_argument(\n \"--less-graceful\",\n action=\"store_true\",\n help=\"Restart with more concern for expediency than minimizing availability interruption\",\n )\n parser.add_argument(\n \"--skip-tornado\",\n action=\"store_true\",\n help=\"Do not restart Tornado processes\",\n )\n return parser\n\n\nif __name__ == \"__main__\":\n cmd = sys.argv[1]\n if cmd == \"make_deploy_path\":\n print(make_deploy_path())\n elif cmd == \"get_dev_uuid\":\n print(get_dev_uuid_var_path())\n", "path": "scripts/lib/zulip_tools.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport argparse\nimport configparser\nimport datetime\nimport functools\nimport hashlib\nimport json\nimport logging\nimport os\nimport pwd\nimport random\nimport re\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport time\nimport uuid\nfrom typing import Any, Dict, List, Sequence, Set\nfrom urllib.parse import SplitResult\n\nDEPLOYMENTS_DIR = \"/home/zulip/deployments\"\nLOCK_DIR = os.path.join(DEPLOYMENTS_DIR, \"lock\")\nTIMESTAMP_FORMAT = \"%Y-%m-%d-%H-%M-%S\"\n\n# Color codes\nOKBLUE = \"\\033[94m\"\nOKGREEN = \"\\033[92m\"\nWARNING = \"\\033[93m\"\nFAIL = \"\\033[91m\"\nENDC = \"\\033[0m\"\nBLACKONYELLOW = \"\\x1b[0;30;43m\"\nWHITEONRED = \"\\x1b[0;37;41m\"\nBOLDRED = \"\\x1B[1;31m\"\n\nGREEN = \"\\x1b[32m\"\nYELLOW = \"\\x1b[33m\"\nBLUE = \"\\x1b[34m\"\nMAGENTA = \"\\x1b[35m\"\nCYAN = \"\\x1b[36m\"\n\n\ndef overwrite_symlink(src: str, dst: str) -> None:\n dir, base = os.path.split(dst)\n while True:\n # Note: creating a temporary filename like this is not generally\n # secure. 
It\u2019s fine in this case because os.symlink refuses to\n # overwrite an existing target; we handle the error and try again.\n tmp = os.path.join(dir, f\".{base}.{random.randrange(1 << 40):010x}\")\n try:\n os.symlink(src, tmp)\n except FileExistsError:\n continue\n break\n try:\n os.rename(tmp, dst)\n except BaseException:\n os.remove(tmp)\n raise\n\n\ndef parse_cache_script_args(description: str) -> argparse.Namespace:\n # Keep this in sync with clean_unused_caches in provision_inner.py\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n \"--threshold\",\n dest=\"threshold_days\",\n type=int,\n default=14,\n metavar=\"<days>\",\n help=\"Any cache which is not in \"\n \"use by a deployment not older than threshold days(current \"\n \"installation in dev) and older than threshold days will be \"\n \"deleted. (defaults to 14)\",\n )\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"If specified then script will only print the caches \"\n \"that it will delete/keep back. It will not delete any cache.\",\n )\n parser.add_argument(\n \"--verbose\",\n action=\"store_true\",\n help=\"If specified then script will print a detailed report \"\n \"of what is being will deleted/kept back.\",\n )\n parser.add_argument(\n \"--no-print-headings\",\n dest=\"no_headings\",\n action=\"store_true\",\n help=\"If specified then script will not print headings for \"\n \"what will be deleted/kept back.\",\n )\n\n args = parser.parse_args()\n args.verbose |= args.dry_run # Always print a detailed report in case of dry run.\n return args\n\n\ndef get_deploy_root() -> str:\n return os.path.realpath(\n os.path.normpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\")),\n )\n\n\ndef get_deployment_version(extract_path: str) -> str:\n version = \"0.0.0\"\n for item in os.listdir(extract_path):\n item_path = os.path.join(extract_path, item)\n if item.startswith(\"zulip-server\") and os.path.isdir(item_path):\n with open(os.path.join(item_path, \"version.py\")) as f:\n result = re.search('ZULIP_VERSION = \"(.*)\"', f.read())\n if result:\n version = result.groups()[0]\n break\n return version\n\n\ndef is_invalid_upgrade(current_version: str, new_version: str) -> bool:\n if new_version > \"1.4.3\" and current_version <= \"1.3.10\":\n return True\n return False\n\n\ndef get_zulip_pwent() -> pwd.struct_passwd:\n deploy_root_uid = os.stat(get_deploy_root()).st_uid\n if deploy_root_uid != 0:\n return pwd.getpwuid(deploy_root_uid)\n\n # In the case that permissions got messed up and the deployment\n # directory is unexpectedly owned by root, we fallback to the\n # `zulip` user as that's the correct value in production.\n return pwd.getpwnam(\"zulip\")\n\n\ndef get_postgres_pwent() -> pwd.struct_passwd:\n try:\n return pwd.getpwnam(\"postgres\")\n except KeyError:\n return get_zulip_pwent()\n\n\ndef su_to_zulip(save_suid: bool = False) -> None:\n \"\"\"Warning: su_to_zulip assumes that the zulip checkout is owned by\n the zulip user (or whatever normal user is running the Zulip\n installation). 
It should never be run from the installer or other\n production contexts before /home/zulip/deployments/current is\n created.\"\"\"\n pwent = get_zulip_pwent()\n os.setgid(pwent.pw_gid)\n if save_suid:\n os.setresuid(pwent.pw_uid, pwent.pw_uid, os.getuid())\n else:\n os.setuid(pwent.pw_uid)\n os.environ[\"HOME\"] = pwent.pw_dir\n\n\ndef make_deploy_path() -> str:\n timestamp = datetime.datetime.now().strftime(TIMESTAMP_FORMAT)\n return os.path.join(DEPLOYMENTS_DIR, timestamp)\n\n\nTEMPLATE_DATABASE_DIR = \"test-backend/databases\"\n\n\ndef get_dev_uuid_var_path(create_if_missing: bool = False) -> str:\n zulip_path = get_deploy_root()\n uuid_path = os.path.join(os.path.realpath(os.path.dirname(zulip_path)), \".zulip-dev-uuid\")\n if os.path.exists(uuid_path):\n with open(uuid_path) as f:\n zulip_uuid = f.read().strip()\n else:\n if create_if_missing:\n zulip_uuid = str(uuid.uuid4())\n # We need root access here, since the path will be under /srv/ in the\n # development environment.\n run_as_root([\"sh\", \"-c\", 'echo \"$1\" > \"$2\"', \"-\", zulip_uuid, uuid_path])\n else:\n raise AssertionError(\"Missing UUID file; please run tools/provision!\")\n\n result_path = os.path.join(zulip_path, \"var\", zulip_uuid)\n os.makedirs(result_path, exist_ok=True)\n return result_path\n\n\ndef get_deployment_lock(error_rerun_script: str) -> None:\n start_time = time.time()\n got_lock = False\n while time.time() - start_time < 300:\n try:\n os.mkdir(LOCK_DIR)\n got_lock = True\n break\n except OSError:\n print(\n WARNING\n + \"Another deployment in progress; waiting for lock... \"\n + f\"(If no deployment is running, rmdir {LOCK_DIR})\"\n + ENDC,\n flush=True,\n )\n time.sleep(3)\n\n if not got_lock:\n print(\n FAIL\n + \"Deployment already in progress. Please run\\n\"\n + f\" {error_rerun_script}\\n\"\n + \"manually when the previous deployment finishes, or run\\n\"\n + f\" rmdir {LOCK_DIR}\\n\"\n + \"if the previous deployment crashed.\"\n + ENDC\n )\n sys.exit(1)\n\n\ndef release_deployment_lock() -> None:\n shutil.rmtree(LOCK_DIR)\n\n\ndef run(args: Sequence[str], **kwargs: Any) -> None:\n # Output what we're doing in the `set -x` style\n print(\"+ {}\".format(\" \".join(map(shlex.quote, args))), flush=True)\n\n try:\n subprocess.check_call(args, **kwargs)\n except subprocess.CalledProcessError:\n print()\n print(\n WHITEONRED\n + \"Error running a subcommand of {}: {}\".format(\n sys.argv[0],\n \" \".join(map(shlex.quote, args)),\n )\n + ENDC\n )\n print(WHITEONRED + \"Actual error output for the subcommand is just above this.\" + ENDC)\n print()\n sys.exit(1)\n\n\ndef log_management_command(cmd: Sequence[str], log_path: str) -> None:\n log_dir = os.path.dirname(log_path)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n formatter = logging.Formatter(\"%(asctime)s: %(message)s\")\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(formatter)\n logger = logging.getLogger(\"zulip.management\")\n logger.addHandler(file_handler)\n logger.setLevel(logging.INFO)\n\n logger.info(\"Ran %s\", \" \".join(map(shlex.quote, cmd)))\n\n\ndef get_environment() -> str:\n if os.path.exists(DEPLOYMENTS_DIR):\n return \"prod\"\n return \"dev\"\n\n\ndef get_recent_deployments(threshold_days: int) -> Set[str]:\n # Returns a list of deployments not older than threshold days\n # including `/root/zulip` directory if it exists.\n recent = set()\n threshold_date = datetime.datetime.now() - datetime.timedelta(days=threshold_days)\n for dir_name in os.listdir(DEPLOYMENTS_DIR):\n target_dir = 
os.path.join(DEPLOYMENTS_DIR, dir_name)\n if not os.path.isdir(target_dir):\n # Skip things like uwsgi sockets, symlinks, etc.\n continue\n if not os.path.exists(os.path.join(target_dir, \"zerver\")):\n # Skip things like \"lock\" that aren't actually a deployment directory\n continue\n try:\n date = datetime.datetime.strptime(dir_name, TIMESTAMP_FORMAT)\n if date >= threshold_date:\n recent.add(target_dir)\n except ValueError:\n # Always include deployments whose name is not in the format of a timestamp.\n recent.add(target_dir)\n # If it is a symlink then include the target as well.\n if os.path.islink(target_dir):\n recent.add(os.path.realpath(target_dir))\n if os.path.exists(\"/root/zulip\"):\n recent.add(\"/root/zulip\")\n return recent\n\n\ndef get_threshold_timestamp(threshold_days: int) -> int:\n # Given number of days, this function returns timestamp corresponding\n # to the time prior to given number of days.\n threshold = datetime.datetime.now() - datetime.timedelta(days=threshold_days)\n threshold_timestamp = int(time.mktime(threshold.utctimetuple()))\n return threshold_timestamp\n\n\ndef get_caches_to_be_purged(\n caches_dir: str, caches_in_use: Set[str], threshold_days: int\n) -> Set[str]:\n # Given a directory containing caches, a list of caches in use\n # and threshold days, this function return a list of caches\n # which can be purged. Remove the cache only if it is:\n # 1: Not in use by the current installation(in dev as well as in prod).\n # 2: Not in use by a deployment not older than `threshold_days`(in prod).\n # 3: Not in use by '/root/zulip'.\n # 4: Not older than `threshold_days`.\n caches_to_purge = set()\n threshold_timestamp = get_threshold_timestamp(threshold_days)\n for cache_dir_base in os.listdir(caches_dir):\n cache_dir = os.path.join(caches_dir, cache_dir_base)\n if cache_dir in caches_in_use:\n # Never purge a cache which is in use.\n continue\n if os.path.getctime(cache_dir) < threshold_timestamp:\n caches_to_purge.add(cache_dir)\n return caches_to_purge\n\n\ndef purge_unused_caches(\n caches_dir: str,\n caches_in_use: Set[str],\n cache_type: str,\n args: argparse.Namespace,\n) -> None:\n all_caches = {os.path.join(caches_dir, cache) for cache in os.listdir(caches_dir)}\n caches_to_purge = get_caches_to_be_purged(caches_dir, caches_in_use, args.threshold_days)\n caches_to_keep = all_caches - caches_to_purge\n\n may_be_perform_purging(\n caches_to_purge, caches_to_keep, cache_type, args.dry_run, args.verbose, args.no_headings\n )\n if args.verbose:\n print(\"Done!\")\n\n\ndef generate_sha1sum_emoji(zulip_path: str) -> str:\n sha = hashlib.sha1()\n\n filenames = [\n \"static/assets/zulip-emoji/zulip.png\",\n \"tools/setup/emoji/emoji_map.json\",\n \"tools/setup/emoji/build_emoji\",\n \"tools/setup/emoji/emoji_setup_utils.py\",\n \"tools/setup/emoji/emoji_names.py\",\n ]\n\n for filename in filenames:\n file_path = os.path.join(zulip_path, filename)\n with open(file_path, \"rb\") as reader:\n sha.update(reader.read())\n\n # Take into account the version of `emoji-datasource-google` package\n # while generating success stamp.\n PACKAGE_FILE_PATH = os.path.join(zulip_path, \"package.json\")\n with open(PACKAGE_FILE_PATH) as fp:\n parsed_package_file = json.load(fp)\n dependency_data = parsed_package_file[\"dependencies\"]\n\n if \"emoji-datasource-google\" in dependency_data:\n with open(os.path.join(zulip_path, \"yarn.lock\")) as fp:\n (emoji_datasource_version,) = re.findall(\n r\"^emoji-datasource-google@\"\n + 
re.escape(dependency_data[\"emoji-datasource-google\"])\n + r':\\n version \"(.*)\"',\n fp.read(),\n re.M,\n )\n else:\n emoji_datasource_version = \"0\"\n sha.update(emoji_datasource_version.encode())\n\n return sha.hexdigest()\n\n\ndef may_be_perform_purging(\n dirs_to_purge: Set[str],\n dirs_to_keep: Set[str],\n dir_type: str,\n dry_run: bool,\n verbose: bool,\n no_headings: bool,\n) -> None:\n if dry_run:\n print(\"Performing a dry run...\")\n if not no_headings:\n print(f\"Cleaning unused {dir_type}s...\")\n\n for directory in dirs_to_purge:\n if verbose:\n print(f\"Cleaning unused {dir_type}: {directory}\")\n if not dry_run:\n run_as_root([\"rm\", \"-rf\", directory])\n\n for directory in dirs_to_keep:\n if verbose:\n print(f\"Keeping used {dir_type}: {directory}\")\n\n\[email protected]_cache(None)\ndef parse_os_release() -> Dict[str, str]:\n \"\"\"\n Example of the useful subset of the data:\n {\n 'ID': 'ubuntu',\n 'VERSION_ID': '18.04',\n 'NAME': 'Ubuntu',\n 'VERSION': '18.04.3 LTS (Bionic Beaver)',\n 'PRETTY_NAME': 'Ubuntu 18.04.3 LTS',\n }\n\n VERSION_CODENAME (e.g. 'bionic') is nice and readable to Ubuntu\n developers, but we avoid using it, as it is not available on\n RHEL-based platforms.\n \"\"\"\n distro_info = {} # type: Dict[str, str]\n with open(\"/etc/os-release\") as fp:\n for line in fp:\n line = line.strip()\n if not line or line.startswith(\"#\"):\n # The line may be blank or a comment, see:\n # https://www.freedesktop.org/software/systemd/man/os-release.html\n continue\n k, v = line.split(\"=\", 1)\n [distro_info[k]] = shlex.split(v)\n return distro_info\n\n\[email protected]_cache(None)\ndef os_families() -> Set[str]:\n \"\"\"\n Known families:\n debian (includes: debian, ubuntu)\n ubuntu (includes: ubuntu)\n fedora (includes: fedora, rhel, centos)\n rhel (includes: rhel, centos)\n centos (includes: centos)\n \"\"\"\n distro_info = parse_os_release()\n return {distro_info[\"ID\"], *distro_info.get(\"ID_LIKE\", \"\").split()}\n\n\ndef files_and_string_digest(filenames: Sequence[str], extra_strings: Sequence[str]) -> str:\n # see is_digest_obsolete for more context\n sha1sum = hashlib.sha1()\n for fn in filenames:\n with open(fn, \"rb\") as file_to_hash:\n sha1sum.update(file_to_hash.read())\n\n for extra_string in extra_strings:\n sha1sum.update(extra_string.encode())\n\n return sha1sum.hexdigest()\n\n\ndef is_digest_obsolete(\n hash_name: str, filenames: Sequence[str], extra_strings: Sequence[str] = []\n) -> bool:\n \"\"\"\n In order to determine if we need to run some\n process, we calculate a digest of the important\n files and strings whose respective contents\n or values may indicate such a need.\n\n filenames = files we should hash the contents of\n extra_strings = strings we should hash directly\n\n Grep for callers to see examples of how this is used.\n\n To elaborate on extra_strings, they will typically\n be things like:\n\n - package versions (that we import)\n - settings values (that we stringify with\n json, deterministically)\n \"\"\"\n last_hash_path = os.path.join(get_dev_uuid_var_path(), hash_name)\n try:\n with open(last_hash_path) as f:\n old_hash = f.read()\n except FileNotFoundError:\n # This is normal for a fresh checkout--a missing\n # digest is an obsolete digest.\n return True\n\n new_hash = files_and_string_digest(filenames, extra_strings)\n\n return new_hash != old_hash\n\n\ndef write_new_digest(\n hash_name: str, filenames: Sequence[str], extra_strings: Sequence[str] = []\n) -> None:\n hash_path = os.path.join(get_dev_uuid_var_path(), 
hash_name)\n new_hash = files_and_string_digest(filenames, extra_strings)\n with open(hash_path, \"w\") as f:\n f.write(new_hash)\n\n # Be a little verbose here--our callers ensure we\n # only write new digests when things have changed, and\n # making this system more transparent to developers\n # can help them troubleshoot provisioning glitches.\n print(\"New digest written to: \" + hash_path)\n\n\ndef is_root() -> bool:\n if \"posix\" in os.name and os.geteuid() == 0:\n return True\n return False\n\n\ndef run_as_root(args: List[str], **kwargs: Any) -> None:\n sudo_args = kwargs.pop(\"sudo_args\", [])\n if not is_root():\n args = [\"sudo\", *sudo_args, \"--\", *args]\n run(args, **kwargs)\n\n\ndef assert_not_running_as_root() -> None:\n script_name = os.path.abspath(sys.argv[0])\n if is_root():\n pwent = get_zulip_pwent()\n msg = (\n \"{shortname} should not be run as root. Use `su {user}` to switch to the 'zulip'\\n\"\n \"user before rerunning this, or use \\n su {user} -c '{name} ...'\\n\"\n \"to switch users and run this as a single command.\"\n ).format(name=script_name, shortname=os.path.basename(script_name), user=pwent.pw_name)\n print(msg)\n sys.exit(1)\n\n\ndef assert_running_as_root(strip_lib_from_paths: bool = False) -> None:\n script_name = os.path.abspath(sys.argv[0])\n # Since these Python scripts are run inside a thin shell wrapper,\n # we need to replace the paths in order to ensure we instruct\n # users to (re)run the right command.\n if strip_lib_from_paths:\n script_name = script_name.replace(\"scripts/lib/upgrade\", \"scripts/upgrade\")\n if not is_root():\n print(f\"{script_name} must be run as root.\")\n sys.exit(1)\n\n\ndef get_config(\n config_file: configparser.RawConfigParser,\n section: str,\n key: str,\n default_value: str = \"\",\n) -> str:\n if config_file.has_option(section, key):\n return config_file.get(section, key)\n return default_value\n\n\ndef get_config_file() -> configparser.RawConfigParser:\n config_file = configparser.RawConfigParser()\n config_file.read(\"/etc/zulip/zulip.conf\")\n return config_file\n\n\ndef get_deploy_options(config_file: configparser.RawConfigParser) -> List[str]:\n return get_config(config_file, \"deployment\", \"deploy_options\", \"\").strip().split()\n\n\ndef run_psql_as_postgres(\n config_file: configparser.RawConfigParser,\n sql_query: str,\n) -> None:\n dbname = get_config(config_file, \"postgresql\", \"database_name\", \"zulip\")\n subcmd = \" \".join(\n map(\n shlex.quote,\n [\n \"psql\",\n \"-v\",\n \"ON_ERROR_STOP=1\",\n \"-d\",\n dbname,\n \"-c\",\n sql_query,\n ],\n )\n )\n subprocess.check_call([\"su\", \"postgres\", \"-c\", subcmd])\n\n\ndef get_tornado_ports(config_file: configparser.RawConfigParser) -> List[int]:\n ports = []\n if config_file.has_section(\"tornado_sharding\"):\n ports = [int(port) for port in config_file.options(\"tornado_sharding\")]\n if not ports:\n ports = [9800]\n return ports\n\n\ndef get_or_create_dev_uuid_var_path(path: str) -> str:\n absolute_path = f\"{get_dev_uuid_var_path()}/{path}\"\n os.makedirs(absolute_path, exist_ok=True)\n return absolute_path\n\n\ndef is_vagrant_env_host(path: str) -> bool:\n return \".vagrant\" in os.listdir(path)\n\n\ndef has_application_server(once: bool = False) -> bool:\n if once:\n return os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip-once.conf\")\n return (\n # Current path\n os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip.conf\")\n # Old path, relevant for upgrades\n or os.path.exists(\"/etc/supervisor/conf.d/zulip.conf\")\n )\n\n\ndef 
list_supervisor_processes(*args: str) -> List[str]:\n worker_status = subprocess.run(\n [\"supervisorctl\", \"status\", *args],\n universal_newlines=True,\n stdout=subprocess.PIPE,\n )\n # `supervisorctl status` returns 3 if any are stopped, which is\n # fine here; and exit code 4 is for no such process, which is\n # handled below.\n if worker_status.returncode not in (0, 3, 4):\n worker_status.check_returncode()\n\n processes = []\n for status_line in worker_status.stdout.splitlines():\n if not re.search(r\"ERROR \\(no such (process|group)\\)\", status_line):\n processes.append(status_line.split()[0])\n return processes\n\n\ndef has_process_fts_updates() -> bool:\n return (\n # Current path\n os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip_db.conf\")\n # Old path, relevant for upgrades\n or os.path.exists(\"/etc/supervisor/conf.d/zulip_db.conf\")\n )\n\n\ndef deport(netloc: str) -> str:\n \"\"\"Remove the port from a hostname:port string. Brackets on a literal\n IPv6 address are included.\"\"\"\n r = SplitResult(\"\", netloc, \"\", \"\", \"\")\n assert r.hostname is not None\n return \"[\" + r.hostname + \"]\" if \":\" in r.hostname else r.hostname\n\n\ndef start_arg_parser(action: str, add_help: bool = False) -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(add_help=add_help)\n parser.add_argument(\"--fill-cache\", action=\"store_true\", help=\"Fill the memcached caches\")\n if action == \"restart\":\n parser.add_argument(\n \"--less-graceful\",\n action=\"store_true\",\n help=\"Restart with more concern for expediency than minimizing availability interruption\",\n )\n parser.add_argument(\n \"--skip-tornado\",\n action=\"store_true\",\n help=\"Do not restart Tornado processes\",\n )\n return parser\n\n\nif __name__ == \"__main__\":\n cmd = sys.argv[1]\n if cmd == \"make_deploy_path\":\n print(make_deploy_path())\n elif cmd == \"get_dev_uuid\":\n print(get_dev_uuid_var_path())\n", "path": "scripts/lib/zulip_tools.py"}]} |
gh_patches_debug_1456 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1798 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ebola page: loading second page of datasets reloads to top of page
Would it be easy to have it load the page at the `Datasets [41]` line?
--- END ISSUE ---
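Editor's note: the usual way to make a full page reload land on a specific section is to put a URL fragment on the pagination links that matches an element id rendered near that section. The sketch below is a minimal, self-contained illustration of that idea; the `datasets-section` id, the query-string shape, and the example base URL are assumptions for illustration, not the repository's actual routing.

```python
def pager_url(base_url, page):
    """Illustrative only: build a pager link that lands on the dataset list.

    '#datasets-section' is an assumed element id that the crisis page
    template would need to render (for example on the "Datasets [41]"
    heading), so the browser scrolls there instead of staying at the top.
    """
    return "{}?page={}#datasets-section".format(base_url, page)


print(pager_url("/ebola", 2))  # -> /ebola?page=2#datasets-section
```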
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py`
Content:
```
1 '''
2 Created on Nov 3, 2014
3
4 @author: alexandru-m-g
5 '''
6
7 import logging
8 import datetime as dt
9 import decimal
10
11 import pylons.config as config
12
13 import ckan.lib.base as base
14 import ckan.logic as logic
15 import ckan.model as model
16 import ckan.common as common
17 import ckan.lib.helpers as h
18
19 render = base.render
20 get_action = logic.get_action
21 c = common.c
22 request = common.request
23 _ = common._
24
25 Decimal = decimal.Decimal
26
27 log = logging.getLogger(__name__)
28
29
30 class CrisisController(base.BaseController):
31
32 def show(self):
33
34 context = {'model': model, 'session': model.Session,
35 'user': c.user or c.author, 'for_view': True,
36 'auth_user_obj': c.userobj}
37
38 datastore_resource_id = self._get_datastore_resource_id(
39 context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))
40 if datastore_resource_id:
41 c.top_line_items = self._get_top_line_items(
42 context, datastore_resource_id)
43
44 limit = 25
45 c.q = u'ebola'
46
47 page = int(request.params.get('page', 1))
48 data_dict = {'sort': u'metadata_modified desc',
49 'fq': '+dataset_type:dataset',
50 'rows': limit,
51 'q': c.q,
52 'start': (page - 1) * limit
53 }
54 query = get_action("package_search")(context, data_dict)
55
56 def pager_url(q=None, page=None):
57 return h.url_for('show_crisis', page=page)
58
59 c.page = h.Page(
60 collection=query['results'],
61 page=page,
62 url=pager_url,
63 item_count=query['count'],
64 items_per_page=limit
65 )
66 c.items = query['results']
67 c.item_count = query['count']
68
69 c.other_links = {}
70 c.other_links['show_more'] = h.url_for(
71 "search", **{'q': u'ebola', 'sort': u'metadata_modified desc',
72 'ext_indicator': '0'})
73
74 return render('crisis/crisis.html')
75
76 def _get_decimal_value(self, value):
77 decimal_value = Decimal(str(value)).quantize(
78 Decimal('.1'), rounding=decimal.ROUND_HALF_UP)
79 return decimal_value
80
81 def _format_results(self, result):
82 for r in result['records']:
83 d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')
84 r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')
85
86 modified_value = r[u'value']
87 if r[u'units'] == 'ratio':
88 modified_value *= 100.0
89 elif r[u'units'] == 'million':
90 modified_value /= 1000000.0
91
92 int_value = int(modified_value)
93 if int_value == modified_value:
94 r[u'formatted_value'] = '{:,}'.format(int_value)
95 else:
96 if r[u'units'] == 'ratio':
97 r[u'formatted_value'] = '{:,.1f}'.format(
98 self._get_decimal_value(modified_value))
99 elif r[u'units'] == 'million':
100 r[u'formatted_value'] = '{:,.1f}'.format(
101 self._get_decimal_value(modified_value))
102 #r[u'formatted_value'] += ' ' + _('million')
103
104 def _get_top_line_items(self, context, datastore_resource_id):
105 modified_context = dict(context)
106 modified_context['ignore_auth'] = True
107 result = get_action('datastore_search')(
108 modified_context, {'resource_id': datastore_resource_id})
109 if 'records' in result:
110 self._format_results(result)
111 return result['records']
112 return []
113
114 def _get_datastore_resource_id(self, context, dataset_id, resource_name):
115 try:
116 modified_context = dict(context)
117 modified_context['ignore_auth'] = True
118 dataset = get_action('package_show')(
119 modified_context, {'id': dataset_id})
120
121 if 'resources' in dataset:
122 for r in dataset['resources']:
123 if 'datastore_active' in r and r['datastore_active'] \
124 and r['name'] == resource_name:
125 return r['id']
126 return None
127 except:
128 log.warning('No dataset with id ' + dataset_id)
129 return None
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
@@ -54,7 +54,8 @@
         query = get_action("package_search")(context, data_dict)
 
         def pager_url(q=None, page=None):
-            return h.url_for('show_crisis', page=page)
+            url = h.url_for('show_crisis', page=page) + '#datasets-section'
+            return url
 
         c.page = h.Page(
             collection=query['results'],
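A note on the fix above: the appended `#datasets-section` fragment only changes where the browser lands if the crisis page template renders an element with that exact id near the dataset list (for example on the "Datasets [41]" heading); that template detail is assumed here rather than shown in the patch. A small sketch of the kind of link the patched `pager_url` now produces, using a made-up base path:

```python
# Sketch only: in the real view the URL comes from h.url_for('show_crisis',
# page=page); the base path below is illustrative.
page = 2
link = "/ebola?page={}#datasets-section".format(page)
print(link)  # /ebola?page=2#datasets-section, browser jumps to id="datasets-section"
```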
| {"golden_diff": "diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n@@ -54,7 +54,8 @@\n query = get_action(\"package_search\")(context, data_dict)\n \n def pager_url(q=None, page=None):\n- return h.url_for('show_crisis', page=page)\n+ url = h.url_for('show_crisis', page=page) + '#datasets-section'\n+ return url\n \n c.page = h.Page(\n collection=query['results'],\n", "issue": "Ebola page: loading second page of datasets reloads to top of page\nWould it be easy to have it load the page at the `Datasets [41]` line?\n\n", "before_files": [{"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\nimport datetime as dt\nimport decimal\n\nimport pylons.config as config\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\nDecimal = decimal.Decimal\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n return h.url_for('show_crisis', page=page)\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_decimal_value(self, value):\n decimal_value = Decimal(str(value)).quantize(\n Decimal('.1'), rounding=decimal.ROUND_HALF_UP)\n return decimal_value\n\n def _format_results(self, result):\n for r in result['records']:\n d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n\n modified_value = r[u'value']\n if r[u'units'] == 'ratio':\n modified_value *= 100.0\n elif r[u'units'] == 'million':\n modified_value /= 1000000.0\n\n int_value = int(modified_value)\n if int_value == modified_value:\n r[u'formatted_value'] = '{:,}'.format(int_value)\n else:\n if r[u'units'] == 'ratio':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n elif r[u'units'] == 'million':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n #r[u'formatted_value'] += ' ' + _('million')\n\n def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = 
dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n self._format_results(result)\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n dataset = get_action('package_show')(\n modified_context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"}], "after_files": [{"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\nimport datetime as dt\nimport decimal\n\nimport pylons.config as config\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\nDecimal = decimal.Decimal\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n url = h.url_for('show_crisis', page=page) + '#datasets-section'\n return url\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_decimal_value(self, value):\n decimal_value = Decimal(str(value)).quantize(\n Decimal('.1'), rounding=decimal.ROUND_HALF_UP)\n return decimal_value\n\n def _format_results(self, result):\n for r in result['records']:\n d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n\n modified_value = r[u'value']\n if r[u'units'] == 'ratio':\n modified_value *= 100.0\n elif r[u'units'] == 'million':\n modified_value /= 1000000.0\n\n int_value = int(modified_value)\n if int_value == modified_value:\n r[u'formatted_value'] = '{:,}'.format(int_value)\n else:\n if r[u'units'] == 'ratio':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n elif r[u'units'] == 'million':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n #r[u'formatted_value'] += ' ' + _('million')\n\n 
def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n self._format_results(result)\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n dataset = get_action('package_show')(\n modified_context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"}]} |
gh_patches_debug_1457 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3529 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow migrating to Volto after updating a site to Plone 6
When Plone is updated to 6, the upgrade view (@@plone-upgrade) should display a link to the Volto migration.
See https://github.com/plone/plone.volto/issues/55 for the migration itself.
--- END ISSUE ---
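Editor's note: functionally, the request boils down to the upgrade view exposing a cheap predicate ("can this site be migrated to Volto right now?") that its template can use to decide whether to render a link to the migration form. The sketch below is illustrative Python only, not Plone's actual implementation: the `is_addon_installed` helper is hypothetical, and `@@migrate_to_volto` is an assumed name for the migration form that plone.volto is expected to provide (see the linked plone.volto issue).

```python
def can_offer_volto_migration(site):
    """Sketch of the gating logic: only offer the migration when the add-on
    is importable but not yet installed on this site."""
    try:
        import plone.volto  # noqa: F401  (the add-on must be available)
    except ImportError:
        return False
    return not site.is_addon_installed("plone.volto")  # hypothetical helper


def volto_migration_link(site_url):
    # Assumed target: the migration form shipped by plone.volto.
    return site_url + "/@@migrate_to_volto"
```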
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/browser/admin.py`
Content:
```
1 from AccessControl import getSecurityManager
2 from AccessControl.Permissions import view as View
3 from OFS.interfaces import IApplication
4 from Products.CMFCore.permissions import ManagePortal
5 from Products.CMFPlone.factory import _DEFAULT_PROFILE
6 from Products.CMFPlone.factory import addPloneSite
7 from plone.base.interfaces import INonInstallable
8 from plone.base.interfaces import IPloneSiteRoot
9 from Products.CMFPlone.utils import get_installer
10 from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
11 from Products.GenericSetup import BASE, EXTENSION
12 from Products.GenericSetup import profile_registry
13 from Products.GenericSetup.upgrade import normalize_version
14 from ZPublisher.BaseRequest import DefaultPublishTraverse
15 from collections import OrderedDict
16 from plone.i18n.locales.interfaces import IContentLanguageAvailability
17 from plone.keyring.interfaces import IKeyManager
18 from plone.protect.authenticator import check as checkCSRF
19 from plone.protect.interfaces import IDisableCSRFProtection
20 from urllib import parse
21 from ZODB.broken import Broken
22 from zope.component import adapts
23 from zope.component import getAllUtilitiesRegisteredFor
24 from zope.component import getUtility
25 from zope.component import queryMultiAdapter
26 from zope.component import queryUtility
27 from zope.i18n.interfaces import IUserPreferredLanguages
28 from zope.i18n.locales import locales, LoadLocaleError
29 from zope.interface import Interface
30 from zope.interface import alsoProvides
31 from zope.publisher.browser import BrowserView
32 from zope.publisher.interfaces import IRequest
33 from zope.schema.interfaces import IVocabularyFactory
34
35 import logging
36 import pkg_resources
37
38
39 try:
40 pkg_resources.get_distribution("plone.volto")
41 HAS_VOLTO = True
42 except pkg_resources.DistributionNotFound:
43 HAS_VOLTO = False
44 LOGGER = logging.getLogger('Products.CMFPlone')
45
46
47 class AppTraverser(DefaultPublishTraverse):
48 adapts(IApplication, IRequest)
49
50 def publishTraverse(self, request, name):
51 if name == 'index_html':
52 view = queryMultiAdapter(
53 (self.context, request), Interface, 'plone-overview')
54 if view is not None:
55 return view
56 return DefaultPublishTraverse.publishTraverse(self, request, name)
57
58
59 class Overview(BrowserView):
60 has_volto = HAS_VOLTO
61
62 def sites(self, root=None):
63 if root is None:
64 root = self.context
65
66 result = []
67 secman = getSecurityManager()
68 candidates = (
69 obj for obj in root.values() if not isinstance(obj, Broken)
70 )
71 for obj in candidates:
72 if obj.meta_type == 'Folder':
73 result = result + self.sites(obj)
74 elif IPloneSiteRoot.providedBy(obj):
75 if secman.checkPermission(View, obj):
76 result.append(obj)
77 elif obj.getId() in getattr(root, '_mount_points', {}):
78 result.extend(self.sites(root=obj))
79 return result
80
81 def outdated(self, obj):
82 # Try to pick the portal_migration as an attribute
83 # (Plone 5 unmigrated site root) or as an item
84 mig = (
85 getattr(obj, "portal_migration", None)
86 or obj.get('portal_migration', None)
87 )
88 if mig is not None:
89 return mig.needUpgrading()
90 return False
91
92 def can_manage(self):
93 secman = getSecurityManager()
94 return secman.checkPermission(ManagePortal, self.context)
95
96 def upgrade_url(self, site, can_manage=None):
97 if can_manage is None:
98 can_manage = self.can_manage()
99 if can_manage:
100 return site.absolute_url() + '/@@plone-upgrade'
101 else:
102 return self.context.absolute_url() + '/@@plone-root-login'
103
104
105 class RootLoginRedirect(BrowserView):
106 """ @@plone-root-login
107
108 This view of the Zope root forces authentication via the root
109 acl_users and then redirects elsewhere.
110 """
111
112 def __call__(self, came_from=None):
113 if came_from is not None:
114 # see if this is a relative url or an absolute
115 if len(parse.urlparse(came_from)[1]) == 0:
116 # No host specified, so url is relative. Get an absolute url.
117 # Note: '\\domain.org' is not recognised as host,
118 # which is good.
119 came_from = parse.urljoin(
120 self.context.absolute_url() + '/', came_from,
121 )
122 elif not came_from.startswith(self.context.absolute_url()):
123 # Note: we cannot use portal_url.isURLInPortal here, because we
124 # are not in a Plone portal, but in the Zope root.
125 came_from = None
126 if came_from is None:
127 came_from = self.context.absolute_url()
128 self.request.response.redirect(came_from)
129
130
131 class RootLogout(BrowserView):
132 """ @@plone-root-logout """
133
134 logout = ViewPageTemplateFile('templates/plone-admin-logged-out.pt')
135
136 def __call__(self):
137 response = self.request.response
138 realm = response.realm
139 response.setStatus(401)
140 response.setHeader('WWW-Authenticate', 'basic realm="%s"' % realm, 1)
141 response.setBody(self.logout())
142 return
143
144
145 class FrontPage(BrowserView):
146
147 index = ViewPageTemplateFile('templates/plone-frontpage.pt')
148
149
150 class AddPloneSite(BrowserView):
151
152 # Profiles that are installed by default,
153 # but can be removed later.
154 default_extension_profiles = (
155 'plone.app.caching:default',
156 'plonetheme.barceloneta:default',
157 )
158 # Let's have a separate list for Volto.
159 volto_default_extension_profiles = (
160 'plone.app.caching:default',
161 # We could choose to not install Barceloneta:
162 'plonetheme.barceloneta:default',
163 'plone.volto:default',
164 'plone.volto:default-homepage'
165 )
166
167 def profiles(self):
168 base_profiles = []
169 extension_profiles = []
170 if HAS_VOLTO and not self.request.get('classic'):
171 selected_extension_profiles = self.volto_default_extension_profiles
172 else:
173 selected_extension_profiles = self.default_extension_profiles
174
175 # profiles available for install/uninstall, but hidden at the time
176 # the Plone site is created
177 not_installable = [
178 'Products.CMFPlacefulWorkflow:CMFPlacefulWorkflow',
179 ]
180 utils = getAllUtilitiesRegisteredFor(INonInstallable)
181 for util in utils:
182 not_installable.extend(util.getNonInstallableProfiles())
183
184 for info in profile_registry.listProfileInfo():
185 if info.get('type') == EXTENSION and \
186 info.get('for') in (IPloneSiteRoot, None):
187 profile_id = info.get('id')
188 if profile_id not in not_installable:
189 if profile_id in selected_extension_profiles:
190 info['selected'] = 'selected'
191 extension_profiles.append(info)
192
193 def _key(v):
194 # Make sure implicitly selected items come first
195 selected = v.get('selected') and 'automatic' or 'manual'
196 return '{}-{}'.format(selected, v.get('title', ''))
197 extension_profiles.sort(key=_key)
198
199 for info in profile_registry.listProfileInfo():
200 if info.get('type') == BASE and \
201 info.get('for') in (IPloneSiteRoot, None):
202 base_profiles.append(info)
203
204 return dict(
205 base=tuple(base_profiles),
206 default=_DEFAULT_PROFILE,
207 extensions=tuple(extension_profiles),
208 )
209
210 def browser_language(self):
211 language = 'en'
212 pl = IUserPreferredLanguages(self.request)
213 if pl is not None:
214 languages = pl.getPreferredLanguages()
215 for httplang in languages:
216 parts = (httplang.split('-') + [None, None])[:3]
217 if parts[0] == parts[1]:
218 # Avoid creating a country code for simple languages codes
219 parts = [parts[0], None, None]
220 try:
221 locale = locales.getLocale(*parts)
222 language = locale.getLocaleID().replace('_', '-').lower()
223 break
224 except LoadLocaleError:
225 # Just try the next combination
226 pass
227 return language
228
229 def grouped_languages(self, default='en'):
230 util = queryUtility(IContentLanguageAvailability)
231 available = util.getLanguages(combined=True)
232 languages = dict(util.getLanguageListing())
233
234 # Group country specific versions by language
235 grouped = OrderedDict()
236 for langcode, data in available.items():
237 lang = langcode.split('-')[0]
238 language = languages.get(lang, lang) # Label
239
240 struct = grouped.get(lang, {'label': language, 'languages': []})
241
242 langs = struct['languages']
243 langs.append({
244 'langcode': langcode,
245 'label': data.get('native', data.get('name')),
246 })
247
248 grouped[lang] = struct
249
250 # Sort list by language, next by country
251 data = sorted(grouped.values(), key=lambda k: k['label'])
252 for item in data:
253 item['languages'] = sorted(
254 item['languages'], key=lambda k: k['label'].lower())
255 return data
256
257 def timezones(self):
258 tz_vocab = getUtility(
259 IVocabularyFactory,
260 'plone.app.vocabularies.CommonTimezones'
261 )(self.context)
262
263 grouped = OrderedDict()
264 tz_values = [it.value for it in tz_vocab]
265 for value in tz_values:
266 splitted = value.split('/')
267 group = splitted.pop(0)
268 label = '/'.join(splitted)
269
270 entries = grouped.get(group, [])
271 entries.append({'label': label or group, 'value': value})
272 grouped[group] = entries
273
274 return grouped
275
276 def __call__(self):
277 context = self.context
278 form = self.request.form
279 submitted = form.get('form.submitted', False)
280 if submitted:
281 site_id = form.get('site_id', 'Plone')
282
283 # CSRF protect. DO NOT use auto CSRF protection for adding a site
284 alsoProvides(self.request, IDisableCSRFProtection)
285
286 # check if keyring is installed on root, disable CSRF protection
287 # if it is because it is not installed until a plone site
288 # is created
289 if queryUtility(IKeyManager) is None:
290 LOGGER.info('CSRF protection disabled on initial site '
291 'creation')
292 else:
293 # we have a keymanager, check csrf protection manually now
294 checkCSRF(self.request)
295 site = addPloneSite(
296 context, site_id,
297 title=form.get('title', ''),
298 profile_id=form.get('profile_id', _DEFAULT_PROFILE),
299 extension_ids=form.get('extension_ids', ()),
300 setup_content=form.get('setup_content', False),
301 default_language=form.get('default_language', 'en'),
302 portal_timezone=form.get('portal_timezone', 'UTC')
303 )
304 self.request.response.redirect(site.absolute_url())
305 return ''
306
307 return self.index()
308
309
310 class Upgrade(BrowserView):
311
312 def upgrades(self):
313 pm = getattr(self.context, 'portal_migration')
314 return pm.listUpgrades()
315
316 def versions(self):
317 pm = getattr(self.context, 'portal_migration')
318 result = {}
319 result['instance'] = pm.getInstanceVersion()
320 result['fs'] = pm.getFileSystemVersion()
321 result['equal'] = result['instance'] == result['fs']
322 instance_version = normalize_version(result['instance'])
323 fs_version = normalize_version(result['fs'])
324 result['instance_gt'] = instance_version > fs_version
325 result['instance_lt'] = instance_version < fs_version
326 result['corelist'] = pm.coreVersions()
327 return result
328
329 def __call__(self):
330 form = self.request.form
331 submitted = form.get('form.submitted', False)
332 if submitted:
333 # CSRF protect. DO NOT use auto CSRF protection for upgrading sites
334 alsoProvides(self.request, IDisableCSRFProtection)
335
336 pm = getattr(self.context, 'portal_migration')
337 report = pm.upgrade(
338 REQUEST=self.request,
339 dry_run=form.get('dry_run', False),
340 )
341 return self.index(
342 report=report,
343 )
344
345 return self.index()
346
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/browser/admin.py b/Products/CMFPlone/browser/admin.py
--- a/Products/CMFPlone/browser/admin.py
+++ b/Products/CMFPlone/browser/admin.py
@@ -343,3 +343,16 @@
             )
 
         return self.index()
+
+    def can_migrate_to_volto(self):
+        if not HAS_VOLTO:
+            return False
+        pm = getattr(self.context, 'portal_migration')
+        if pm.getInstanceVersion() < "6005":
+            return False
+        try:
+            from plone.volto.browser import migrate_to_volto
+        except ImportError:
+            return False
+        installer = get_installer(self.context, self.request)
+        return not installer.is_product_installed("plone.volto")
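A possible consumer of the new helper, to show how the upgrade page could use it. This is a hedged sketch, not code from Products.CMFPlone: the multi-adapter lookup is the standard zope.component API and the "plone-upgrade" view name comes from the issue, but the template wiring and the `@@migrate_to_volto` target URL are assumptions.

```python
from zope.component import getMultiAdapter


def volto_migration_url(portal, request):
    """Return the migration-form URL when a Volto migration is possible, else None.

    Sketch only: the @@plone-upgrade page template would perform the
    equivalent check before rendering the link; '@@migrate_to_volto' is an
    assumed name for the form plone.volto provides.
    """
    upgrade_view = getMultiAdapter((portal, request), name="plone-upgrade")
    if upgrade_view.can_migrate_to_volto():
        return portal.absolute_url() + "/@@migrate_to_volto"
    return None
```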
| {"golden_diff": "diff --git a/Products/CMFPlone/browser/admin.py b/Products/CMFPlone/browser/admin.py\n--- a/Products/CMFPlone/browser/admin.py\n+++ b/Products/CMFPlone/browser/admin.py\n@@ -343,3 +343,16 @@\n )\n \n return self.index()\n+\n+ def can_migrate_to_volto(self):\n+ if not HAS_VOLTO:\n+ return False\n+ pm = getattr(self.context, 'portal_migration')\n+ if pm.getInstanceVersion() < \"6005\":\n+ return False\n+ try:\n+ from plone.volto.browser import migrate_to_volto\n+ except ImportError:\n+ return False\n+ installer = get_installer(self.context, self.request)\n+ return not installer.is_product_installed(\"plone.volto\")\n", "issue": "Allow to migrate to Volto after updating a site to Plone 6\nWhen Plone is updated to 6 the upgrade-view (@@plone-upgrade) should display a link to the migration to Volto.\r\nSee https://github.com/plone/plone.volto/issues/55 for the migration itself.\n", "before_files": [{"content": "from AccessControl import getSecurityManager\nfrom AccessControl.Permissions import view as View\nfrom OFS.interfaces import IApplication\nfrom Products.CMFCore.permissions import ManagePortal\nfrom Products.CMFPlone.factory import _DEFAULT_PROFILE\nfrom Products.CMFPlone.factory import addPloneSite\nfrom plone.base.interfaces import INonInstallable\nfrom plone.base.interfaces import IPloneSiteRoot\nfrom Products.CMFPlone.utils import get_installer\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.GenericSetup import BASE, EXTENSION\nfrom Products.GenericSetup import profile_registry\nfrom Products.GenericSetup.upgrade import normalize_version\nfrom ZPublisher.BaseRequest import DefaultPublishTraverse\nfrom collections import OrderedDict\nfrom plone.i18n.locales.interfaces import IContentLanguageAvailability\nfrom plone.keyring.interfaces import IKeyManager\nfrom plone.protect.authenticator import check as checkCSRF\nfrom plone.protect.interfaces import IDisableCSRFProtection\nfrom urllib import parse\nfrom ZODB.broken import Broken\nfrom zope.component import adapts\nfrom zope.component import getAllUtilitiesRegisteredFor\nfrom zope.component import getUtility\nfrom zope.component import queryMultiAdapter\nfrom zope.component import queryUtility\nfrom zope.i18n.interfaces import IUserPreferredLanguages\nfrom zope.i18n.locales import locales, LoadLocaleError\nfrom zope.interface import Interface\nfrom zope.interface import alsoProvides\nfrom zope.publisher.browser import BrowserView\nfrom zope.publisher.interfaces import IRequest\nfrom zope.schema.interfaces import IVocabularyFactory\n\nimport logging\nimport pkg_resources\n\n\ntry:\n pkg_resources.get_distribution(\"plone.volto\")\n HAS_VOLTO = True\nexcept pkg_resources.DistributionNotFound:\n HAS_VOLTO = False\nLOGGER = logging.getLogger('Products.CMFPlone')\n\n\nclass AppTraverser(DefaultPublishTraverse):\n adapts(IApplication, IRequest)\n\n def publishTraverse(self, request, name):\n if name == 'index_html':\n view = queryMultiAdapter(\n (self.context, request), Interface, 'plone-overview')\n if view is not None:\n return view\n return DefaultPublishTraverse.publishTraverse(self, request, name)\n\n\nclass Overview(BrowserView):\n has_volto = HAS_VOLTO\n\n def sites(self, root=None):\n if root is None:\n root = self.context\n\n result = []\n secman = getSecurityManager()\n candidates = (\n obj for obj in root.values() if not isinstance(obj, Broken)\n )\n for obj in candidates:\n if obj.meta_type == 'Folder':\n result = result + self.sites(obj)\n elif 
IPloneSiteRoot.providedBy(obj):\n if secman.checkPermission(View, obj):\n result.append(obj)\n elif obj.getId() in getattr(root, '_mount_points', {}):\n result.extend(self.sites(root=obj))\n return result\n\n def outdated(self, obj):\n # Try to pick the portal_migration as an attribute\n # (Plone 5 unmigrated site root) or as an item\n mig = (\n getattr(obj, \"portal_migration\", None)\n or obj.get('portal_migration', None)\n )\n if mig is not None:\n return mig.needUpgrading()\n return False\n\n def can_manage(self):\n secman = getSecurityManager()\n return secman.checkPermission(ManagePortal, self.context)\n\n def upgrade_url(self, site, can_manage=None):\n if can_manage is None:\n can_manage = self.can_manage()\n if can_manage:\n return site.absolute_url() + '/@@plone-upgrade'\n else:\n return self.context.absolute_url() + '/@@plone-root-login'\n\n\nclass RootLoginRedirect(BrowserView):\n \"\"\" @@plone-root-login\n\n This view of the Zope root forces authentication via the root\n acl_users and then redirects elsewhere.\n \"\"\"\n\n def __call__(self, came_from=None):\n if came_from is not None:\n # see if this is a relative url or an absolute\n if len(parse.urlparse(came_from)[1]) == 0:\n # No host specified, so url is relative. Get an absolute url.\n # Note: '\\\\domain.org' is not recognised as host,\n # which is good.\n came_from = parse.urljoin(\n self.context.absolute_url() + '/', came_from,\n )\n elif not came_from.startswith(self.context.absolute_url()):\n # Note: we cannot use portal_url.isURLInPortal here, because we\n # are not in a Plone portal, but in the Zope root.\n came_from = None\n if came_from is None:\n came_from = self.context.absolute_url()\n self.request.response.redirect(came_from)\n\n\nclass RootLogout(BrowserView):\n \"\"\" @@plone-root-logout \"\"\"\n\n logout = ViewPageTemplateFile('templates/plone-admin-logged-out.pt')\n\n def __call__(self):\n response = self.request.response\n realm = response.realm\n response.setStatus(401)\n response.setHeader('WWW-Authenticate', 'basic realm=\"%s\"' % realm, 1)\n response.setBody(self.logout())\n return\n\n\nclass FrontPage(BrowserView):\n\n index = ViewPageTemplateFile('templates/plone-frontpage.pt')\n\n\nclass AddPloneSite(BrowserView):\n\n # Profiles that are installed by default,\n # but can be removed later.\n default_extension_profiles = (\n 'plone.app.caching:default',\n 'plonetheme.barceloneta:default',\n )\n # Let's have a separate list for Volto.\n volto_default_extension_profiles = (\n 'plone.app.caching:default',\n # We could choose to not install Barceloneta:\n 'plonetheme.barceloneta:default',\n 'plone.volto:default',\n 'plone.volto:default-homepage'\n )\n\n def profiles(self):\n base_profiles = []\n extension_profiles = []\n if HAS_VOLTO and not self.request.get('classic'):\n selected_extension_profiles = self.volto_default_extension_profiles\n else:\n selected_extension_profiles = self.default_extension_profiles\n\n # profiles available for install/uninstall, but hidden at the time\n # the Plone site is created\n not_installable = [\n 'Products.CMFPlacefulWorkflow:CMFPlacefulWorkflow',\n ]\n utils = getAllUtilitiesRegisteredFor(INonInstallable)\n for util in utils:\n not_installable.extend(util.getNonInstallableProfiles())\n\n for info in profile_registry.listProfileInfo():\n if info.get('type') == EXTENSION and \\\n info.get('for') in (IPloneSiteRoot, None):\n profile_id = info.get('id')\n if profile_id not in not_installable:\n if profile_id in selected_extension_profiles:\n info['selected'] = 
'selected'\n extension_profiles.append(info)\n\n def _key(v):\n # Make sure implicitly selected items come first\n selected = v.get('selected') and 'automatic' or 'manual'\n return '{}-{}'.format(selected, v.get('title', ''))\n extension_profiles.sort(key=_key)\n\n for info in profile_registry.listProfileInfo():\n if info.get('type') == BASE and \\\n info.get('for') in (IPloneSiteRoot, None):\n base_profiles.append(info)\n\n return dict(\n base=tuple(base_profiles),\n default=_DEFAULT_PROFILE,\n extensions=tuple(extension_profiles),\n )\n\n def browser_language(self):\n language = 'en'\n pl = IUserPreferredLanguages(self.request)\n if pl is not None:\n languages = pl.getPreferredLanguages()\n for httplang in languages:\n parts = (httplang.split('-') + [None, None])[:3]\n if parts[0] == parts[1]:\n # Avoid creating a country code for simple languages codes\n parts = [parts[0], None, None]\n try:\n locale = locales.getLocale(*parts)\n language = locale.getLocaleID().replace('_', '-').lower()\n break\n except LoadLocaleError:\n # Just try the next combination\n pass\n return language\n\n def grouped_languages(self, default='en'):\n util = queryUtility(IContentLanguageAvailability)\n available = util.getLanguages(combined=True)\n languages = dict(util.getLanguageListing())\n\n # Group country specific versions by language\n grouped = OrderedDict()\n for langcode, data in available.items():\n lang = langcode.split('-')[0]\n language = languages.get(lang, lang) # Label\n\n struct = grouped.get(lang, {'label': language, 'languages': []})\n\n langs = struct['languages']\n langs.append({\n 'langcode': langcode,\n 'label': data.get('native', data.get('name')),\n })\n\n grouped[lang] = struct\n\n # Sort list by language, next by country\n data = sorted(grouped.values(), key=lambda k: k['label'])\n for item in data:\n item['languages'] = sorted(\n item['languages'], key=lambda k: k['label'].lower())\n return data\n\n def timezones(self):\n tz_vocab = getUtility(\n IVocabularyFactory,\n 'plone.app.vocabularies.CommonTimezones'\n )(self.context)\n\n grouped = OrderedDict()\n tz_values = [it.value for it in tz_vocab]\n for value in tz_values:\n splitted = value.split('/')\n group = splitted.pop(0)\n label = '/'.join(splitted)\n\n entries = grouped.get(group, [])\n entries.append({'label': label or group, 'value': value})\n grouped[group] = entries\n\n return grouped\n\n def __call__(self):\n context = self.context\n form = self.request.form\n submitted = form.get('form.submitted', False)\n if submitted:\n site_id = form.get('site_id', 'Plone')\n\n # CSRF protect. 
DO NOT use auto CSRF protection for adding a site\n alsoProvides(self.request, IDisableCSRFProtection)\n\n # check if keyring is installed on root, disable CSRF protection\n # if it is because it is not installed until a plone site\n # is created\n if queryUtility(IKeyManager) is None:\n LOGGER.info('CSRF protection disabled on initial site '\n 'creation')\n else:\n # we have a keymanager, check csrf protection manually now\n checkCSRF(self.request)\n site = addPloneSite(\n context, site_id,\n title=form.get('title', ''),\n profile_id=form.get('profile_id', _DEFAULT_PROFILE),\n extension_ids=form.get('extension_ids', ()),\n setup_content=form.get('setup_content', False),\n default_language=form.get('default_language', 'en'),\n portal_timezone=form.get('portal_timezone', 'UTC')\n )\n self.request.response.redirect(site.absolute_url())\n return ''\n\n return self.index()\n\n\nclass Upgrade(BrowserView):\n\n def upgrades(self):\n pm = getattr(self.context, 'portal_migration')\n return pm.listUpgrades()\n\n def versions(self):\n pm = getattr(self.context, 'portal_migration')\n result = {}\n result['instance'] = pm.getInstanceVersion()\n result['fs'] = pm.getFileSystemVersion()\n result['equal'] = result['instance'] == result['fs']\n instance_version = normalize_version(result['instance'])\n fs_version = normalize_version(result['fs'])\n result['instance_gt'] = instance_version > fs_version\n result['instance_lt'] = instance_version < fs_version\n result['corelist'] = pm.coreVersions()\n return result\n\n def __call__(self):\n form = self.request.form\n submitted = form.get('form.submitted', False)\n if submitted:\n # CSRF protect. DO NOT use auto CSRF protection for upgrading sites\n alsoProvides(self.request, IDisableCSRFProtection)\n\n pm = getattr(self.context, 'portal_migration')\n report = pm.upgrade(\n REQUEST=self.request,\n dry_run=form.get('dry_run', False),\n )\n return self.index(\n report=report,\n )\n\n return self.index()\n", "path": "Products/CMFPlone/browser/admin.py"}], "after_files": [{"content": "from AccessControl import getSecurityManager\nfrom AccessControl.Permissions import view as View\nfrom OFS.interfaces import IApplication\nfrom Products.CMFCore.permissions import ManagePortal\nfrom Products.CMFPlone.factory import _DEFAULT_PROFILE\nfrom Products.CMFPlone.factory import addPloneSite\nfrom plone.base.interfaces import INonInstallable\nfrom plone.base.interfaces import IPloneSiteRoot\nfrom Products.CMFPlone.utils import get_installer\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.GenericSetup import BASE, EXTENSION\nfrom Products.GenericSetup import profile_registry\nfrom Products.GenericSetup.upgrade import normalize_version\nfrom ZPublisher.BaseRequest import DefaultPublishTraverse\nfrom collections import OrderedDict\nfrom plone.i18n.locales.interfaces import IContentLanguageAvailability\nfrom plone.keyring.interfaces import IKeyManager\nfrom plone.protect.authenticator import check as checkCSRF\nfrom plone.protect.interfaces import IDisableCSRFProtection\nfrom urllib import parse\nfrom ZODB.broken import Broken\nfrom zope.component import adapts\nfrom zope.component import getAllUtilitiesRegisteredFor\nfrom zope.component import getUtility\nfrom zope.component import queryMultiAdapter\nfrom zope.component import queryUtility\nfrom zope.i18n.interfaces import IUserPreferredLanguages\nfrom zope.i18n.locales import locales, LoadLocaleError\nfrom zope.interface import Interface\nfrom zope.interface import alsoProvides\nfrom 
zope.publisher.browser import BrowserView\nfrom zope.publisher.interfaces import IRequest\nfrom zope.schema.interfaces import IVocabularyFactory\n\nimport logging\nimport pkg_resources\n\n\ntry:\n pkg_resources.get_distribution(\"plone.volto\")\n HAS_VOLTO = True\nexcept pkg_resources.DistributionNotFound:\n HAS_VOLTO = False\nLOGGER = logging.getLogger('Products.CMFPlone')\n\n\nclass AppTraverser(DefaultPublishTraverse):\n adapts(IApplication, IRequest)\n\n def publishTraverse(self, request, name):\n if name == 'index_html':\n view = queryMultiAdapter(\n (self.context, request), Interface, 'plone-overview')\n if view is not None:\n return view\n return DefaultPublishTraverse.publishTraverse(self, request, name)\n\n\nclass Overview(BrowserView):\n has_volto = HAS_VOLTO\n\n def sites(self, root=None):\n if root is None:\n root = self.context\n\n result = []\n secman = getSecurityManager()\n candidates = (\n obj for obj in root.values() if not isinstance(obj, Broken)\n )\n for obj in candidates:\n if obj.meta_type == 'Folder':\n result = result + self.sites(obj)\n elif IPloneSiteRoot.providedBy(obj):\n if secman.checkPermission(View, obj):\n result.append(obj)\n elif obj.getId() in getattr(root, '_mount_points', {}):\n result.extend(self.sites(root=obj))\n return result\n\n def outdated(self, obj):\n # Try to pick the portal_migration as an attribute\n # (Plone 5 unmigrated site root) or as an item\n mig = (\n getattr(obj, \"portal_migration\", None)\n or obj.get('portal_migration', None)\n )\n if mig is not None:\n return mig.needUpgrading()\n return False\n\n def can_manage(self):\n secman = getSecurityManager()\n return secman.checkPermission(ManagePortal, self.context)\n\n def upgrade_url(self, site, can_manage=None):\n if can_manage is None:\n can_manage = self.can_manage()\n if can_manage:\n return site.absolute_url() + '/@@plone-upgrade'\n else:\n return self.context.absolute_url() + '/@@plone-root-login'\n\n\nclass RootLoginRedirect(BrowserView):\n \"\"\" @@plone-root-login\n\n This view of the Zope root forces authentication via the root\n acl_users and then redirects elsewhere.\n \"\"\"\n\n def __call__(self, came_from=None):\n if came_from is not None:\n # see if this is a relative url or an absolute\n if len(parse.urlparse(came_from)[1]) == 0:\n # No host specified, so url is relative. 
Get an absolute url.\n # Note: '\\\\domain.org' is not recognised as host,\n # which is good.\n came_from = parse.urljoin(\n self.context.absolute_url() + '/', came_from,\n )\n elif not came_from.startswith(self.context.absolute_url()):\n # Note: we cannot use portal_url.isURLInPortal here, because we\n # are not in a Plone portal, but in the Zope root.\n came_from = None\n if came_from is None:\n came_from = self.context.absolute_url()\n self.request.response.redirect(came_from)\n\n\nclass RootLogout(BrowserView):\n \"\"\" @@plone-root-logout \"\"\"\n\n logout = ViewPageTemplateFile('templates/plone-admin-logged-out.pt')\n\n def __call__(self):\n response = self.request.response\n realm = response.realm\n response.setStatus(401)\n response.setHeader('WWW-Authenticate', 'basic realm=\"%s\"' % realm, 1)\n response.setBody(self.logout())\n return\n\n\nclass FrontPage(BrowserView):\n\n index = ViewPageTemplateFile('templates/plone-frontpage.pt')\n\n\nclass AddPloneSite(BrowserView):\n\n # Profiles that are installed by default,\n # but can be removed later.\n default_extension_profiles = (\n 'plone.app.caching:default',\n 'plonetheme.barceloneta:default',\n )\n # Let's have a separate list for Volto.\n volto_default_extension_profiles = (\n 'plone.app.caching:default',\n # We could choose to not install Barceloneta:\n 'plonetheme.barceloneta:default',\n 'plone.volto:default',\n 'plone.volto:default-homepage'\n )\n\n def profiles(self):\n base_profiles = []\n extension_profiles = []\n if HAS_VOLTO and not self.request.get('classic'):\n selected_extension_profiles = self.volto_default_extension_profiles\n else:\n selected_extension_profiles = self.default_extension_profiles\n\n # profiles available for install/uninstall, but hidden at the time\n # the Plone site is created\n not_installable = [\n 'Products.CMFPlacefulWorkflow:CMFPlacefulWorkflow',\n ]\n utils = getAllUtilitiesRegisteredFor(INonInstallable)\n for util in utils:\n not_installable.extend(util.getNonInstallableProfiles())\n\n for info in profile_registry.listProfileInfo():\n if info.get('type') == EXTENSION and \\\n info.get('for') in (IPloneSiteRoot, None):\n profile_id = info.get('id')\n if profile_id not in not_installable:\n if profile_id in selected_extension_profiles:\n info['selected'] = 'selected'\n extension_profiles.append(info)\n\n def _key(v):\n # Make sure implicitly selected items come first\n selected = v.get('selected') and 'automatic' or 'manual'\n return '{}-{}'.format(selected, v.get('title', ''))\n extension_profiles.sort(key=_key)\n\n for info in profile_registry.listProfileInfo():\n if info.get('type') == BASE and \\\n info.get('for') in (IPloneSiteRoot, None):\n base_profiles.append(info)\n\n return dict(\n base=tuple(base_profiles),\n default=_DEFAULT_PROFILE,\n extensions=tuple(extension_profiles),\n )\n\n def browser_language(self):\n language = 'en'\n pl = IUserPreferredLanguages(self.request)\n if pl is not None:\n languages = pl.getPreferredLanguages()\n for httplang in languages:\n parts = (httplang.split('-') + [None, None])[:3]\n if parts[0] == parts[1]:\n # Avoid creating a country code for simple languages codes\n parts = [parts[0], None, None]\n try:\n locale = locales.getLocale(*parts)\n language = locale.getLocaleID().replace('_', '-').lower()\n break\n except LoadLocaleError:\n # Just try the next combination\n pass\n return language\n\n def grouped_languages(self, default='en'):\n util = queryUtility(IContentLanguageAvailability)\n available = util.getLanguages(combined=True)\n languages = 
dict(util.getLanguageListing())\n\n # Group country specific versions by language\n grouped = OrderedDict()\n for langcode, data in available.items():\n lang = langcode.split('-')[0]\n language = languages.get(lang, lang) # Label\n\n struct = grouped.get(lang, {'label': language, 'languages': []})\n\n langs = struct['languages']\n langs.append({\n 'langcode': langcode,\n 'label': data.get('native', data.get('name')),\n })\n\n grouped[lang] = struct\n\n # Sort list by language, next by country\n data = sorted(grouped.values(), key=lambda k: k['label'])\n for item in data:\n item['languages'] = sorted(\n item['languages'], key=lambda k: k['label'].lower())\n return data\n\n def timezones(self):\n tz_vocab = getUtility(\n IVocabularyFactory,\n 'plone.app.vocabularies.CommonTimezones'\n )(self.context)\n\n grouped = OrderedDict()\n tz_values = [it.value for it in tz_vocab]\n for value in tz_values:\n splitted = value.split('/')\n group = splitted.pop(0)\n label = '/'.join(splitted)\n\n entries = grouped.get(group, [])\n entries.append({'label': label or group, 'value': value})\n grouped[group] = entries\n\n return grouped\n\n def __call__(self):\n context = self.context\n form = self.request.form\n submitted = form.get('form.submitted', False)\n if submitted:\n site_id = form.get('site_id', 'Plone')\n\n # CSRF protect. DO NOT use auto CSRF protection for adding a site\n alsoProvides(self.request, IDisableCSRFProtection)\n\n # check if keyring is installed on root, disable CSRF protection\n # if it is because it is not installed until a plone site\n # is created\n if queryUtility(IKeyManager) is None:\n LOGGER.info('CSRF protection disabled on initial site '\n 'creation')\n else:\n # we have a keymanager, check csrf protection manually now\n checkCSRF(self.request)\n site = addPloneSite(\n context, site_id,\n title=form.get('title', ''),\n profile_id=form.get('profile_id', _DEFAULT_PROFILE),\n extension_ids=form.get('extension_ids', ()),\n setup_content=form.get('setup_content', False),\n default_language=form.get('default_language', 'en'),\n portal_timezone=form.get('portal_timezone', 'UTC')\n )\n self.request.response.redirect(site.absolute_url())\n return ''\n\n return self.index()\n\n\nclass Upgrade(BrowserView):\n\n def upgrades(self):\n pm = getattr(self.context, 'portal_migration')\n return pm.listUpgrades()\n\n def versions(self):\n pm = getattr(self.context, 'portal_migration')\n result = {}\n result['instance'] = pm.getInstanceVersion()\n result['fs'] = pm.getFileSystemVersion()\n result['equal'] = result['instance'] == result['fs']\n instance_version = normalize_version(result['instance'])\n fs_version = normalize_version(result['fs'])\n result['instance_gt'] = instance_version > fs_version\n result['instance_lt'] = instance_version < fs_version\n result['corelist'] = pm.coreVersions()\n return result\n\n def __call__(self):\n form = self.request.form\n submitted = form.get('form.submitted', False)\n if submitted:\n # CSRF protect. 
DO NOT use auto CSRF protection for upgrading sites\n alsoProvides(self.request, IDisableCSRFProtection)\n\n pm = getattr(self.context, 'portal_migration')\n report = pm.upgrade(\n REQUEST=self.request,\n dry_run=form.get('dry_run', False),\n )\n return self.index(\n report=report,\n )\n\n return self.index()\n\n def can_migrate_to_volto(self):\n if not HAS_VOLTO:\n return False\n pm = getattr(self.context, 'portal_migration')\n if pm.getInstanceVersion() < \"6005\":\n return False\n try:\n from plone.volto.browser import migrate_to_volto\n except ImportError:\n return False\n installer = get_installer(self.context, self.request)\n return not installer.is_product_installed(\"plone.volto\")\n", "path": "Products/CMFPlone/browser/admin.py"}]} |
gh_patches_debug_1458 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-597 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Confusing KeyError message for flash registry
## 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
### To Reproduce
Steps to reproduce the behavior:
```
from flash.image import ImageClassificationData, ImageClassifier
print(ImageClassifier.backbones.get('abcd'))
```
#### Code sample
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
### Expected behavior
It should throw a `KeyError`.
### Environment
- PyTorch Version (e.g., 1.0):
- OS (e.g., Linux):
- How you installed PyTorch (`conda`, `pip`, source):
- Build command you used (if compiling from source):
- Python version:
- CUDA/cuDNN version:
- GPU models and configuration:
- Any other relevant information:
### Additional context
Sending in PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/core/registry.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from functools import partial
15 from types import FunctionType
16 from typing import Any, Callable, Dict, List, Optional, Union
17
18 from pytorch_lightning.utilities import rank_zero_info
19 from pytorch_lightning.utilities.exceptions import MisconfigurationException
20
21 _REGISTERED_FUNCTION = Dict[str, Any]
22
23
24 class FlashRegistry:
25 """This class is used to register function or :class:`functools.partial` class to a registry."""
26
27 def __init__(self, name: str, verbose: bool = False) -> None:
28 self.name = name
29 self.functions: List[_REGISTERED_FUNCTION] = []
30 self._verbose = verbose
31
32 def __len__(self) -> int:
33 return len(self.functions)
34
35 def __contains__(self, key) -> bool:
36 return any(key == e["name"] for e in self.functions)
37
38 def __repr__(self) -> str:
39 return f'{self.__class__.__name__}(name={self.name}, functions={self.functions})'
40
41 def get(
42 self,
43 key: str,
44 with_metadata: bool = False,
45 strict: bool = True,
46 **metadata,
47 ) -> Union[Callable, _REGISTERED_FUNCTION, List[_REGISTERED_FUNCTION], List[Callable]]:
48 """
49 This function is used to gather matches from the registry:
50
51 Args:
52 key: Name of the registered function.
53 with_metadata: Whether to include the associated metadata in the return value.
54 strict: Whether to return all matches or just one.
55 metadata: Metadata used to filter against existing registry item's metadata.
56 """
57 matches = [e for e in self.functions if key == e["name"]]
58 if not matches:
59 raise KeyError(f"Key: {key} is not in {repr(self)}")
60
61 if metadata:
62 matches = [m for m in matches if metadata.items() <= m["metadata"].items()]
63 if not matches:
64 raise KeyError("Found no matches that fit your metadata criteria. Try removing some metadata")
65
66 matches = [e if with_metadata else e["fn"] for e in matches]
67 return matches[0] if strict else matches
68
69 def remove(self, key: str) -> None:
70 self.functions = [f for f in self.functions if f["name"] != key]
71
72 def _register_function(
73 self,
74 fn: Callable,
75 name: Optional[str] = None,
76 override: bool = False,
77 metadata: Optional[Dict[str, Any]] = None
78 ):
79 if not isinstance(fn, FunctionType) and not isinstance(fn, partial):
80 raise MisconfigurationException(f"You can only register a function, found: {fn}")
81
82 name = name or fn.__name__
83
84 if self._verbose:
85 rank_zero_info(f"Registering: {fn.__name__} function with name: {name} and metadata: {metadata}")
86
87 item = {"fn": fn, "name": name, "metadata": metadata or {}}
88
89 matching_index = self._find_matching_index(item)
90 if override and matching_index is not None:
91 self.functions[matching_index] = item
92 else:
93 if matching_index is not None:
94 raise MisconfigurationException(
95 f"Function with name: {name} and metadata: {metadata} is already present within {self}."
96 " HINT: Use `override=True`."
97 )
98 self.functions.append(item)
99
100 def _find_matching_index(self, item: _REGISTERED_FUNCTION) -> Optional[int]:
101 for idx, fn in enumerate(self.functions):
102 if all(fn[k] == item[k] for k in ("fn", "name", "metadata")):
103 return idx
104
105 def __call__(
106 self,
107 fn: Optional[Callable[..., Any]] = None,
108 name: Optional[str] = None,
109 override: bool = False,
110 **metadata
111 ) -> Callable:
112 """
113 This function is used to register new functions to the registry along their metadata.
114
115 Functions can be filtered using metadata using the ``get`` function.
116
117 """
118 if fn is not None:
119 self._register_function(fn=fn, name=name, override=override, metadata=metadata)
120 return fn
121
122 # raise the error ahead of time
123 if not (name is None or isinstance(name, str)):
124 raise TypeError(f'`name` must be a str, found {name}')
125
126 def _register(cls):
127 self._register_function(fn=cls, name=name, override=override, metadata=metadata)
128 return cls
129
130 return _register
131
132 def available_keys(self) -> List[str]:
133 return sorted(v["name"] for v in self.functions)
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flash/core/registry.py b/flash/core/registry.py
--- a/flash/core/registry.py
+++ b/flash/core/registry.py
@@ -56,7 +56,7 @@
"""
matches = [e for e in self.functions if key == e["name"]]
if not matches:
- raise KeyError(f"Key: {key} is not in {repr(self)}")
+ raise KeyError(f"Key: {key} is not in {type(self).__name__}")
if metadata:
matches = [m for m in matches if metadata.items() <= m["metadata"].items()]
| {"golden_diff": "diff --git a/flash/core/registry.py b/flash/core/registry.py\n--- a/flash/core/registry.py\n+++ b/flash/core/registry.py\n@@ -56,7 +56,7 @@\n \"\"\"\n matches = [e for e in self.functions if key == e[\"name\"]]\n if not matches:\n- raise KeyError(f\"Key: {key} is not in {repr(self)}\")\n+ raise KeyError(f\"Key: {key} is not in {type(self).__name__}\")\n \n if metadata:\n matches = [m for m in matches if metadata.items() <= m[\"metadata\"].items()]\n", "issue": "Confusing KerError message for flash registry\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n```\r\nfrom flash.image import ImageClassificationData, ImageClassifier\r\n\r\nprint(ImageClassifier.backbones.get('abcd'))\r\n```\r\n\r\n#### Code sample\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\n\r\nIt should throw a keyerror.\r\n\r\n### Environment\r\n\r\n - PyTorch Version (e.g., 1.0):\r\n - OS (e.g., Linux):\r\n - How you installed PyTorch (`conda`, `pip`, source):\r\n - Build command you used (if compiling from source):\r\n - Python version:\r\n - CUDA/cuDNN version:\r\n - GPU models and configuration:\r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\nSending in PR.\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nfrom types import FunctionType\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nfrom pytorch_lightning.utilities import rank_zero_info\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n_REGISTERED_FUNCTION = Dict[str, Any]\n\n\nclass FlashRegistry:\n \"\"\"This class is used to register function or :class:`functools.partial` class to a registry.\"\"\"\n\n def __init__(self, name: str, verbose: bool = False) -> None:\n self.name = name\n self.functions: List[_REGISTERED_FUNCTION] = []\n self._verbose = verbose\n\n def __len__(self) -> int:\n return len(self.functions)\n\n def __contains__(self, key) -> bool:\n return any(key == e[\"name\"] for e in self.functions)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(name={self.name}, functions={self.functions})'\n\n def get(\n self,\n key: str,\n with_metadata: bool = False,\n strict: bool = True,\n **metadata,\n ) -> Union[Callable, _REGISTERED_FUNCTION, List[_REGISTERED_FUNCTION], List[Callable]]:\n \"\"\"\n This function is used to gather matches from the registry:\n\n Args:\n key: Name of the registered function.\n with_metadata: Whether to include the associated metadata in the return value.\n strict: Whether to return all matches or just one.\n metadata: Metadata used to filter against existing registry item's metadata.\n \"\"\"\n matches = [e for e in self.functions if key == e[\"name\"]]\n if not 
matches:\n raise KeyError(f\"Key: {key} is not in {repr(self)}\")\n\n if metadata:\n matches = [m for m in matches if metadata.items() <= m[\"metadata\"].items()]\n if not matches:\n raise KeyError(\"Found no matches that fit your metadata criteria. Try removing some metadata\")\n\n matches = [e if with_metadata else e[\"fn\"] for e in matches]\n return matches[0] if strict else matches\n\n def remove(self, key: str) -> None:\n self.functions = [f for f in self.functions if f[\"name\"] != key]\n\n def _register_function(\n self,\n fn: Callable,\n name: Optional[str] = None,\n override: bool = False,\n metadata: Optional[Dict[str, Any]] = None\n ):\n if not isinstance(fn, FunctionType) and not isinstance(fn, partial):\n raise MisconfigurationException(f\"You can only register a function, found: {fn}\")\n\n name = name or fn.__name__\n\n if self._verbose:\n rank_zero_info(f\"Registering: {fn.__name__} function with name: {name} and metadata: {metadata}\")\n\n item = {\"fn\": fn, \"name\": name, \"metadata\": metadata or {}}\n\n matching_index = self._find_matching_index(item)\n if override and matching_index is not None:\n self.functions[matching_index] = item\n else:\n if matching_index is not None:\n raise MisconfigurationException(\n f\"Function with name: {name} and metadata: {metadata} is already present within {self}.\"\n \" HINT: Use `override=True`.\"\n )\n self.functions.append(item)\n\n def _find_matching_index(self, item: _REGISTERED_FUNCTION) -> Optional[int]:\n for idx, fn in enumerate(self.functions):\n if all(fn[k] == item[k] for k in (\"fn\", \"name\", \"metadata\")):\n return idx\n\n def __call__(\n self,\n fn: Optional[Callable[..., Any]] = None,\n name: Optional[str] = None,\n override: bool = False,\n **metadata\n ) -> Callable:\n \"\"\"\n This function is used to register new functions to the registry along their metadata.\n\n Functions can be filtered using metadata using the ``get`` function.\n\n \"\"\"\n if fn is not None:\n self._register_function(fn=fn, name=name, override=override, metadata=metadata)\n return fn\n\n # raise the error ahead of time\n if not (name is None or isinstance(name, str)):\n raise TypeError(f'`name` must be a str, found {name}')\n\n def _register(cls):\n self._register_function(fn=cls, name=name, override=override, metadata=metadata)\n return cls\n\n return _register\n\n def available_keys(self) -> List[str]:\n return sorted(v[\"name\"] for v in self.functions)\n", "path": "flash/core/registry.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nfrom types import FunctionType\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nfrom pytorch_lightning.utilities import rank_zero_info\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n_REGISTERED_FUNCTION = Dict[str, Any]\n\n\nclass FlashRegistry:\n \"\"\"This class is used to register function or :class:`functools.partial` class to a 
registry.\"\"\"\n\n def __init__(self, name: str, verbose: bool = False) -> None:\n self.name = name\n self.functions: List[_REGISTERED_FUNCTION] = []\n self._verbose = verbose\n\n def __len__(self) -> int:\n return len(self.functions)\n\n def __contains__(self, key) -> bool:\n return any(key == e[\"name\"] for e in self.functions)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(name={self.name}, functions={self.functions})'\n\n def get(\n self,\n key: str,\n with_metadata: bool = False,\n strict: bool = True,\n **metadata,\n ) -> Union[Callable, _REGISTERED_FUNCTION, List[_REGISTERED_FUNCTION], List[Callable]]:\n \"\"\"\n This function is used to gather matches from the registry:\n\n Args:\n key: Name of the registered function.\n with_metadata: Whether to include the associated metadata in the return value.\n strict: Whether to return all matches or just one.\n metadata: Metadata used to filter against existing registry item's metadata.\n \"\"\"\n matches = [e for e in self.functions if key == e[\"name\"]]\n if not matches:\n raise KeyError(f\"Key: {key} is not in {type(self).__name__}\")\n\n if metadata:\n matches = [m for m in matches if metadata.items() <= m[\"metadata\"].items()]\n if not matches:\n raise KeyError(\"Found no matches that fit your metadata criteria. Try removing some metadata\")\n\n matches = [e if with_metadata else e[\"fn\"] for e in matches]\n return matches[0] if strict else matches\n\n def remove(self, key: str) -> None:\n self.functions = [f for f in self.functions if f[\"name\"] != key]\n\n def _register_function(\n self,\n fn: Callable,\n name: Optional[str] = None,\n override: bool = False,\n metadata: Optional[Dict[str, Any]] = None\n ):\n if not isinstance(fn, FunctionType) and not isinstance(fn, partial):\n raise MisconfigurationException(f\"You can only register a function, found: {fn}\")\n\n name = name or fn.__name__\n\n if self._verbose:\n rank_zero_info(f\"Registering: {fn.__name__} function with name: {name} and metadata: {metadata}\")\n\n item = {\"fn\": fn, \"name\": name, \"metadata\": metadata or {}}\n\n matching_index = self._find_matching_index(item)\n if override and matching_index is not None:\n self.functions[matching_index] = item\n else:\n if matching_index is not None:\n raise MisconfigurationException(\n f\"Function with name: {name} and metadata: {metadata} is already present within {self}.\"\n \" HINT: Use `override=True`.\"\n )\n self.functions.append(item)\n\n def _find_matching_index(self, item: _REGISTERED_FUNCTION) -> Optional[int]:\n for idx, fn in enumerate(self.functions):\n if all(fn[k] == item[k] for k in (\"fn\", \"name\", \"metadata\")):\n return idx\n\n def __call__(\n self,\n fn: Optional[Callable[..., Any]] = None,\n name: Optional[str] = None,\n override: bool = False,\n **metadata\n ) -> Callable:\n \"\"\"\n This function is used to register new functions to the registry along their metadata.\n\n Functions can be filtered using metadata using the ``get`` function.\n\n \"\"\"\n if fn is not None:\n self._register_function(fn=fn, name=name, override=override, metadata=metadata)\n return fn\n\n # raise the error ahead of time\n if not (name is None or isinstance(name, str)):\n raise TypeError(f'`name` must be a str, found {name}')\n\n def _register(cls):\n self._register_function(fn=cls, name=name, override=override, metadata=metadata)\n return cls\n\n return _register\n\n def available_keys(self) -> List[str]:\n return sorted(v[\"name\"] for v in self.functions)\n", "path": "flash/core/registry.py"}]} 
|
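A note on the fix above: the confusing part of the original `KeyError` is that `repr(self)` embeds the registry's entire `__repr__`, i.e. the full `functions` list, so the message balloons with every registered backbone, while the golden diff keeps only `type(self).__name__`. The snippet below is a self-contained illustration of the two message styles — `ToyRegistry` and its single entry are invented for the sketch and are not part of the flash codebase.

```python
class ToyRegistry:
    """Stripped-down stand-in for flash.core.registry.FlashRegistry."""

    def __init__(self, name):
        self.name = name
        self.functions = [{"fn": print, "name": "resnet18", "metadata": {}}]

    def __repr__(self):
        return f"{type(self).__name__}(name={self.name}, functions={self.functions})"

    def _missing(self, key):
        return not any(key == entry["name"] for entry in self.functions)

    def get_old(self, key):
        if self._missing(key):
            # Pre-patch message: embeds the full repr, i.e. every registered function.
            raise KeyError(f"Key: {key} is not in {repr(self)}")

    def get_new(self, key):
        if self._missing(key):
            # Post-patch message: only the class name.
            raise KeyError(f"Key: {key} is not in {type(self).__name__}")


registry = ToyRegistry("backbones")
for getter in (registry.get_old, registry.get_new):
    try:
        getter("abcd")
    except KeyError as exc:
        print(exc)
```

Running the sketch prints one long message carrying the whole functions list and one short message naming only the class, which is the readability difference the issue is about.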
gh_patches_debug_1459 | rasdani/github-patches | git_diff | benoitc__gunicorn-1071 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include request URL in error message
It would be really helpful if the logged error message were "Error handling request http://host/path/etc" instead of just "Error handling request".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn/workers/base.py`
Content:
```
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 from datetime import datetime
7 import os
8 import signal
9 import sys
10 import time
11 import traceback
12 from random import randint
13
14
15 from gunicorn import util
16 from gunicorn.workers.workertmp import WorkerTmp
17 from gunicorn.reloader import Reloader
18 from gunicorn.http.errors import (
19 InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,
20 InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,
21 )
22 from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
23 from gunicorn.http.wsgi import default_environ, Response
24 from gunicorn.six import MAXSIZE
25
26
27 class Worker(object):
28
29 SIGNALS = [getattr(signal, "SIG%s" % x)
30 for x in "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()]
31
32 PIPE = []
33
34 def __init__(self, age, ppid, sockets, app, timeout, cfg, log):
35 """\
36 This is called pre-fork so it shouldn't do anything to the
37 current process. If there's a need to make process wide
38 changes you'll want to do that in ``self.init_process()``.
39 """
40 self.age = age
41 self.ppid = ppid
42 self.sockets = sockets
43 self.app = app
44 self.timeout = timeout
45 self.cfg = cfg
46 self.booted = False
47 self.aborted = False
48 self.reloader = None
49
50 self.nr = 0
51 jitter = randint(0, cfg.max_requests_jitter)
52 self.max_requests = cfg.max_requests + jitter or MAXSIZE
53 self.alive = True
54 self.log = log
55 self.tmp = WorkerTmp(cfg)
56
57 def __str__(self):
58 return "<Worker %s>" % self.pid
59
60 @property
61 def pid(self):
62 return os.getpid()
63
64 def notify(self):
65 """\
66 Your worker subclass must arrange to have this method called
67 once every ``self.timeout`` seconds. If you fail in accomplishing
68 this task, the master process will murder your workers.
69 """
70 self.tmp.notify()
71
72 def run(self):
73 """\
74 This is the mainloop of a worker process. You should override
75 this method in a subclass to provide the intended behaviour
76 for your particular evil schemes.
77 """
78 raise NotImplementedError()
79
80 def init_process(self):
81 """\
82 If you override this method in a subclass, the last statement
83 in the function should be to call this method with
84 super(MyWorkerClass, self).init_process() so that the ``run()``
85 loop is initiated.
86 """
87
88 # start the reloader
89 if self.cfg.reload:
90 def changed(fname):
91 self.log.info("Worker reloading: %s modified", fname)
92 os.kill(self.pid, signal.SIGQUIT)
93 self.reloader = Reloader(callback=changed)
94 self.reloader.start()
95
96 # set environment' variables
97 if self.cfg.env:
98 for k, v in self.cfg.env.items():
99 os.environ[k] = v
100
101 util.set_owner_process(self.cfg.uid, self.cfg.gid)
102
103 # Reseed the random number generator
104 util.seed()
105
106 # For waking ourselves up
107 self.PIPE = os.pipe()
108 for p in self.PIPE:
109 util.set_non_blocking(p)
110 util.close_on_exec(p)
111
112 # Prevent fd inheritance
113 [util.close_on_exec(s) for s in self.sockets]
114 util.close_on_exec(self.tmp.fileno())
115
116 self.log.close_on_exec()
117
118 self.init_signals()
119
120 self.cfg.post_worker_init(self)
121
122 self.load_wsgi()
123
124 # Enter main run loop
125 self.booted = True
126 self.run()
127
128 def load_wsgi(self):
129 try:
130 self.wsgi = self.app.wsgi()
131 except SyntaxError as e:
132 if not self.cfg.reload:
133 raise
134
135 self.log.exception(e)
136
137 exc_type, exc_val, exc_tb = sys.exc_info()
138 self.reloader.add_extra_file(exc_val.filename)
139
140 tb_string = traceback.format_exc(exc_tb)
141 self.wsgi = util.make_fail_app(tb_string)
142
143 def init_signals(self):
144 # reset signaling
145 [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]
146 # init new signaling
147 signal.signal(signal.SIGQUIT, self.handle_quit)
148 signal.signal(signal.SIGTERM, self.handle_exit)
149 signal.signal(signal.SIGINT, self.handle_quit)
150 signal.signal(signal.SIGWINCH, self.handle_winch)
151 signal.signal(signal.SIGUSR1, self.handle_usr1)
152 signal.signal(signal.SIGABRT, self.handle_abort)
153
154 # Don't let SIGTERM and SIGUSR1 disturb active requests
155 # by interrupting system calls
156 if hasattr(signal, 'siginterrupt'): # python >= 2.6
157 signal.siginterrupt(signal.SIGTERM, False)
158 signal.siginterrupt(signal.SIGUSR1, False)
159
160 def handle_usr1(self, sig, frame):
161 self.log.reopen_files()
162
163 def handle_exit(self, sig, frame):
164 self.alive = False
165
166 def handle_quit(self, sig, frame):
167 self.alive = False
168 # worker_int callback
169 self.cfg.worker_int(self)
170 time.sleep(0.1)
171 sys.exit(0)
172
173 def handle_abort(self, sig, frame):
174 self.alive = False
175 self.cfg.worker_abort(self)
176 sys.exit(1)
177
178 def handle_error(self, req, client, addr, exc):
179 request_start = datetime.now()
180 addr = addr or ('', -1) # unix socket case
181 if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,
182 InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,
183 LimitRequestLine, LimitRequestHeaders,
184 InvalidProxyLine, ForbiddenProxyRequest)):
185
186 status_int = 400
187 reason = "Bad Request"
188
189 if isinstance(exc, InvalidRequestLine):
190 mesg = "Invalid Request Line '%s'" % str(exc)
191 elif isinstance(exc, InvalidRequestMethod):
192 mesg = "Invalid Method '%s'" % str(exc)
193 elif isinstance(exc, InvalidHTTPVersion):
194 mesg = "Invalid HTTP Version '%s'" % str(exc)
195 elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):
196 mesg = "%s" % str(exc)
197 if not req and hasattr(exc, "req"):
198 req = exc.req # for access log
199 elif isinstance(exc, LimitRequestLine):
200 mesg = "%s" % str(exc)
201 elif isinstance(exc, LimitRequestHeaders):
202 mesg = "Error parsing headers: '%s'" % str(exc)
203 elif isinstance(exc, InvalidProxyLine):
204 mesg = "'%s'" % str(exc)
205 elif isinstance(exc, ForbiddenProxyRequest):
206 reason = "Forbidden"
207 mesg = "Request forbidden"
208 status_int = 403
209
210 msg = "Invalid request from ip={ip}: {error}"
211 self.log.debug(msg.format(ip=addr[0], error=str(exc)))
212 else:
213 self.log.exception("Error handling request")
214
215 status_int = 500
216 reason = "Internal Server Error"
217 mesg = ""
218
219 if req is not None:
220 request_time = datetime.now() - request_start
221 environ = default_environ(req, client, self.cfg)
222 environ['REMOTE_ADDR'] = addr[0]
223 environ['REMOTE_PORT'] = str(addr[1])
224 resp = Response(req, client, self.cfg)
225 resp.status = "%s %s" % (status_int, reason)
226 resp.response_length = len(mesg)
227 self.log.access(resp, req, environ, request_time)
228
229 try:
230 util.write_error(client, status_int, reason, mesg)
231 except:
232 self.log.debug("Failed to send error message.")
233
234 def handle_winch(self, sig, fname):
235 # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.
236 return
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py
--- a/gunicorn/workers/base.py
+++ b/gunicorn/workers/base.py
@@ -210,7 +210,7 @@
msg = "Invalid request from ip={ip}: {error}"
self.log.debug(msg.format(ip=addr[0], error=str(exc)))
else:
- self.log.exception("Error handling request")
+ self.log.exception("Error handling request %s", req.uri)
status_int = 500
reason = "Internal Server Error"
| {"golden_diff": "diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py\n--- a/gunicorn/workers/base.py\n+++ b/gunicorn/workers/base.py\n@@ -210,7 +210,7 @@\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n- self.log.exception(\"Error handling request\")\n+ self.log.exception(\"Error handling request %s\", req.uri)\n \n status_int = 500\n reason = \"Internal Server Error\"\n", "issue": "Include request URL in error message\nIt would be really helpful if the logged error message were \"Error handling request http://host/path/etc\" instead of just \"Error handling request\".\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nimport signal\nimport sys\nimport time\nimport traceback\nfrom random import randint\n\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n self.reloader = Reloader(callback=changed)\n self.reloader.start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.cfg.post_worker_init(self)\n\n self.load_wsgi()\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n exc_type, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = traceback.format_exc(exc_tb)\n self.wsgi = util.make_fail_app(tb_string)\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif 
isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n self.log.exception(\"Error handling request\")\n\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n", "path": "gunicorn/workers/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nimport signal\nimport sys\nimport time\nimport traceback\nfrom random import randint\n\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n self.reloader = Reloader(callback=changed)\n self.reloader.start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.cfg.post_worker_init(self)\n\n self.load_wsgi()\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n exc_type, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = traceback.format_exc(exc_tb)\n self.wsgi = util.make_fail_app(tb_string)\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif 
isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n self.log.exception(\"Error handling request %s\", req.uri)\n\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n", "path": "gunicorn/workers/base.py"}]} |
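One detail of the patched line worth spelling out: `self.log.exception("Error handling request %s", req.uri)` passes the URI as a logging argument instead of pre-formatting the string, so interpolation is deferred until the record is actually emitted, and `.exception()` attaches the current traceback automatically. The sketch below shows that pattern with the standard-library `logging` module only; the handler function and URL are invented for illustration, not taken from gunicorn.

```python
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("worker.sketch")


def handle_request(uri):
    raise RuntimeError("boom")  # stand-in for an application-level failure


uri = "http://host/path/etc"
try:
    handle_request(uri)
except Exception:
    # Deferred %-interpolation plus automatic traceback, mirroring the patched call.
    log.exception("Error handling request %s", uri)
```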
gh_patches_debug_1460 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace `sphinxcontrib-napoleon`
It is now bundled with `sphinx` as `sphinx.ext.napoleon`.
So, we need to remove this dependency from both:
- `pyproject.toml`
- `docs/requirements.txt`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/master/config
8
9 # -- Path setup --------------------------------------------------------------
10
11 # If extensions (or modules to document with autodoc) are in another directory,
12 # add these directories to sys.path here. If the directory is relative to the
13 # documentation root, use os.path.abspath to make it absolute, like shown here.
14
15 import os
16 import sys
17 sys.path.insert(0, os.path.abspath('..'))
18
19
20 # -- Project information -----------------------------------------------------
21
22 def _get_project_meta():
23 import tomlkit
24
25 with open('../pyproject.toml') as pyproject:
26 contents = pyproject.read()
27
28 return tomlkit.parse(contents)['tool']['poetry']
29
30
31 pkg_meta = _get_project_meta()
32 project = pkg_meta['name']
33 copyright = '2018, wemake.services'
34 author = 'wemake.services'
35
36 # The short X.Y version
37 version = pkg_meta['version']
38 # The full version, including alpha/beta/rc tags
39 release = version
40
41
42 # -- General configuration ---------------------------------------------------
43
44 # If your documentation needs a minimal Sphinx version, state it here.
45 #
46 # needs_sphinx = '1.0'
47
48 # Add any Sphinx extension module names here, as strings. They can be
49 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
50 # ones.
51 extensions = [
52 'sphinx.ext.autodoc',
53 'sphinx.ext.doctest',
54 'sphinx.ext.todo',
55 'sphinx.ext.coverage',
56 'sphinx.ext.viewcode',
57 'sphinx.ext.autosummary',
58
59 # Used to include .md files:
60 'm2r',
61
62 # Used to write python docstrings in a readable way:
63 'sphinxcontrib.napoleon',
64
65 # Used to insert typehints into the final docs:
66 'sphinx_autodoc_typehints',
67
68 # Used to embed values from the source code into the docs:
69 'added_value',
70 ]
71
72 autoclass_content = 'class'
73 autodoc_member_order = 'bysource'
74
75 autodoc_mock_imports = [
76 'attr',
77 ]
78
79 autodoc_member_order = 'bysource'
80 autodoc_default_flags = {
81 'members': '',
82 'undoc-members': 'code,error_template',
83 'exclude-members': '__dict__,__weakref__',
84 }
85
86 # Add any paths that contain templates here, relative to this directory.
87 templates_path = ['_templates']
88
89 # The suffix(es) of source filenames.
90 # You can specify multiple suffix as a list of string:
91
92 source_suffix = ['.rst', '.md']
93
94 # The master toctree document.
95 master_doc = 'index'
96
97 # The language for content autogenerated by Sphinx. Refer to documentation
98 # for a list of supported languages.
99 #
100 # This is also used if you do content translation via gettext catalogs.
101 # Usually you set "language" from the command line for these cases.
102 language = None
103
104 # List of patterns, relative to source directory, that match files and
105 # directories to ignore when looking for source files.
106 # This pattern also affects html_static_path and html_extra_path .
107 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
108
109 # The name of the Pygments (syntax highlighting) style to use.
110 pygments_style = 'sphinx'
111
112 add_module_names = False
113
114 autodoc_default_options = {
115 'show-inheritance': True,
116 }
117
118
119 # -- Options for HTML output -------------------------------------------------
120
121 # The theme to use for HTML and HTML Help pages. See the documentation for
122 # a list of builtin themes.
123 #
124 html_theme = 'alabaster'
125
126 # Theme options are theme-specific and customize the look and feel of a theme
127 # further. For a list of options available for each theme, see the
128 # documentation.
129 html_theme_options = {
130 'sidebar_collapse': False,
131 'show_powered_by': False,
132 }
133
134 # Add any paths that contain custom static files (such as style sheets) here,
135 # relative to this directory. They are copied after the builtin static files,
136 # so a file named "default.css" will overwrite the builtin "default.css".
137 html_static_path = ['_static']
138
139 # Custom sidebar templates, must be a dictionary that maps document names
140 # to template names.
141 #
142 # This is required for the alabaster theme
143 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
144 html_sidebars = {
145 '**': [
146 'about.html',
147 'navigation.html',
148 'moreinfo.html',
149 'github.html',
150 'searchbox.html',
151 ]
152 }
153
154
155 # -- Options for HTMLHelp output ---------------------------------------------
156
157 # Output file base name for HTML help builder.
158 htmlhelp_basename = 'wemake-python-styleguidedoc'
159
160
161 # -- Options for LaTeX output ------------------------------------------------
162
163 latex_elements = {
164 # The paper size ('letterpaper' or 'a4paper').
165 #
166 # 'papersize': 'letterpaper',
167
168 # The font size ('10pt', '11pt' or '12pt').
169 #
170 # 'pointsize': '10pt',
171
172 # Additional stuff for the LaTeX preamble.
173 #
174 # 'preamble': '',
175
176 # Latex figure (float) alignment
177 #
178 # 'figure_align': 'htbp',
179 }
180
181 # Grouping the document tree into LaTeX files. List of tuples
182 # (source start file, target name, title,
183 # author, documentclass [howto, manual, or own class]).
184 latex_documents = [
185 (
186 master_doc,
187 'wemake-python-styleguide.tex',
188 'wemake-python-styleguide Documentation',
189 'wemake.services',
190 'manual',
191 ),
192 ]
193
194
195 # -- Options for manual page output ------------------------------------------
196
197 # One entry per manual page. List of tuples
198 # (source start file, name, description, authors, manual section).
199 man_pages = [
200 (
201 master_doc,
202 'wemake-python-styleguide',
203 'wemake-python-styleguide Documentation',
204 [author],
205 1,
206 )
207 ]
208
209
210 # -- Options for Texinfo output ----------------------------------------------
211
212 # Grouping the document tree into Texinfo files. List of tuples
213 # (source start file, target name, title, author,
214 # dir menu entry, description, category)
215 texinfo_documents = [
216 (
217 master_doc,
218 'wemake-python-styleguide',
219 'wemake-python-styleguide Documentation',
220 author,
221 'wemake-python-styleguide',
222 'One line description of project.',
223 'Miscellaneous',
224 ),
225 ]
226
227
228 # -- Extension configuration -------------------------------------------------
229
230 napoleon_numpy_docstring = False
231
232 # -- Options for todo extension ----------------------------------------------
233
234 # If true, `todo` and `todoList` produce output, else they produce nothing.
235 todo_include_todos = True
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -55,13 +55,11 @@
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
+ 'sphinx.ext.napoleon',
# Used to include .md files:
'm2r',
- # Used to write python docstrings in a readable way:
- 'sphinxcontrib.napoleon',
-
# Used to insert typehints into the final docs:
'sphinx_autodoc_typehints',
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -55,13 +55,11 @@\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n+ 'sphinx.ext.napoleon',\n \n # Used to include .md files:\n 'm2r',\n \n- # Used to write python docstrings in a readable way:\n- 'sphinxcontrib.napoleon',\n-\n # Used to insert typehints into the final docs:\n 'sphinx_autodoc_typehints',\n", "issue": "Replace `sphinxcontrib-napoleon`\nIt is now bundled with `sphinx` as `sphinx.ext.napoleon`.\r\n\r\nSo, we need to remove this dependency from both:\r\n- `pyproject.toml`\r\n- `docs/requirements.txt`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- Project information -----------------------------------------------------\n\ndef _get_project_meta():\n import tomlkit\n\n with open('../pyproject.toml') as pyproject:\n contents = pyproject.read()\n\n return tomlkit.parse(contents)['tool']['poetry']\n\n\npkg_meta = _get_project_meta()\nproject = pkg_meta['name']\ncopyright = '2018, wemake.services'\nauthor = 'wemake.services'\n\n# The short X.Y version\nversion = pkg_meta['version']\n# The full version, including alpha/beta/rc tags\nrelease = version\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n\n # Used to include .md files:\n 'm2r',\n\n # Used to write python docstrings in a readable way:\n 'sphinxcontrib.napoleon',\n\n # Used to insert typehints into the final docs:\n 'sphinx_autodoc_typehints',\n\n # Used to embed values from the source code into the docs:\n 'added_value',\n]\n\nautoclass_content = 'class'\nautodoc_member_order = 'bysource'\n\nautodoc_mock_imports = [\n 'attr',\n]\n\nautodoc_member_order = 'bysource'\nautodoc_default_flags = {\n 'members': '',\n 'undoc-members': 'code,error_template',\n 'exclude-members': '__dict__,__weakref__',\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\nadd_module_names = False\n\nautodoc_default_options = {\n 'show-inheritance': True,\n}\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'sidebar_collapse': False,\n 'show_powered_by': False,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'navigation.html',\n 'moreinfo.html',\n 'github.html',\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'wemake-python-styleguidedoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'wemake-python-styleguide.tex',\n 'wemake-python-styleguide Documentation',\n 'wemake.services',\n 'manual',\n ),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (\n master_doc,\n 'wemake-python-styleguide',\n 'wemake-python-styleguide Documentation',\n [author],\n 1,\n )\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'wemake-python-styleguide',\n 'wemake-python-styleguide Documentation',\n author,\n 'wemake-python-styleguide',\n 'One line description of project.',\n 'Miscellaneous',\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\nnapoleon_numpy_docstring = False\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- Project information -----------------------------------------------------\n\ndef _get_project_meta():\n import tomlkit\n\n with open('../pyproject.toml') as pyproject:\n contents = pyproject.read()\n\n return tomlkit.parse(contents)['tool']['poetry']\n\n\npkg_meta = _get_project_meta()\nproject = pkg_meta['name']\ncopyright = '2018, wemake.services'\nauthor = 'wemake.services'\n\n# The short X.Y version\nversion = pkg_meta['version']\n# The full version, including alpha/beta/rc tags\nrelease = version\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.napoleon',\n\n # Used to include .md files:\n 'm2r',\n\n # Used to insert typehints into the final docs:\n 'sphinx_autodoc_typehints',\n\n # Used to embed values from the source code into the docs:\n 'added_value',\n]\n\nautoclass_content = 'class'\nautodoc_member_order = 'bysource'\n\nautodoc_mock_imports = [\n 'attr',\n]\n\nautodoc_member_order = 'bysource'\nautodoc_default_flags = {\n 'members': '',\n 'undoc-members': 'code,error_template',\n 'exclude-members': '__dict__,__weakref__',\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\nadd_module_names = False\n\nautodoc_default_options = {\n 'show-inheritance': True,\n}\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'sidebar_collapse': False,\n 'show_powered_by': False,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'navigation.html',\n 'moreinfo.html',\n 'github.html',\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'wemake-python-styleguidedoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'wemake-python-styleguide.tex',\n 'wemake-python-styleguide Documentation',\n 'wemake.services',\n 'manual',\n ),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (\n master_doc,\n 'wemake-python-styleguide',\n 'wemake-python-styleguide Documentation',\n [author],\n 1,\n )\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'wemake-python-styleguide',\n 'wemake-python-styleguide Documentation',\n author,\n 'wemake-python-styleguide',\n 'One line description of project.',\n 'Miscellaneous',\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\nnapoleon_numpy_docstring = False\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py"}]} |
gh_patches_debug_1461 | rasdani/github-patches | git_diff | LibraryOfCongress__concordia-1208 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create URL path for staff page
We will create a simple page for staff, and we need to create the URL path for it.
For example: `crowd.loc.gov/for-staff`
--- END ISSUE ---
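For orientation: the existing simple pages in this app (`for-educators`, `resources`, the help-center pages) are all wired to `views.simple_page`, so the requested route amounts to one more `path()` entry of the same shape. A minimal sketch of that kind of addition, using only names that appear in `concordia/urls.py` below (illustrative, not a definitive patch):

```python
# Sketch: excerpt of concordia/urls.py with one extra simple-page route,
# mirroring the existing "for-educators" entry.
from django.urls import path

from . import views

urlpatterns = [
    # ... existing routes ...
    path("for-educators/", views.simple_page, name="for-educators"),
    path("for-staff/", views.simple_page, name="for-staff"),  # new staff page
    path("resources/", views.simple_page, name="resources"),
    # ... remaining routes ...
]
```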
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `concordia/urls.py`
Content:
```
1 from django.conf import settings
2 from django.conf.urls import url
3 from django.contrib import admin
4 from django.http import Http404, HttpResponseForbidden
5 from django.urls import include, path
6 from django.urls.converters import register_converter
7 from django.views.defaults import page_not_found, permission_denied, server_error
8 from django.views.generic import RedirectView
9
10 from exporter import views as exporter_views
11
12 from . import converters, views
13
14 register_converter(converters.UnicodeSlugConverter, "uslug")
15 register_converter(converters.ItemIdConverter, "item_id")
16
17 tx_urlpatterns = (
18 [
19 path("", views.CampaignListView.as_view(), name="campaign-list"),
20 path(
21 "<uslug:slug>/", views.CampaignDetailView.as_view(), name="campaign-detail"
22 ),
23 path(
24 "<uslug:campaign_slug>/export/csv/",
25 exporter_views.ExportCampaignToCSV.as_view(),
26 name="campaign-export-csv",
27 ),
28 path(
29 "<uslug:campaign_slug>/export/bagit/",
30 exporter_views.ExportCampaignToBagIt.as_view(),
31 name="campaign-export-bagit",
32 ),
33 path(
34 "<uslug:campaign_slug>/<uslug:project_slug>/export/bagit/",
35 exporter_views.ExportProjectToBagIt.as_view(),
36 name="project-export-bagit",
37 ),
38 path(
39 (
40 "<uslug:campaign_slug>/<uslug:project_slug>/"
41 "<item_id:item_id>/export/bagit/"
42 ),
43 exporter_views.ExportItemToBagIt.as_view(),
44 name="item-export-bagit",
45 ),
46 path(
47 "<uslug:campaign_slug>/report/",
48 views.ReportCampaignView.as_view(),
49 name="campaign-report",
50 ),
51 path(
52 (
53 "<uslug:campaign_slug>/<uslug:project_slug>/"
54 "<item_id:item_id>/<uslug:slug>/"
55 ),
56 views.AssetDetailView.as_view(),
57 name="asset-detail",
58 ),
59 # n.b. this must be above project-detail to avoid being seen as a project slug:
60 path(
61 "<uslug:campaign_slug>/next-transcribable-asset/",
62 views.redirect_to_next_transcribable_campaign_asset,
63 name="redirect-to-next-transcribable-campaign-asset",
64 ),
65 path(
66 "<uslug:campaign_slug>/next-reviewable-asset/",
67 views.redirect_to_next_reviewable_campaign_asset,
68 name="redirect-to-next-reviewable-campaign-asset",
69 ),
70 path(
71 "<uslug:campaign_slug>/<uslug:slug>/",
72 views.ProjectDetailView.as_view(),
73 name="project-detail",
74 ),
75 path(
76 "<uslug:campaign_slug>/<uslug:project_slug>/<item_id:item_id>/",
77 views.ItemDetailView.as_view(),
78 name="item-detail",
79 ),
80 ],
81 "transcriptions",
82 )
83
84 urlpatterns = [
85 path("", views.HomeView.as_view(), name="homepage"),
86 path("healthz", views.healthz, name="health-check"),
87 path("about/", views.simple_page, name="about"),
88 path("help-center/", views.simple_page, name="help-center"),
89 path("help-center/welcome-guide/", views.simple_page, name="welcome-guide"),
90 path("help-center/how-to-transcribe/", views.simple_page, name="how-to-transcribe"),
91 path("help-center/how-to-review/", views.simple_page, name="how-to-review"),
92 path("help-center/how-to-tag/", views.simple_page, name="how-to-tag"),
93 path(
94 "help-center/welcome-guide-esp/",
95 views.simple_page,
96 name="welcome-guide-spanish",
97 ),
98 path(
99 "help-center/how-to-transcribe-esp/",
100 views.simple_page,
101 name="how-to-transcribe-spanish",
102 ),
103 path(
104 "help-center/how-to-review-esp/",
105 views.simple_page,
106 name="how-to-review-spanish",
107 ),
108 path("help-center/how-to-tag-esp/", views.simple_page, name="how-to-tag-spanish"),
109 path("for-educators/", views.simple_page, name="for-educators"),
110 path("resources/", views.simple_page, name="resources"),
111 path(
112 "latest/",
113 RedirectView.as_view(pattern_name="about", permanent=True, query_string=True),
114 ),
115 path("questions/", views.simple_page, name="questions"),
116 path("contact/", views.ContactUsView.as_view(), name="contact"),
117 path("act/", views.action_app, name="action-app"),
118 path(
119 "campaigns-topics/",
120 views.CampaignTopicListView.as_view(),
121 name="campaign-topic-list",
122 ),
123 path("topics/", views.TopicListView.as_view(), name="topic-list"),
124 path("topics/<uslug:slug>/", views.TopicDetailView.as_view(), name="topic-detail"),
125 path(
126 "topics/<uslug:topic_slug>/next-transcribable-asset/",
127 views.redirect_to_next_transcribable_topic_asset,
128 name="redirect-to-next-transcribable-topic-asset",
129 ),
130 path(
131 "topics/<uslug:topic_slug>/next-reviewable-asset/",
132 views.redirect_to_next_reviewable_topic_asset,
133 name="redirect-to-next-reviewable-topic-asset",
134 ),
135 path(
136 "next-transcribable-asset/",
137 views.redirect_to_next_transcribable_asset,
138 name="redirect-to-next-transcribable-asset",
139 ),
140 path(
141 "next-reviewable-asset/",
142 views.redirect_to_next_reviewable_asset,
143 name="redirect-to-next-reviewable-asset",
144 ),
145 path("campaigns/", include(tx_urlpatterns, namespace="transcriptions")),
146 path("reserve-asset/<int:asset_pk>/", views.reserve_asset, name="reserve-asset"),
147 path(
148 "assets/<int:asset_pk>/transcriptions/save/",
149 views.save_transcription,
150 name="save-transcription",
151 ),
152 path(
153 "transcriptions/<int:pk>/submit/",
154 views.submit_transcription,
155 name="submit-transcription",
156 ),
157 path(
158 "transcriptions/<int:pk>/review/",
159 views.review_transcription,
160 name="review-transcription",
161 ),
162 path("assets/<int:asset_pk>/tags/submit/", views.submit_tags, name="submit-tags"),
163 path("assets/", views.AssetListView.as_view(), name="asset-list"),
164 path(
165 "transcribe/", views.TranscribeListView.as_view(), name="transcribe-asset-list"
166 ),
167 path("review/", views.ReviewListView.as_view(), name="review-asset-list"),
168 path("account/ajax-status/", views.ajax_session_status, name="ajax-session-status"),
169 path("account/ajax-messages/", views.ajax_messages, name="ajax-messages"),
170 path(
171 "account/register/",
172 views.ConcordiaRegistrationView.as_view(),
173 name="registration_register",
174 ),
175 path(
176 "account/login/", views.ConcordiaLoginView.as_view(), name="registration_login"
177 ),
178 path("account/profile/", views.AccountProfileView.as_view(), name="user-profile"),
179 path(
180 "account/password_reset/",
181 views.ConcordiaPasswordResetRequestView.as_view(),
182 name="password_reset",
183 ),
184 path(
185 "account/reset/<uidb64>/<token>/",
186 views.ConcordiaPasswordResetConfirmView.as_view(),
187 name="password_reset_confirm",
188 ),
189 path("account/", include("django_registration.backends.activation.urls")),
190 path("account/", include("django.contrib.auth.urls")),
191 path(
192 ".well-known/change-password", # https://wicg.github.io/change-password-url/
193 RedirectView.as_view(pattern_name="password_change"),
194 ),
195 path("captcha/ajax/", views.ajax_captcha, name="ajax-captcha"),
196 path("captcha/", include("captcha.urls")),
197 path("admin/", admin.site.urls),
198 # Internal support assists:
199 path("error/500/", server_error),
200 path("error/404/", page_not_found, {"exception": Http404()}),
201 path("error/429/", views.ratelimit_view),
202 path("error/403/", permission_denied, {"exception": HttpResponseForbidden()}),
203 url("", include("django_prometheus_metrics.urls")),
204 path("robots.txt", include("robots.urls")),
205 ]
206
207 if settings.DEBUG:
208 import debug_toolbar
209 from django.conf.urls.static import static
210
211 urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
212
213 urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/concordia/urls.py b/concordia/urls.py
--- a/concordia/urls.py
+++ b/concordia/urls.py
@@ -107,6 +107,7 @@
),
path("help-center/how-to-tag-esp/", views.simple_page, name="how-to-tag-spanish"),
path("for-educators/", views.simple_page, name="for-educators"),
+ path("for-staff/", views.simple_page, name="for-staff"),
path("resources/", views.simple_page, name="resources"),
path(
"latest/",
| {"golden_diff": "diff --git a/concordia/urls.py b/concordia/urls.py\n--- a/concordia/urls.py\n+++ b/concordia/urls.py\n@@ -107,6 +107,7 @@\n ),\n path(\"help-center/how-to-tag-esp/\", views.simple_page, name=\"how-to-tag-spanish\"),\n path(\"for-educators/\", views.simple_page, name=\"for-educators\"),\n+ path(\"for-staff/\", views.simple_page, name=\"for-staff\"),\n path(\"resources/\", views.simple_page, name=\"resources\"),\n path(\n \"latest/\",\n", "issue": "Create URL path for staff page\nWe will create a simple page for staff. Need to create the URL path. \r\n\r\nex - `crowd.loc.gov/for-staff`\n", "before_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.urls import include, path\nfrom django.urls.converters import register_converter\nfrom django.views.defaults import page_not_found, permission_denied, server_error\nfrom django.views.generic import RedirectView\n\nfrom exporter import views as exporter_views\n\nfrom . import converters, views\n\nregister_converter(converters.UnicodeSlugConverter, \"uslug\")\nregister_converter(converters.ItemIdConverter, \"item_id\")\n\ntx_urlpatterns = (\n [\n path(\"\", views.CampaignListView.as_view(), name=\"campaign-list\"),\n path(\n \"<uslug:slug>/\", views.CampaignDetailView.as_view(), name=\"campaign-detail\"\n ),\n path(\n \"<uslug:campaign_slug>/export/csv/\",\n exporter_views.ExportCampaignToCSV.as_view(),\n name=\"campaign-export-csv\",\n ),\n path(\n \"<uslug:campaign_slug>/export/bagit/\",\n exporter_views.ExportCampaignToBagIt.as_view(),\n name=\"campaign-export-bagit\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:project_slug>/export/bagit/\",\n exporter_views.ExportProjectToBagIt.as_view(),\n name=\"project-export-bagit\",\n ),\n path(\n (\n \"<uslug:campaign_slug>/<uslug:project_slug>/\"\n \"<item_id:item_id>/export/bagit/\"\n ),\n exporter_views.ExportItemToBagIt.as_view(),\n name=\"item-export-bagit\",\n ),\n path(\n \"<uslug:campaign_slug>/report/\",\n views.ReportCampaignView.as_view(),\n name=\"campaign-report\",\n ),\n path(\n (\n \"<uslug:campaign_slug>/<uslug:project_slug>/\"\n \"<item_id:item_id>/<uslug:slug>/\"\n ),\n views.AssetDetailView.as_view(),\n name=\"asset-detail\",\n ),\n # n.b. 
this must be above project-detail to avoid being seen as a project slug:\n path(\n \"<uslug:campaign_slug>/next-transcribable-asset/\",\n views.redirect_to_next_transcribable_campaign_asset,\n name=\"redirect-to-next-transcribable-campaign-asset\",\n ),\n path(\n \"<uslug:campaign_slug>/next-reviewable-asset/\",\n views.redirect_to_next_reviewable_campaign_asset,\n name=\"redirect-to-next-reviewable-campaign-asset\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:slug>/\",\n views.ProjectDetailView.as_view(),\n name=\"project-detail\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:project_slug>/<item_id:item_id>/\",\n views.ItemDetailView.as_view(),\n name=\"item-detail\",\n ),\n ],\n \"transcriptions\",\n)\n\nurlpatterns = [\n path(\"\", views.HomeView.as_view(), name=\"homepage\"),\n path(\"healthz\", views.healthz, name=\"health-check\"),\n path(\"about/\", views.simple_page, name=\"about\"),\n path(\"help-center/\", views.simple_page, name=\"help-center\"),\n path(\"help-center/welcome-guide/\", views.simple_page, name=\"welcome-guide\"),\n path(\"help-center/how-to-transcribe/\", views.simple_page, name=\"how-to-transcribe\"),\n path(\"help-center/how-to-review/\", views.simple_page, name=\"how-to-review\"),\n path(\"help-center/how-to-tag/\", views.simple_page, name=\"how-to-tag\"),\n path(\n \"help-center/welcome-guide-esp/\",\n views.simple_page,\n name=\"welcome-guide-spanish\",\n ),\n path(\n \"help-center/how-to-transcribe-esp/\",\n views.simple_page,\n name=\"how-to-transcribe-spanish\",\n ),\n path(\n \"help-center/how-to-review-esp/\",\n views.simple_page,\n name=\"how-to-review-spanish\",\n ),\n path(\"help-center/how-to-tag-esp/\", views.simple_page, name=\"how-to-tag-spanish\"),\n path(\"for-educators/\", views.simple_page, name=\"for-educators\"),\n path(\"resources/\", views.simple_page, name=\"resources\"),\n path(\n \"latest/\",\n RedirectView.as_view(pattern_name=\"about\", permanent=True, query_string=True),\n ),\n path(\"questions/\", views.simple_page, name=\"questions\"),\n path(\"contact/\", views.ContactUsView.as_view(), name=\"contact\"),\n path(\"act/\", views.action_app, name=\"action-app\"),\n path(\n \"campaigns-topics/\",\n views.CampaignTopicListView.as_view(),\n name=\"campaign-topic-list\",\n ),\n path(\"topics/\", views.TopicListView.as_view(), name=\"topic-list\"),\n path(\"topics/<uslug:slug>/\", views.TopicDetailView.as_view(), name=\"topic-detail\"),\n path(\n \"topics/<uslug:topic_slug>/next-transcribable-asset/\",\n views.redirect_to_next_transcribable_topic_asset,\n name=\"redirect-to-next-transcribable-topic-asset\",\n ),\n path(\n \"topics/<uslug:topic_slug>/next-reviewable-asset/\",\n views.redirect_to_next_reviewable_topic_asset,\n name=\"redirect-to-next-reviewable-topic-asset\",\n ),\n path(\n \"next-transcribable-asset/\",\n views.redirect_to_next_transcribable_asset,\n name=\"redirect-to-next-transcribable-asset\",\n ),\n path(\n \"next-reviewable-asset/\",\n views.redirect_to_next_reviewable_asset,\n name=\"redirect-to-next-reviewable-asset\",\n ),\n path(\"campaigns/\", include(tx_urlpatterns, namespace=\"transcriptions\")),\n path(\"reserve-asset/<int:asset_pk>/\", views.reserve_asset, name=\"reserve-asset\"),\n path(\n \"assets/<int:asset_pk>/transcriptions/save/\",\n views.save_transcription,\n name=\"save-transcription\",\n ),\n path(\n \"transcriptions/<int:pk>/submit/\",\n views.submit_transcription,\n name=\"submit-transcription\",\n ),\n path(\n \"transcriptions/<int:pk>/review/\",\n views.review_transcription,\n 
name=\"review-transcription\",\n ),\n path(\"assets/<int:asset_pk>/tags/submit/\", views.submit_tags, name=\"submit-tags\"),\n path(\"assets/\", views.AssetListView.as_view(), name=\"asset-list\"),\n path(\n \"transcribe/\", views.TranscribeListView.as_view(), name=\"transcribe-asset-list\"\n ),\n path(\"review/\", views.ReviewListView.as_view(), name=\"review-asset-list\"),\n path(\"account/ajax-status/\", views.ajax_session_status, name=\"ajax-session-status\"),\n path(\"account/ajax-messages/\", views.ajax_messages, name=\"ajax-messages\"),\n path(\n \"account/register/\",\n views.ConcordiaRegistrationView.as_view(),\n name=\"registration_register\",\n ),\n path(\n \"account/login/\", views.ConcordiaLoginView.as_view(), name=\"registration_login\"\n ),\n path(\"account/profile/\", views.AccountProfileView.as_view(), name=\"user-profile\"),\n path(\n \"account/password_reset/\",\n views.ConcordiaPasswordResetRequestView.as_view(),\n name=\"password_reset\",\n ),\n path(\n \"account/reset/<uidb64>/<token>/\",\n views.ConcordiaPasswordResetConfirmView.as_view(),\n name=\"password_reset_confirm\",\n ),\n path(\"account/\", include(\"django_registration.backends.activation.urls\")),\n path(\"account/\", include(\"django.contrib.auth.urls\")),\n path(\n \".well-known/change-password\", # https://wicg.github.io/change-password-url/\n RedirectView.as_view(pattern_name=\"password_change\"),\n ),\n path(\"captcha/ajax/\", views.ajax_captcha, name=\"ajax-captcha\"),\n path(\"captcha/\", include(\"captcha.urls\")),\n path(\"admin/\", admin.site.urls),\n # Internal support assists:\n path(\"error/500/\", server_error),\n path(\"error/404/\", page_not_found, {\"exception\": Http404()}),\n path(\"error/429/\", views.ratelimit_view),\n path(\"error/403/\", permission_denied, {\"exception\": HttpResponseForbidden()}),\n url(\"\", include(\"django_prometheus_metrics.urls\")),\n path(\"robots.txt\", include(\"robots.urls\")),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n from django.conf.urls.static import static\n\n urlpatterns = [path(\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "concordia/urls.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.urls import include, path\nfrom django.urls.converters import register_converter\nfrom django.views.defaults import page_not_found, permission_denied, server_error\nfrom django.views.generic import RedirectView\n\nfrom exporter import views as exporter_views\n\nfrom . 
import converters, views\n\nregister_converter(converters.UnicodeSlugConverter, \"uslug\")\nregister_converter(converters.ItemIdConverter, \"item_id\")\n\ntx_urlpatterns = (\n [\n path(\"\", views.CampaignListView.as_view(), name=\"campaign-list\"),\n path(\n \"<uslug:slug>/\", views.CampaignDetailView.as_view(), name=\"campaign-detail\"\n ),\n path(\n \"<uslug:campaign_slug>/export/csv/\",\n exporter_views.ExportCampaignToCSV.as_view(),\n name=\"campaign-export-csv\",\n ),\n path(\n \"<uslug:campaign_slug>/export/bagit/\",\n exporter_views.ExportCampaignToBagIt.as_view(),\n name=\"campaign-export-bagit\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:project_slug>/export/bagit/\",\n exporter_views.ExportProjectToBagIt.as_view(),\n name=\"project-export-bagit\",\n ),\n path(\n (\n \"<uslug:campaign_slug>/<uslug:project_slug>/\"\n \"<item_id:item_id>/export/bagit/\"\n ),\n exporter_views.ExportItemToBagIt.as_view(),\n name=\"item-export-bagit\",\n ),\n path(\n \"<uslug:campaign_slug>/report/\",\n views.ReportCampaignView.as_view(),\n name=\"campaign-report\",\n ),\n path(\n (\n \"<uslug:campaign_slug>/<uslug:project_slug>/\"\n \"<item_id:item_id>/<uslug:slug>/\"\n ),\n views.AssetDetailView.as_view(),\n name=\"asset-detail\",\n ),\n # n.b. this must be above project-detail to avoid being seen as a project slug:\n path(\n \"<uslug:campaign_slug>/next-transcribable-asset/\",\n views.redirect_to_next_transcribable_campaign_asset,\n name=\"redirect-to-next-transcribable-campaign-asset\",\n ),\n path(\n \"<uslug:campaign_slug>/next-reviewable-asset/\",\n views.redirect_to_next_reviewable_campaign_asset,\n name=\"redirect-to-next-reviewable-campaign-asset\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:slug>/\",\n views.ProjectDetailView.as_view(),\n name=\"project-detail\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:project_slug>/<item_id:item_id>/\",\n views.ItemDetailView.as_view(),\n name=\"item-detail\",\n ),\n ],\n \"transcriptions\",\n)\n\nurlpatterns = [\n path(\"\", views.HomeView.as_view(), name=\"homepage\"),\n path(\"healthz\", views.healthz, name=\"health-check\"),\n path(\"about/\", views.simple_page, name=\"about\"),\n path(\"help-center/\", views.simple_page, name=\"help-center\"),\n path(\"help-center/welcome-guide/\", views.simple_page, name=\"welcome-guide\"),\n path(\"help-center/how-to-transcribe/\", views.simple_page, name=\"how-to-transcribe\"),\n path(\"help-center/how-to-review/\", views.simple_page, name=\"how-to-review\"),\n path(\"help-center/how-to-tag/\", views.simple_page, name=\"how-to-tag\"),\n path(\n \"help-center/welcome-guide-esp/\",\n views.simple_page,\n name=\"welcome-guide-spanish\",\n ),\n path(\n \"help-center/how-to-transcribe-esp/\",\n views.simple_page,\n name=\"how-to-transcribe-spanish\",\n ),\n path(\n \"help-center/how-to-review-esp/\",\n views.simple_page,\n name=\"how-to-review-spanish\",\n ),\n path(\"help-center/how-to-tag-esp/\", views.simple_page, name=\"how-to-tag-spanish\"),\n path(\"for-educators/\", views.simple_page, name=\"for-educators\"),\n path(\"for-staff/\", views.simple_page, name=\"for-staff\"),\n path(\"resources/\", views.simple_page, name=\"resources\"),\n path(\n \"latest/\",\n RedirectView.as_view(pattern_name=\"about\", permanent=True, query_string=True),\n ),\n path(\"questions/\", views.simple_page, name=\"questions\"),\n path(\"contact/\", views.ContactUsView.as_view(), name=\"contact\"),\n path(\"act/\", views.action_app, name=\"action-app\"),\n path(\n \"campaigns-topics/\",\n 
views.CampaignTopicListView.as_view(),\n name=\"campaign-topic-list\",\n ),\n path(\"topics/\", views.TopicListView.as_view(), name=\"topic-list\"),\n path(\"topics/<uslug:slug>/\", views.TopicDetailView.as_view(), name=\"topic-detail\"),\n path(\n \"topics/<uslug:topic_slug>/next-transcribable-asset/\",\n views.redirect_to_next_transcribable_topic_asset,\n name=\"redirect-to-next-transcribable-topic-asset\",\n ),\n path(\n \"topics/<uslug:topic_slug>/next-reviewable-asset/\",\n views.redirect_to_next_reviewable_topic_asset,\n name=\"redirect-to-next-reviewable-topic-asset\",\n ),\n path(\n \"next-transcribable-asset/\",\n views.redirect_to_next_transcribable_asset,\n name=\"redirect-to-next-transcribable-asset\",\n ),\n path(\n \"next-reviewable-asset/\",\n views.redirect_to_next_reviewable_asset,\n name=\"redirect-to-next-reviewable-asset\",\n ),\n path(\"campaigns/\", include(tx_urlpatterns, namespace=\"transcriptions\")),\n path(\"reserve-asset/<int:asset_pk>/\", views.reserve_asset, name=\"reserve-asset\"),\n path(\n \"assets/<int:asset_pk>/transcriptions/save/\",\n views.save_transcription,\n name=\"save-transcription\",\n ),\n path(\n \"transcriptions/<int:pk>/submit/\",\n views.submit_transcription,\n name=\"submit-transcription\",\n ),\n path(\n \"transcriptions/<int:pk>/review/\",\n views.review_transcription,\n name=\"review-transcription\",\n ),\n path(\"assets/<int:asset_pk>/tags/submit/\", views.submit_tags, name=\"submit-tags\"),\n path(\"assets/\", views.AssetListView.as_view(), name=\"asset-list\"),\n path(\n \"transcribe/\", views.TranscribeListView.as_view(), name=\"transcribe-asset-list\"\n ),\n path(\"review/\", views.ReviewListView.as_view(), name=\"review-asset-list\"),\n path(\"account/ajax-status/\", views.ajax_session_status, name=\"ajax-session-status\"),\n path(\"account/ajax-messages/\", views.ajax_messages, name=\"ajax-messages\"),\n path(\n \"account/register/\",\n views.ConcordiaRegistrationView.as_view(),\n name=\"registration_register\",\n ),\n path(\n \"account/login/\", views.ConcordiaLoginView.as_view(), name=\"registration_login\"\n ),\n path(\"account/profile/\", views.AccountProfileView.as_view(), name=\"user-profile\"),\n path(\n \"account/password_reset/\",\n views.ConcordiaPasswordResetRequestView.as_view(),\n name=\"password_reset\",\n ),\n path(\n \"account/reset/<uidb64>/<token>/\",\n views.ConcordiaPasswordResetConfirmView.as_view(),\n name=\"password_reset_confirm\",\n ),\n path(\"account/\", include(\"django_registration.backends.activation.urls\")),\n path(\"account/\", include(\"django.contrib.auth.urls\")),\n path(\n \".well-known/change-password\", # https://wicg.github.io/change-password-url/\n RedirectView.as_view(pattern_name=\"password_change\"),\n ),\n path(\"captcha/ajax/\", views.ajax_captcha, name=\"ajax-captcha\"),\n path(\"captcha/\", include(\"captcha.urls\")),\n path(\"admin/\", admin.site.urls),\n # Internal support assists:\n path(\"error/500/\", server_error),\n path(\"error/404/\", page_not_found, {\"exception\": Http404()}),\n path(\"error/429/\", views.ratelimit_view),\n path(\"error/403/\", permission_denied, {\"exception\": HttpResponseForbidden()}),\n url(\"\", include(\"django_prometheus_metrics.urls\")),\n path(\"robots.txt\", include(\"robots.urls\")),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n from django.conf.urls.static import static\n\n urlpatterns = [path(\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", 
"path": "concordia/urls.py"}]} |
gh_patches_debug_1462 | rasdani/github-patches | git_diff | spack__spack-23320 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation issue: bzip2: python error in package.py
#23230 appeared to introduce a typo/python error in package.py
### Steps to reproduce the issue
```console
$ spack install bzip2
...
==> Installing bzip2-1.0.8-4efigg64jltb6topl5suvz4dmpvupmei
==> No binary for bzip2-1.0.8-4efigg64jltb6topl5suvz4dmpvupmei found: installing from source
==> Warning: included configuration files should be updated manually [files=/software/spack/dev-environments/gcc840/packages-gcc840.yaml, /software/spack/dev-environments/common/packages-common.yaml]
==> Using cached archive: /software/spack/git.2021.04.28/var/spack/cache/_source-cache/archive/ab/ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269.tar.gz
==> Error: NameError: name 'spec' is not defined
/software/spack/git.2021.04.28/var/spack/repos/builtin/packages/bzip2/package.py:57, in patch:
56 def patch(self):
>> 57 if spec.satisfies('+debug'):
58 for makefile in ['Makefile', 'Makefile-libbz2_so']:
59 filter_file(r'-O ', '-O0 ', makefile)
60 filter_file(r'-O2 ', '-O0 ', makefile)
...
```
### Information on your system
```console
$ spack debug report
* **Spack:** 0.16.1-2429-f5e6c32495
* **Python:** 3.6.8
* **Platform:** linux-rhel8-x86_64
* **Concretizer:** original
```
### Additional information
The build does not reach the point of creating spack-build-out.txt, etc.
There are no maintainers; I believe the issue was introduced by @scheibelp in #23230.
### General information
- [x] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [x] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [x] I have uploaded the build log and environment files
  (Not applicable/none generated)
- [x] I have searched the issues of this repo and believe this is not a duplicate
--- END ISSUE ---
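The traceback above already localizes the defect: `patch()` refers to a bare name `spec`, but a Spack package method only sees the concretized spec as an attribute of the package instance (compare `flag_handler()` in the same file, which checks `self.spec`). A minimal sketch of the corrected guard (illustrative sketch; `filter_file` is Spack's own helper, available inside package modules):

```python
# Sketch: the +debug guard in patch() must go through self.spec,
# matching the '+debug' checks in flag_handler() above it.
def patch(self):
    if self.spec.satisfies('+debug'):
        for makefile in ['Makefile', 'Makefile-libbz2_so']:
            filter_file(r'-O ', '-O0 ', makefile)   # disable -O
            filter_file(r'-O2 ', '-O0 ', makefile)  # disable -O2
```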
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/bzip2/package.py`
Content:
```
1 # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 import re
7
8 from spack import *
9
10
11 class Bzip2(Package, SourcewarePackage):
12 """bzip2 is a freely available, patent free high-quality data
13 compressor. It typically compresses files to within 10% to 15%
14 of the best available techniques (the PPM family of statistical
15 compressors), whilst being around twice as fast at compression
16 and six times faster at decompression."""
17
18 homepage = "https://sourceware.org/bzip2/"
19 sourceware_mirror_path = "bzip2/bzip2-1.0.8.tar.gz"
20
21 executables = [r'^bzip2$']
22
23 version('1.0.8', sha256='ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269')
24 version('1.0.7', sha256='e768a87c5b1a79511499beb41500bcc4caf203726fff46a6f5f9ad27fe08ab2b')
25 version('1.0.6', sha256='a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd')
26
27 variant('shared', default=True, description='Enables the build of shared libraries.')
28 variant('pic', default=False, description='Build static libraries with PIC')
29 variant('debug', default=False, description='Enable debug symbols and disable optimization')
30
31 depends_on('diffutils', type='build')
32
33 @classmethod
34 def determine_version(cls, exe):
35 output = Executable(exe)('--help', output=str, error=str)
36 match = re.search(r'bzip2, a block-sorting file compressor.'
37 ' Version ([^,]+)', output)
38 return match.group(1) if match else None
39
40 # override default implementation
41 @property
42 def libs(self):
43 shared = '+shared' in self.spec
44 return find_libraries(
45 'libbz2', root=self.prefix, shared=shared, recursive=True
46 )
47
48 def flag_handler(self, name, flags):
49 if name == 'cflags':
50 if '+pic' in self.spec:
51 flags.append(self.compiler.cc_pic_flag)
52 if '+debug' in self.spec:
53 flags.append('-g')
54 return(flags, None, None)
55
56 def patch(self):
57 if self.spec.satisfies('+debug'):
58 for makefile in ['Makefile', 'Makefile-libbz2_so']:
59 filter_file(r'-O ', '-O0 ', makefile)
60 filter_file(r'-O2 ', '-O0 ', makefile)
61
62 # bzip2 comes with two separate Makefiles for static and dynamic builds
63 # Tell both to use Spack's compiler wrapper instead of GCC
64 filter_file(r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile')
65 filter_file(
66 r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile-libbz2_so'
67 )
68
69 # The Makefiles use GCC flags that are incompatible with PGI
70 if self.spec.satisfies('%pgi') or self.spec.satisfies('%nvhpc@:20.11'):
71 filter_file('-Wall -Winline', '-Minform=inform', 'Makefile')
72 filter_file('-Wall -Winline', '-Minform=inform',
73 'Makefile-libbz2_so')
74
75 # Patch the link line to use RPATHs on macOS
76 if 'darwin' in self.spec.architecture:
77 v = self.spec.version
78 v1, v2, v3 = (v.up_to(i) for i in (1, 2, 3))
79
80 kwargs = {'ignore_absent': False, 'backup': False, 'string': True}
81
82 mf = FileFilter('Makefile-libbz2_so')
83 mf.filter('$(CC) -shared -Wl,-soname -Wl,libbz2.so.{0} -o libbz2.so.{1} $(OBJS)' # noqa
84 .format(v2, v3),
85 '$(CC) -dynamiclib -Wl,-install_name -Wl,@rpath/libbz2.{0}.dylib -current_version {1} -compatibility_version {2} -o libbz2.{3}.dylib $(OBJS)' # noqa
86 .format(v1, v2, v3, v3),
87 **kwargs)
88
89 mf.filter(
90 '$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.so.{0}'.format(v3), # noqa
91 '$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.{0}.dylib'
92 .format(v3), **kwargs)
93 mf.filter(
94 'rm -f libbz2.so.{0}'.format(v2),
95 'rm -f libbz2.{0}.dylib'.format(v2), **kwargs)
96 mf.filter(
97 'ln -s libbz2.so.{0} libbz2.so.{1}'.format(v3, v2),
98 'ln -s libbz2.{0}.dylib libbz2.{1}.dylib'.format(v3, v2),
99 **kwargs)
100
101 def install(self, spec, prefix):
102 # Build the dynamic library first
103 if '+shared' in spec:
104 make('-f', 'Makefile-libbz2_so')
105
106 # Build the static library and everything else
107 make()
108 make('install', 'PREFIX={0}'.format(prefix))
109
110 if '+shared' in spec:
111 install('bzip2-shared', join_path(prefix.bin, 'bzip2'))
112
113 v1, v2, v3 = (self.spec.version.up_to(i) for i in (1, 2, 3))
114 if 'darwin' in self.spec.architecture:
115 lib = 'libbz2.dylib'
116 lib1, lib2, lib3 = ('libbz2.{0}.dylib'.format(v)
117 for v in (v1, v2, v3))
118 else:
119 lib = 'libbz2.so'
120 lib1, lib2, lib3 = ('libbz2.so.{0}'.format(v)
121 for v in (v1, v2, v3))
122
123 install(lib3, join_path(prefix.lib, lib3))
124 with working_dir(prefix.lib):
125 for libname in (lib, lib1, lib2):
126 symlink(lib3, libname)
127
128 with working_dir(prefix.bin):
129 force_remove('bunzip2', 'bzcat')
130 symlink('bzip2', 'bunzip2')
131 symlink('bzip2', 'bzcat')
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/bzip2/package.py b/var/spack/repos/builtin/packages/bzip2/package.py
--- a/var/spack/repos/builtin/packages/bzip2/package.py
+++ b/var/spack/repos/builtin/packages/bzip2/package.py
@@ -54,7 +54,7 @@
return(flags, None, None)
def patch(self):
- if spec.satisfies('+debug'):
+ if self.spec.satisfies('+debug'):
for makefile in ['Makefile', 'Makefile-libbz2_so']:
filter_file(r'-O ', '-O0 ', makefile)
filter_file(r'-O2 ', '-O0 ', makefile)
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/bzip2/package.py b/var/spack/repos/builtin/packages/bzip2/package.py\n--- a/var/spack/repos/builtin/packages/bzip2/package.py\n+++ b/var/spack/repos/builtin/packages/bzip2/package.py\n@@ -54,7 +54,7 @@\n return(flags, None, None)\n \n def patch(self):\n- if spec.satisfies('+debug'):\n+ if self.spec.satisfies('+debug'):\n for makefile in ['Makefile', 'Makefile-libbz2_so']:\n filter_file(r'-O ', '-O0 ', makefile)\n filter_file(r'-O2 ', '-O0 ', makefile)\n", "issue": "Installation issue: bzip2: python error in package.py\n#23230 appeared to introduce a typo/python error in package.py\r\n\r\n### Steps to reproduce the issue\r\n\r\n```console\r\n$ spack install bzip2\r\n...\r\n==> Installing bzip2-1.0.8-4efigg64jltb6topl5suvz4dmpvupmei\r\n==> No binary for bzip2-1.0.8-4efigg64jltb6topl5suvz4dmpvupmei found: installing from source\r\n==> Warning: included configuration files should be updated manually [files=/software/spack/dev-environments/gcc840/packages-gcc840.yaml, /software/spack/dev-environments/common/packages-common.yaml]\r\n==> Using cached archive: /software/spack/git.2021.04.28/var/spack/cache/_source-cache/archive/ab/ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269.tar.gz\r\n==> Error: NameError: name 'spec' is not defined\r\n\r\n/software/spack/git.2021.04.28/var/spack/repos/builtin/packages/bzip2/package.py:57, in patch:\r\n 56 def patch(self):\r\n >> 57 if spec.satisfies('+debug'):\r\n 58 for makefile in ['Makefile', 'Makefile-libbz2_so']:\r\n 59 filter_file(r'-O ', '-O0 ', makefile)\r\n 60 filter_file(r'-O2 ', '-O0 ', makefile)\r\n...\r\n```\r\n\r\n### Information on your system\r\n```console\r\n$ spack debug report\r\n* **Spack:** 0.16.1-2429-f5e6c32495\r\n* **Python:** 3.6.8\r\n* **Platform:** linux-rhel8-x86_64\r\n* **Concretizer:** original\r\n```\r\n\r\n### Additional information\r\nDoes not reach point of creating spack-build-out.txt, etc\r\n\r\nNo maintainers, I believe issue was added by @scheibelp in #23230\r\n\r\n\r\n### General information\r\n\r\n\r\n- [X ] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [X ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers\r\n- [X ] I have uploaded the build log and environment files \r\n(Not applicable/none generated)\r\n- [X ] I have searched the issues of this repo and believe this is not a duplicate\r\n\n", "before_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport re\n\nfrom spack import *\n\n\nclass Bzip2(Package, SourcewarePackage):\n \"\"\"bzip2 is a freely available, patent free high-quality data\n compressor. 
It typically compresses files to within 10% to 15%\n of the best available techniques (the PPM family of statistical\n compressors), whilst being around twice as fast at compression\n and six times faster at decompression.\"\"\"\n\n homepage = \"https://sourceware.org/bzip2/\"\n sourceware_mirror_path = \"bzip2/bzip2-1.0.8.tar.gz\"\n\n executables = [r'^bzip2$']\n\n version('1.0.8', sha256='ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269')\n version('1.0.7', sha256='e768a87c5b1a79511499beb41500bcc4caf203726fff46a6f5f9ad27fe08ab2b')\n version('1.0.6', sha256='a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd')\n\n variant('shared', default=True, description='Enables the build of shared libraries.')\n variant('pic', default=False, description='Build static libraries with PIC')\n variant('debug', default=False, description='Enable debug symbols and disable optimization')\n\n depends_on('diffutils', type='build')\n\n @classmethod\n def determine_version(cls, exe):\n output = Executable(exe)('--help', output=str, error=str)\n match = re.search(r'bzip2, a block-sorting file compressor.'\n ' Version ([^,]+)', output)\n return match.group(1) if match else None\n\n # override default implementation\n @property\n def libs(self):\n shared = '+shared' in self.spec\n return find_libraries(\n 'libbz2', root=self.prefix, shared=shared, recursive=True\n )\n\n def flag_handler(self, name, flags):\n if name == 'cflags':\n if '+pic' in self.spec:\n flags.append(self.compiler.cc_pic_flag)\n if '+debug' in self.spec:\n flags.append('-g')\n return(flags, None, None)\n\n def patch(self):\n if self.spec.satisfies('+debug'):\n for makefile in ['Makefile', 'Makefile-libbz2_so']:\n filter_file(r'-O ', '-O0 ', makefile)\n filter_file(r'-O2 ', '-O0 ', makefile)\n\n # bzip2 comes with two separate Makefiles for static and dynamic builds\n # Tell both to use Spack's compiler wrapper instead of GCC\n filter_file(r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile')\n filter_file(\n r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile-libbz2_so'\n )\n\n # The Makefiles use GCC flags that are incompatible with PGI\n if self.spec.satisfies('%pgi') or self.spec.satisfies('%nvhpc@:20.11'):\n filter_file('-Wall -Winline', '-Minform=inform', 'Makefile')\n filter_file('-Wall -Winline', '-Minform=inform',\n 'Makefile-libbz2_so')\n\n # Patch the link line to use RPATHs on macOS\n if 'darwin' in self.spec.architecture:\n v = self.spec.version\n v1, v2, v3 = (v.up_to(i) for i in (1, 2, 3))\n\n kwargs = {'ignore_absent': False, 'backup': False, 'string': True}\n\n mf = FileFilter('Makefile-libbz2_so')\n mf.filter('$(CC) -shared -Wl,-soname -Wl,libbz2.so.{0} -o libbz2.so.{1} $(OBJS)' # noqa\n .format(v2, v3),\n '$(CC) -dynamiclib -Wl,-install_name -Wl,@rpath/libbz2.{0}.dylib -current_version {1} -compatibility_version {2} -o libbz2.{3}.dylib $(OBJS)' # noqa\n .format(v1, v2, v3, v3),\n **kwargs)\n\n mf.filter(\n '$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.so.{0}'.format(v3), # noqa\n '$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.{0}.dylib'\n .format(v3), **kwargs)\n mf.filter(\n 'rm -f libbz2.so.{0}'.format(v2),\n 'rm -f libbz2.{0}.dylib'.format(v2), **kwargs)\n mf.filter(\n 'ln -s libbz2.so.{0} libbz2.so.{1}'.format(v3, v2),\n 'ln -s libbz2.{0}.dylib libbz2.{1}.dylib'.format(v3, v2),\n **kwargs)\n\n def install(self, spec, prefix):\n # Build the dynamic library first\n if '+shared' in spec:\n make('-f', 'Makefile-libbz2_so')\n\n # Build the static library and everything else\n make()\n make('install', 
'PREFIX={0}'.format(prefix))\n\n if '+shared' in spec:\n install('bzip2-shared', join_path(prefix.bin, 'bzip2'))\n\n v1, v2, v3 = (self.spec.version.up_to(i) for i in (1, 2, 3))\n if 'darwin' in self.spec.architecture:\n lib = 'libbz2.dylib'\n lib1, lib2, lib3 = ('libbz2.{0}.dylib'.format(v)\n for v in (v1, v2, v3))\n else:\n lib = 'libbz2.so'\n lib1, lib2, lib3 = ('libbz2.so.{0}'.format(v)\n for v in (v1, v2, v3))\n\n install(lib3, join_path(prefix.lib, lib3))\n with working_dir(prefix.lib):\n for libname in (lib, lib1, lib2):\n symlink(lib3, libname)\n\n with working_dir(prefix.bin):\n force_remove('bunzip2', 'bzcat')\n symlink('bzip2', 'bunzip2')\n symlink('bzip2', 'bzcat')\n", "path": "var/spack/repos/builtin/packages/bzip2/package.py"}], "after_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport re\n\nfrom spack import *\n\n\nclass Bzip2(Package, SourcewarePackage):\n \"\"\"bzip2 is a freely available, patent free high-quality data\n compressor. It typically compresses files to within 10% to 15%\n of the best available techniques (the PPM family of statistical\n compressors), whilst being around twice as fast at compression\n and six times faster at decompression.\"\"\"\n\n homepage = \"https://sourceware.org/bzip2/\"\n sourceware_mirror_path = \"bzip2/bzip2-1.0.8.tar.gz\"\n\n executables = [r'^bzip2$']\n\n version('1.0.8', sha256='ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269')\n version('1.0.7', sha256='e768a87c5b1a79511499beb41500bcc4caf203726fff46a6f5f9ad27fe08ab2b')\n version('1.0.6', sha256='a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd')\n\n variant('shared', default=True, description='Enables the build of shared libraries.')\n variant('pic', default=False, description='Build static libraries with PIC')\n variant('debug', default=False, description='Enable debug symbols and disable optimization')\n\n depends_on('diffutils', type='build')\n\n @classmethod\n def determine_version(cls, exe):\n output = Executable(exe)('--help', output=str, error=str)\n match = re.search(r'bzip2, a block-sorting file compressor.'\n ' Version ([^,]+)', output)\n return match.group(1) if match else None\n\n # override default implementation\n @property\n def libs(self):\n shared = '+shared' in self.spec\n return find_libraries(\n 'libbz2', root=self.prefix, shared=shared, recursive=True\n )\n\n def flag_handler(self, name, flags):\n if name == 'cflags':\n if '+pic' in self.spec:\n flags.append(self.compiler.cc_pic_flag)\n if '+debug' in self.spec:\n flags.append('-g')\n return(flags, None, None)\n\n def patch(self):\n if self.spec.satisfies('+debug'):\n for makefile in ['Makefile', 'Makefile-libbz2_so']:\n filter_file(r'-O ', '-O0 ', makefile)\n filter_file(r'-O2 ', '-O0 ', makefile)\n\n # bzip2 comes with two separate Makefiles for static and dynamic builds\n # Tell both to use Spack's compiler wrapper instead of GCC\n filter_file(r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile')\n filter_file(\n r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile-libbz2_so'\n )\n\n # The Makefiles use GCC flags that are incompatible with PGI\n if self.spec.satisfies('%pgi') or self.spec.satisfies('%nvhpc@:20.11'):\n filter_file('-Wall -Winline', '-Minform=inform', 'Makefile')\n filter_file('-Wall -Winline', '-Minform=inform',\n 'Makefile-libbz2_so')\n\n # Patch the link line to use RPATHs on macOS\n if 
'darwin' in self.spec.architecture:\n v = self.spec.version\n v1, v2, v3 = (v.up_to(i) for i in (1, 2, 3))\n\n kwargs = {'ignore_absent': False, 'backup': False, 'string': True}\n\n mf = FileFilter('Makefile-libbz2_so')\n mf.filter('$(CC) -shared -Wl,-soname -Wl,libbz2.so.{0} -o libbz2.so.{1} $(OBJS)' # noqa\n .format(v2, v3),\n '$(CC) -dynamiclib -Wl,-install_name -Wl,@rpath/libbz2.{0}.dylib -current_version {1} -compatibility_version {2} -o libbz2.{3}.dylib $(OBJS)' # noqa\n .format(v1, v2, v3, v3),\n **kwargs)\n\n mf.filter(\n '$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.so.{0}'.format(v3), # noqa\n '$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.{0}.dylib'\n .format(v3), **kwargs)\n mf.filter(\n 'rm -f libbz2.so.{0}'.format(v2),\n 'rm -f libbz2.{0}.dylib'.format(v2), **kwargs)\n mf.filter(\n 'ln -s libbz2.so.{0} libbz2.so.{1}'.format(v3, v2),\n 'ln -s libbz2.{0}.dylib libbz2.{1}.dylib'.format(v3, v2),\n **kwargs)\n\n def install(self, spec, prefix):\n # Build the dynamic library first\n if '+shared' in spec:\n make('-f', 'Makefile-libbz2_so')\n\n # Build the static library and everything else\n make()\n make('install', 'PREFIX={0}'.format(prefix))\n\n if '+shared' in spec:\n install('bzip2-shared', join_path(prefix.bin, 'bzip2'))\n\n v1, v2, v3 = (self.spec.version.up_to(i) for i in (1, 2, 3))\n if 'darwin' in self.spec.architecture:\n lib = 'libbz2.dylib'\n lib1, lib2, lib3 = ('libbz2.{0}.dylib'.format(v)\n for v in (v1, v2, v3))\n else:\n lib = 'libbz2.so'\n lib1, lib2, lib3 = ('libbz2.so.{0}'.format(v)\n for v in (v1, v2, v3))\n\n install(lib3, join_path(prefix.lib, lib3))\n with working_dir(prefix.lib):\n for libname in (lib, lib1, lib2):\n symlink(lib3, libname)\n\n with working_dir(prefix.bin):\n force_remove('bunzip2', 'bzcat')\n symlink('bzip2', 'bunzip2')\n symlink('bzip2', 'bzcat')\n", "path": "var/spack/repos/builtin/packages/bzip2/package.py"}]} |
gh_patches_debug_1463 | rasdani/github-patches | git_diff | ranaroussi__yfinance-1257 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
utils.py: list index out of range
There is strange behaviour with yfinance 0.1.94 when I try to read the ticker "G7W.DU":
sometimes it works, and sometimes `utils.py` raises a "list index out of range" error.
What I expect (and sometimes works):
```
$ python
Python 3.10.9 (main, Dec 11 2022, 14:50:46) [GCC 11.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import yfinance as yf
>>> t = "G7W.DU"
>>> ticker = yf.Ticker(t)
>>> ticker.info["regularMarketPrice"]
97
```
What I often get:
```
$ python
Python 3.10.9 (main, Dec 11 2022, 14:50:46) [GCC 11.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import yfinance as yf
>>> t = "G7W.DU"
>>> ticker = yf.Ticker(t)
>>> ticker.info["regularMarketPrice"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/foo/.local/lib/python3.10/site-packages/yfinance/ticker.py", line 147, in info
return self.get_info()
File "/home/foo/.local/lib/python3.10/site-packages/yfinance/base.py", line 742, in get_info
self._get_info(proxy)
File "/home/foo/.local/lib/python3.10/site-packages/yfinance/base.py", line 424, in _get_info
data = utils.get_json(ticker_url, proxy, self.session)
File "/home/foo/.local/lib/python3.10/site-packages/yfinance/utils.py", line 205, in get_json
json_str = html.split('root.App.main =')[1].split(
IndexError: list index out of range
```
There seems to be something special about G7W.DU, because it is the only ticker where I get this error. I have tried 5 tickers so far and only that one triggers it.
--- END ISSUE ---
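The quoted frame explains why the failure is intermittent: `get_json()` indexes `html.split('root.App.main =')[1]` unconditionally, so any response that does not carry the `root.App.main` data blob (a consent page, a throttled or partial response) surfaces as `IndexError` rather than as a descriptive error. A small defensive sketch of that step — an illustration of the failure mode, not the project's actual fix, and the helper name is made up for the illustration:

```python
# Sketch: check for the marker before indexing, so a marker-less page
# fails loudly instead of raising "list index out of range".
def extract_app_main_blob(html: str) -> str:
    marker = 'root.App.main ='
    if marker not in html:
        raise ValueError("Yahoo response does not contain 'root.App.main'")
    # Hand the tail back to the existing trimming / JSON parsing logic.
    return html.split(marker, 1)[1]
```

Whether the right behaviour on a missing marker is to retry, return an empty result, or raise is a separate design decision; the sketch only shows where the guard belongs.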
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yfinance/utils.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # yfinance - market data downloader
5 # https://github.com/ranaroussi/yfinance
6 #
7 # Copyright 2017-2019 Ran Aroussi
8 #
9 # Licensed under the Apache License, Version 2.0 (the "License");
10 # you may not use this file except in compliance with the License.
11 # You may obtain a copy of the License at
12 #
13 # http://www.apache.org/licenses/LICENSE-2.0
14 #
15 # Unless required by applicable law or agreed to in writing, software
16 # distributed under the License is distributed on an "AS IS" BASIS,
17 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 # See the License for the specific language governing permissions and
19 # limitations under the License.
20 #
21
22 from __future__ import print_function
23
24 import datetime as _datetime
25 import pytz as _tz
26 import requests as _requests
27 import re as _re
28 import pandas as _pd
29 import numpy as _np
30 import sys as _sys
31 import os as _os
32 import appdirs as _ad
33
34 from base64 import b64decode
35 import hashlib
36 usePycryptodome = False # slightly faster
37 # usePycryptodome = True
38 if usePycryptodome:
39 # NOTE: if decide to use 'pycryptodome', set min version to 3.6.6
40 from Crypto.Cipher import AES
41 from Crypto.Util.Padding import unpad
42 else:
43 from cryptography.hazmat.primitives import padding
44 from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
45
46 from threading import Lock
47 mutex = Lock()
48
49 try:
50 import ujson as _json
51 except ImportError:
52 import json as _json
53
54
55 user_agent_headers = {
56 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
57
58
59 def is_isin(string):
60 return bool(_re.match("^([A-Z]{2})([A-Z0-9]{9})([0-9]{1})$", string))
61
62
63 def get_all_by_isin(isin, proxy=None, session=None):
64 if not(is_isin(isin)):
65 raise ValueError("Invalid ISIN number")
66
67 from .base import _BASE_URL_
68 session = session or _requests
69 url = "{}/v1/finance/search?q={}".format(_BASE_URL_, isin)
70 data = session.get(url=url, proxies=proxy, headers=user_agent_headers)
71 try:
72 data = data.json()
73 ticker = data.get('quotes', [{}])[0]
74 return {
75 'ticker': {
76 'symbol': ticker['symbol'],
77 'shortname': ticker['shortname'],
78 'longname': ticker['longname'],
79 'type': ticker['quoteType'],
80 'exchange': ticker['exchDisp'],
81 },
82 'news': data.get('news', [])
83 }
84 except Exception:
85 return {}
86
87
88 def get_ticker_by_isin(isin, proxy=None, session=None):
89 data = get_all_by_isin(isin, proxy, session)
90 return data.get('ticker', {}).get('symbol', '')
91
92
93 def get_info_by_isin(isin, proxy=None, session=None):
94 data = get_all_by_isin(isin, proxy, session)
95 return data.get('ticker', {})
96
97
98 def get_news_by_isin(isin, proxy=None, session=None):
99 data = get_all_by_isin(isin, proxy, session)
100 return data.get('news', {})
101
102
103 def empty_df(index=[]):
104 empty = _pd.DataFrame(index=index, data={
105 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,
106 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})
107 empty.index.name = 'Date'
108 return empty
109
110
111 def empty_earnings_dates_df():
112 empty = _pd.DataFrame(
113 columns=["Symbol", "Company", "Earnings Date",
114 "EPS Estimate", "Reported EPS", "Surprise(%)"])
115 return empty
116
117
118 def get_html(url, proxy=None, session=None):
119 session = session or _requests
120 html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text
121 return html
122
123
124
125 def decrypt_cryptojs_stores(data):
126 """
127 Yahoo has started encrypting data stores, this method decrypts it.
128 :param data: Python dict of the json data
129 :return: The decrypted string data in data['context']['dispatcher']['stores']
130 """
131
132 _cs = data["_cs"]
133 # Assumes _cr has format like: '{"words":[-449732894,601032952,157396918,2056341829],"sigBytes":16}';
134 _cr = _json.loads(data["_cr"])
135 _cr = b"".join(int.to_bytes(i, length=4, byteorder="big", signed=True) for i in _cr["words"])
136
137 password = hashlib.pbkdf2_hmac("sha1", _cs.encode("utf8"), _cr, 1, dklen=32).hex()
138
139 encrypted_stores = data['context']['dispatcher']['stores']
140 encrypted_stores = b64decode(encrypted_stores)
141 assert encrypted_stores[0:8] == b"Salted__"
142 salt = encrypted_stores[8:16]
143 encrypted_stores = encrypted_stores[16:]
144
145 key, iv = _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm="md5")
146
147 if usePycryptodome:
148 cipher = AES.new(key, AES.MODE_CBC, iv=iv)
149 plaintext = cipher.decrypt(encrypted_stores)
150 plaintext = unpad(plaintext, 16, style="pkcs7")
151 else:
152 cipher = Cipher(algorithms.AES(key), modes.CBC(iv))
153 decryptor = cipher.decryptor()
154 plaintext = decryptor.update(encrypted_stores) + decryptor.finalize()
155 unpadder = padding.PKCS7(128).unpadder()
156 plaintext = unpadder.update(plaintext) + unpadder.finalize()
157 plaintext = plaintext.decode("utf-8")
158
159 return plaintext
160
161 def _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm="md5") -> tuple:
162 """OpenSSL EVP Key Derivation Function
163 Args:
164 password (Union[str, bytes, bytearray]): Password to generate key from.
165 salt (Union[bytes, bytearray]): Salt to use.
166 keySize (int, optional): Output key length in bytes. Defaults to 32.
167 ivSize (int, optional): Output Initialization Vector (IV) length in bytes. Defaults to 16.
168 iterations (int, optional): Number of iterations to perform. Defaults to 1.
169 hashAlgorithm (str, optional): Hash algorithm to use for the KDF. Defaults to 'md5'.
170 Returns:
171 key, iv: Derived key and Initialization Vector (IV) bytes.
172
173 Taken from: https://gist.github.com/rafiibrahim8/0cd0f8c46896cafef6486cb1a50a16d3
174 OpenSSL original code: https://github.com/openssl/openssl/blob/master/crypto/evp/evp_key.c#L78
175 """
176
177 assert iterations > 0, "Iterations can not be less than 1."
178
179 if isinstance(password, str):
180 password = password.encode("utf-8")
181
182 final_length = keySize + ivSize
183 key_iv = b""
184 block = None
185
186 while len(key_iv) < final_length:
187 hasher = hashlib.new(hashAlgorithm)
188 if block:
189 hasher.update(block)
190 hasher.update(password)
191 hasher.update(salt)
192 block = hasher.digest()
193 for _ in range(1, iterations):
194 block = hashlib.new(hashAlgorithm, block).digest()
195 key_iv += block
196
197 key, iv = key_iv[:keySize], key_iv[keySize:final_length]
198 return key, iv
199
200
201 def get_json(url, proxy=None, session=None):
202 session = session or _requests
203 html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text
204
205 json_str = html.split('root.App.main =')[1].split(
206 '(this)')[0].split(';\n}')[0].strip()
207 data = _json.loads(json_str)
208
209 if "_cs" in data and "_cr" in data:
210 data_stores = _json.loads(decrypt_cryptojs_stores(data))
211 else:
212 if "context" in data and "dispatcher" in data["context"]:
213 # Keep old code, just in case
214 data_stores = data['context']['dispatcher']['stores']
215 else:
216 data_stores = data
217
218 if not 'QuoteSummaryStore' in data_stores:
219 # Problem in data. Either delisted, or Yahoo spam triggered
220 return {}
221
222 data = data_stores['QuoteSummaryStore']
223 # add data about Shares Outstanding for companies' tickers if they are available
224 try:
225 data['annualBasicAverageShares'] = \
226 data_stores['QuoteTimeSeriesStore']['timeSeries']['annualBasicAverageShares']
227 except Exception:
228 pass
229
230 # return data
231 new_data = _json.dumps(data).replace('{}', 'null')
232 new_data = _re.sub(
233 r'\{[\'|\"]raw[\'|\"]:(.*?),(.*?)\}', r'\1', new_data)
234
235 return _json.loads(new_data)
236
237
238 def camel2title(o):
239 return [_re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", i).title() for i in o]
240
241
242 def _parse_user_dt(dt, exchange_tz):
243 if isinstance(dt, int):
244 ## Should already be epoch, test with conversion:
245 _datetime.datetime.fromtimestamp(dt)
246 else:
247 # Convert str/date -> datetime, set tzinfo=exchange, get timestamp:
248 if isinstance(dt, str):
249 dt = _datetime.datetime.strptime(str(dt), '%Y-%m-%d')
250 if isinstance(dt, _datetime.date) and not isinstance(dt, _datetime.datetime):
251 dt = _datetime.datetime.combine(dt, _datetime.time(0))
252 if isinstance(dt, _datetime.datetime) and dt.tzinfo is None:
253 # Assume user is referring to exchange's timezone
254 dt = _tz.timezone(exchange_tz).localize(dt)
255 dt = int(dt.timestamp())
256 return dt
257
258
259 def auto_adjust(data):
260 df = data.copy()
261 ratio = df["Close"] / df["Adj Close"]
262 df["Adj Open"] = df["Open"] / ratio
263 df["Adj High"] = df["High"] / ratio
264 df["Adj Low"] = df["Low"] / ratio
265
266 df.drop(
267 ["Open", "High", "Low", "Close"],
268 axis=1, inplace=True)
269
270 df.rename(columns={
271 "Adj Open": "Open", "Adj High": "High",
272 "Adj Low": "Low", "Adj Close": "Close"
273 }, inplace=True)
274
275 df = df[["Open", "High", "Low", "Close", "Volume"]]
276 return df[["Open", "High", "Low", "Close", "Volume"]]
277
278
279 def back_adjust(data):
280 """ back-adjusted data to mimic true historical prices """
281
282 df = data.copy()
283 ratio = df["Adj Close"] / df["Close"]
284 df["Adj Open"] = df["Open"] * ratio
285 df["Adj High"] = df["High"] * ratio
286 df["Adj Low"] = df["Low"] * ratio
287
288 df.drop(
289 ["Open", "High", "Low", "Adj Close"],
290 axis=1, inplace=True)
291
292 df.rename(columns={
293 "Adj Open": "Open", "Adj High": "High",
294 "Adj Low": "Low"
295 }, inplace=True)
296
297 return df[["Open", "High", "Low", "Close", "Volume"]]
298
299
300 def parse_quotes(data):
301 timestamps = data["timestamp"]
302 ohlc = data["indicators"]["quote"][0]
303 volumes = ohlc["volume"]
304 opens = ohlc["open"]
305 closes = ohlc["close"]
306 lows = ohlc["low"]
307 highs = ohlc["high"]
308
309 adjclose = closes
310 if "adjclose" in data["indicators"]:
311 adjclose = data["indicators"]["adjclose"][0]["adjclose"]
312
313 quotes = _pd.DataFrame({"Open": opens,
314 "High": highs,
315 "Low": lows,
316 "Close": closes,
317 "Adj Close": adjclose,
318 "Volume": volumes})
319
320 quotes.index = _pd.to_datetime(timestamps, unit="s")
321 quotes.sort_index(inplace=True)
322
323 return quotes
324
325
326 def parse_actions(data):
327 dividends = _pd.DataFrame(
328 columns=["Dividends"], index=_pd.DatetimeIndex([]))
329 splits = _pd.DataFrame(
330 columns=["Stock Splits"], index=_pd.DatetimeIndex([]))
331
332 if "events" in data:
333 if "dividends" in data["events"]:
334 dividends = _pd.DataFrame(
335 data=list(data["events"]["dividends"].values()))
336 dividends.set_index("date", inplace=True)
337 dividends.index = _pd.to_datetime(dividends.index, unit="s")
338 dividends.sort_index(inplace=True)
339
340 dividends.columns = ["Dividends"]
341
342 if "splits" in data["events"]:
343 splits = _pd.DataFrame(
344 data=list(data["events"]["splits"].values()))
345 splits.set_index("date", inplace=True)
346 splits.index = _pd.to_datetime(splits.index, unit="s")
347 splits.sort_index(inplace=True)
348 splits["Stock Splits"] = splits["numerator"] / \
349 splits["denominator"]
350 splits = splits["Stock Splits"]
351
352 return dividends, splits
353
354
355 def fix_Yahoo_dst_issue(df, interval):
356 if interval in ["1d","1w","1wk"]:
357 # These intervals should start at time 00:00. But for some combinations of date and timezone,
358 # Yahoo has time off by few hours (e.g. Brazil 23:00 around Jan-2022). Suspect DST problem.
359 # The clue is (a) minutes=0 and (b) hour near 0.
360 # Obviously Yahoo meant 00:00, so ensure this doesn't affect date conversion:
361 f_pre_midnight = (df.index.minute == 0) & (df.index.hour.isin([22,23]))
362 dst_error_hours = _np.array([0]*df.shape[0])
363 dst_error_hours[f_pre_midnight] = 24-df.index[f_pre_midnight].hour
364 df.index += _pd.TimedeltaIndex(dst_error_hours, 'h')
365 return df
366
367
368 class ProgressBar:
369 def __init__(self, iterations, text='completed'):
370 self.text = text
371 self.iterations = iterations
372 self.prog_bar = '[]'
373 self.fill_char = '*'
374 self.width = 50
375 self.__update_amount(0)
376 self.elapsed = 1
377
378 def completed(self):
379 if self.elapsed > self.iterations:
380 self.elapsed = self.iterations
381 self.update_iteration(1)
382 print('\r' + str(self), end='')
383 _sys.stdout.flush()
384 print()
385
386 def animate(self, iteration=None):
387 if iteration is None:
388 self.elapsed += 1
389 iteration = self.elapsed
390 else:
391 self.elapsed += iteration
392
393 print('\r' + str(self), end='')
394 _sys.stdout.flush()
395 self.update_iteration()
396
397 def update_iteration(self, val=None):
398 val = val if val is not None else self.elapsed / float(self.iterations)
399 self.__update_amount(val * 100.0)
400 self.prog_bar += ' %s of %s %s' % (
401 self.elapsed, self.iterations, self.text)
402
403 def __update_amount(self, new_amount):
404 percent_done = int(round((new_amount / 100.0) * 100.0))
405 all_full = self.width - 2
406 num_hashes = int(round((percent_done / 100.0) * all_full))
407 self.prog_bar = '[' + self.fill_char * \
408 num_hashes + ' ' * (all_full - num_hashes) + ']'
409 pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
410 pct_string = '%d%%' % percent_done
411 self.prog_bar = self.prog_bar[0:pct_place] + \
412 (pct_string + self.prog_bar[pct_place + len(pct_string):])
413
414 def __str__(self):
415 return str(self.prog_bar)
416
417
418 # Simple file cache of ticker->timezone:
419 _cache_dp = None
420 def get_cache_dirpath():
421 if _cache_dp is None:
422 dp = _os.path.join(_ad.user_cache_dir(), "py-yfinance")
423 else:
424 dp = _os.path.join(_cache_dp, "py-yfinance")
425 return dp
426 def set_tz_cache_location(dp):
427 global _cache_dp
428 _cache_dp = dp
429
430 def cache_lookup_tkr_tz(tkr):
431 fp = _os.path.join(get_cache_dirpath(), "tkr-tz.csv")
432 if not _os.path.isfile(fp):
433 return None
434
435 mutex.acquire()
436 df = _pd.read_csv(fp, index_col="Ticker", on_bad_lines="skip")
437 mutex.release()
438 if tkr in df.index:
439 return df.loc[tkr,"Tz"]
440 else:
441 return None
442 def cache_store_tkr_tz(tkr,tz):
443
444 dp = get_cache_dirpath()
445 fp = _os.path.join(dp, "tkr-tz.csv")
446 mutex.acquire()
447 if not _os.path.isdir(dp):
448 _os.makedirs(dp)
449 if (not _os.path.isfile(fp)) and (tz is not None):
450 df = _pd.DataFrame({"Tz":[tz]}, index=[tkr])
451 df.index.name = "Ticker"
452 df.to_csv(fp)
453
454 else:
455 df = _pd.read_csv(fp, index_col="Ticker", on_bad_lines="skip")
456 if tz is None:
457 # Delete if in cache:
458 if tkr in df.index:
459 df.drop(tkr).to_csv(fp)
460 else:
461 if tkr in df.index:
462 raise Exception("Tkr {} tz already in cache".format(tkr))
463 df.loc[tkr,"Tz"] = tz
464 df.to_csv(fp)
465
466 mutex.release()
467
468
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yfinance/utils.py b/yfinance/utils.py
--- a/yfinance/utils.py
+++ b/yfinance/utils.py
@@ -202,6 +202,9 @@
session = session or _requests
html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text
+ if not "root.App.main =" in html:
+ return {}
+
json_str = html.split('root.App.main =')[1].split(
'(this)')[0].split(';\n}')[0].strip()
data = _json.loads(json_str)
| {"golden_diff": "diff --git a/yfinance/utils.py b/yfinance/utils.py\n--- a/yfinance/utils.py\n+++ b/yfinance/utils.py\n@@ -202,6 +202,9 @@\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n \n+ if not \"root.App.main =\" in html:\n+ return {}\n+\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)\n", "issue": "utils.py: list index out of range\nThere is a strange behaviour with yfinance 0.1.94 when I try to read ticker \"G7W.DU\":\r\nSometimes it works and sometimes the utils.py gets a list index out of range error.\r\n\r\nWhat I expect (and sometimes works):\r\n```\r\n$ python\r\nPython 3.10.9 (main, Dec 11 2022, 14:50:46) [GCC 11.3.0] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import yfinance as yf\r\n>>> t = \"G7W.DU\"\r\n>>> ticker = yf.Ticker(t)\r\n>>> ticker.info[\"regularMarketPrice\"]\r\n97\r\n```\r\n\r\nWhat I often get:\r\n```\r\n$ python\r\nPython 3.10.9 (main, Dec 11 2022, 14:50:46) [GCC 11.3.0] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import yfinance as yf\r\n>>> t = \"G7W.DU\"\r\n>>> ticker = yf.Ticker(t)\r\n>>> ticker.info[\"regularMarketPrice\"]\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/foo/.local/lib/python3.10/site-packages/yfinance/ticker.py\", line 147, in info\r\n return self.get_info()\r\n File \"/home/foo/.local/lib/python3.10/site-packages/yfinance/base.py\", line 742, in get_info\r\n self._get_info(proxy)\r\n File \"/home/foo/.local/lib/python3.10/site-packages/yfinance/base.py\", line 424, in _get_info\r\n data = utils.get_json(ticker_url, proxy, self.session)\r\n File \"/home/foo/.local/lib/python3.10/site-packages/yfinance/utils.py\", line 205, in get_json\r\n json_str = html.split('root.App.main =')[1].split(\r\nIndexError: list index out of range\r\n```\r\n\r\nThere seems to be something special with G7W.DU because I only get the error there, so far. 
I tried 5 tickers so far and only that one creates this error.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# yfinance - market data downloader\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport datetime as _datetime\nimport pytz as _tz\nimport requests as _requests\nimport re as _re\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\nimport os as _os\nimport appdirs as _ad\n\nfrom base64 import b64decode\nimport hashlib\nusePycryptodome = False # slightly faster\n# usePycryptodome = True\nif usePycryptodome:\n # NOTE: if decide to use 'pycryptodome', set min version to 3.6.6\n from Crypto.Cipher import AES\n from Crypto.Util.Padding import unpad\nelse:\n from cryptography.hazmat.primitives import padding\n from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n\nfrom threading import Lock\nmutex = Lock()\n\ntry:\n import ujson as _json\nexcept ImportError:\n import json as _json\n\n\nuser_agent_headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n\ndef is_isin(string):\n return bool(_re.match(\"^([A-Z]{2})([A-Z0-9]{9})([0-9]{1})$\", string))\n\n\ndef get_all_by_isin(isin, proxy=None, session=None):\n if not(is_isin(isin)):\n raise ValueError(\"Invalid ISIN number\")\n\n from .base import _BASE_URL_\n session = session or _requests\n url = \"{}/v1/finance/search?q={}\".format(_BASE_URL_, isin)\n data = session.get(url=url, proxies=proxy, headers=user_agent_headers)\n try:\n data = data.json()\n ticker = data.get('quotes', [{}])[0]\n return {\n 'ticker': {\n 'symbol': ticker['symbol'],\n 'shortname': ticker['shortname'],\n 'longname': ticker['longname'],\n 'type': ticker['quoteType'],\n 'exchange': ticker['exchDisp'],\n },\n 'news': data.get('news', [])\n }\n except Exception:\n return {}\n\n\ndef get_ticker_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('ticker', {}).get('symbol', '')\n\n\ndef get_info_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('ticker', {})\n\n\ndef get_news_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('news', {})\n\n\ndef empty_df(index=[]):\n empty = _pd.DataFrame(index=index, data={\n 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,\n 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})\n empty.index.name = 'Date'\n return empty\n\n\ndef empty_earnings_dates_df():\n empty = _pd.DataFrame(\n columns=[\"Symbol\", \"Company\", \"Earnings Date\",\n \"EPS Estimate\", \"Reported EPS\", \"Surprise(%)\"])\n return empty\n\n\ndef get_html(url, proxy=None, session=None):\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n return 
html\n\n\n\ndef decrypt_cryptojs_stores(data):\n \"\"\"\n Yahoo has started encrypting data stores, this method decrypts it.\n :param data: Python dict of the json data\n :return: The decrypted string data in data['context']['dispatcher']['stores']\n \"\"\"\n\n _cs = data[\"_cs\"]\n # Assumes _cr has format like: '{\"words\":[-449732894,601032952,157396918,2056341829],\"sigBytes\":16}';\n _cr = _json.loads(data[\"_cr\"])\n _cr = b\"\".join(int.to_bytes(i, length=4, byteorder=\"big\", signed=True) for i in _cr[\"words\"])\n\n password = hashlib.pbkdf2_hmac(\"sha1\", _cs.encode(\"utf8\"), _cr, 1, dklen=32).hex()\n\n encrypted_stores = data['context']['dispatcher']['stores']\n encrypted_stores = b64decode(encrypted_stores)\n assert encrypted_stores[0:8] == b\"Salted__\"\n salt = encrypted_stores[8:16]\n encrypted_stores = encrypted_stores[16:]\n\n key, iv = _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\")\n\n if usePycryptodome:\n cipher = AES.new(key, AES.MODE_CBC, iv=iv)\n plaintext = cipher.decrypt(encrypted_stores)\n plaintext = unpad(plaintext, 16, style=\"pkcs7\")\n else:\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv))\n decryptor = cipher.decryptor()\n plaintext = decryptor.update(encrypted_stores) + decryptor.finalize()\n unpadder = padding.PKCS7(128).unpadder()\n plaintext = unpadder.update(plaintext) + unpadder.finalize()\n plaintext = plaintext.decode(\"utf-8\")\n\n return plaintext\n\ndef _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\") -> tuple:\n \"\"\"OpenSSL EVP Key Derivation Function\n Args:\n password (Union[str, bytes, bytearray]): Password to generate key from.\n salt (Union[bytes, bytearray]): Salt to use.\n keySize (int, optional): Output key length in bytes. Defaults to 32.\n ivSize (int, optional): Output Initialization Vector (IV) length in bytes. Defaults to 16.\n iterations (int, optional): Number of iterations to perform. Defaults to 1.\n hashAlgorithm (str, optional): Hash algorithm to use for the KDF. Defaults to 'md5'.\n Returns:\n key, iv: Derived key and Initialization Vector (IV) bytes.\n\n Taken from: https://gist.github.com/rafiibrahim8/0cd0f8c46896cafef6486cb1a50a16d3\n OpenSSL original code: https://github.com/openssl/openssl/blob/master/crypto/evp/evp_key.c#L78\n \"\"\"\n\n assert iterations > 0, \"Iterations can not be less than 1.\"\n\n if isinstance(password, str):\n password = password.encode(\"utf-8\")\n\n final_length = keySize + ivSize\n key_iv = b\"\"\n block = None\n\n while len(key_iv) < final_length:\n hasher = hashlib.new(hashAlgorithm)\n if block:\n hasher.update(block)\n hasher.update(password)\n hasher.update(salt)\n block = hasher.digest()\n for _ in range(1, iterations):\n block = hashlib.new(hashAlgorithm, block).digest()\n key_iv += block\n\n key, iv = key_iv[:keySize], key_iv[keySize:final_length]\n return key, iv\n\n\ndef get_json(url, proxy=None, session=None):\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)\n\n if \"_cs\" in data and \"_cr\" in data:\n data_stores = _json.loads(decrypt_cryptojs_stores(data))\n else:\n if \"context\" in data and \"dispatcher\" in data[\"context\"]:\n # Keep old code, just in case\n data_stores = data['context']['dispatcher']['stores']\n else:\n data_stores = data\n\n if not 'QuoteSummaryStore' in data_stores:\n # Problem in data. 
Either delisted, or Yahoo spam triggered\n return {}\n\n data = data_stores['QuoteSummaryStore']\n # add data about Shares Outstanding for companies' tickers if they are available\n try:\n data['annualBasicAverageShares'] = \\\n data_stores['QuoteTimeSeriesStore']['timeSeries']['annualBasicAverageShares']\n except Exception:\n pass\n\n # return data\n new_data = _json.dumps(data).replace('{}', 'null')\n new_data = _re.sub(\n r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\n\n return _json.loads(new_data)\n\n\ndef camel2title(o):\n return [_re.sub(\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", i).title() for i in o]\n\n\ndef _parse_user_dt(dt, exchange_tz):\n if isinstance(dt, int):\n ## Should already be epoch, test with conversion:\n _datetime.datetime.fromtimestamp(dt)\n else:\n # Convert str/date -> datetime, set tzinfo=exchange, get timestamp:\n if isinstance(dt, str):\n dt = _datetime.datetime.strptime(str(dt), '%Y-%m-%d')\n if isinstance(dt, _datetime.date) and not isinstance(dt, _datetime.datetime):\n dt = _datetime.datetime.combine(dt, _datetime.time(0))\n if isinstance(dt, _datetime.datetime) and dt.tzinfo is None:\n # Assume user is referring to exchange's timezone\n dt = _tz.timezone(exchange_tz).localize(dt)\n dt = int(dt.timestamp())\n return dt\n\n\ndef auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef back_adjust(data):\n \"\"\" back-adjusted data to mimic true historical prices \"\"\"\n\n df = data.copy()\n ratio = df[\"Adj Close\"] / df[\"Close\"]\n df[\"Adj Open\"] = df[\"Open\"] * ratio\n df[\"Adj High\"] = df[\"High\"] * ratio\n df[\"Adj Low\"] = df[\"Low\"] * ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Adj Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\"\n }, inplace=True)\n\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef parse_quotes(data):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n \"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n\n return quotes\n\n\ndef parse_actions(data):\n dividends = _pd.DataFrame(\n columns=[\"Dividends\"], index=_pd.DatetimeIndex([]))\n splits = _pd.DataFrame(\n columns=[\"Stock Splits\"], index=_pd.DatetimeIndex([]))\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n\n 
dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n\ndef fix_Yahoo_dst_issue(df, interval):\n if interval in [\"1d\",\"1w\",\"1wk\"]:\n # These intervals should start at time 00:00. But for some combinations of date and timezone, \n # Yahoo has time off by few hours (e.g. Brazil 23:00 around Jan-2022). Suspect DST problem.\n # The clue is (a) minutes=0 and (b) hour near 0. \n # Obviously Yahoo meant 00:00, so ensure this doesn't affect date conversion:\n f_pre_midnight = (df.index.minute == 0) & (df.index.hour.isin([22,23]))\n dst_error_hours = _np.array([0]*df.shape[0])\n dst_error_hours[f_pre_midnight] = 24-df.index[f_pre_midnight].hour\n df.index += _pd.TimedeltaIndex(dst_error_hours, 'h')\n return df\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n\n\n# Simple file cache of ticker->timezone:\n_cache_dp = None\ndef get_cache_dirpath():\n if _cache_dp is None:\n dp = _os.path.join(_ad.user_cache_dir(), \"py-yfinance\")\n else:\n dp = _os.path.join(_cache_dp, \"py-yfinance\")\n return dp\ndef set_tz_cache_location(dp):\n global _cache_dp\n _cache_dp = dp\n\ndef cache_lookup_tkr_tz(tkr):\n fp = _os.path.join(get_cache_dirpath(), \"tkr-tz.csv\")\n if not _os.path.isfile(fp):\n return None\n\n mutex.acquire()\n df = _pd.read_csv(fp, index_col=\"Ticker\", on_bad_lines=\"skip\")\n mutex.release()\n if tkr in df.index:\n return df.loc[tkr,\"Tz\"]\n else:\n return None\ndef cache_store_tkr_tz(tkr,tz):\n\n dp = get_cache_dirpath()\n fp = _os.path.join(dp, \"tkr-tz.csv\")\n mutex.acquire()\n if not _os.path.isdir(dp):\n _os.makedirs(dp)\n if (not _os.path.isfile(fp)) and (tz is not None):\n df = _pd.DataFrame({\"Tz\":[tz]}, index=[tkr])\n df.index.name = \"Ticker\"\n df.to_csv(fp)\n\n else:\n df = _pd.read_csv(fp, index_col=\"Ticker\", 
on_bad_lines=\"skip\")\n if tz is None:\n # Delete if in cache:\n if tkr in df.index:\n df.drop(tkr).to_csv(fp)\n else:\n if tkr in df.index:\n raise Exception(\"Tkr {} tz already in cache\".format(tkr))\n df.loc[tkr,\"Tz\"] = tz\n df.to_csv(fp)\n \n mutex.release()\n\n", "path": "yfinance/utils.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# yfinance - market data downloader\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport datetime as _datetime\nimport pytz as _tz\nimport requests as _requests\nimport re as _re\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\nimport os as _os\nimport appdirs as _ad\n\nfrom base64 import b64decode\nimport hashlib\nusePycryptodome = False # slightly faster\n# usePycryptodome = True\nif usePycryptodome:\n # NOTE: if decide to use 'pycryptodome', set min version to 3.6.6\n from Crypto.Cipher import AES\n from Crypto.Util.Padding import unpad\nelse:\n from cryptography.hazmat.primitives import padding\n from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n\nfrom threading import Lock\nmutex = Lock()\n\ntry:\n import ujson as _json\nexcept ImportError:\n import json as _json\n\n\nuser_agent_headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n\ndef is_isin(string):\n return bool(_re.match(\"^([A-Z]{2})([A-Z0-9]{9})([0-9]{1})$\", string))\n\n\ndef get_all_by_isin(isin, proxy=None, session=None):\n if not(is_isin(isin)):\n raise ValueError(\"Invalid ISIN number\")\n\n from .base import _BASE_URL_\n session = session or _requests\n url = \"{}/v1/finance/search?q={}\".format(_BASE_URL_, isin)\n data = session.get(url=url, proxies=proxy, headers=user_agent_headers)\n try:\n data = data.json()\n ticker = data.get('quotes', [{}])[0]\n return {\n 'ticker': {\n 'symbol': ticker['symbol'],\n 'shortname': ticker['shortname'],\n 'longname': ticker['longname'],\n 'type': ticker['quoteType'],\n 'exchange': ticker['exchDisp'],\n },\n 'news': data.get('news', [])\n }\n except Exception:\n return {}\n\n\ndef get_ticker_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('ticker', {}).get('symbol', '')\n\n\ndef get_info_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('ticker', {})\n\n\ndef get_news_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('news', {})\n\n\ndef empty_df(index=[]):\n empty = _pd.DataFrame(index=index, data={\n 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,\n 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})\n empty.index.name = 'Date'\n return empty\n\n\ndef empty_earnings_dates_df():\n empty = _pd.DataFrame(\n columns=[\"Symbol\", \"Company\", \"Earnings Date\",\n 
\"EPS Estimate\", \"Reported EPS\", \"Surprise(%)\"])\n return empty\n\n\ndef get_html(url, proxy=None, session=None):\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n return html\n\n\n\ndef decrypt_cryptojs_stores(data):\n \"\"\"\n Yahoo has started encrypting data stores, this method decrypts it.\n :param data: Python dict of the json data\n :return: The decrypted string data in data['context']['dispatcher']['stores']\n \"\"\"\n\n _cs = data[\"_cs\"]\n # Assumes _cr has format like: '{\"words\":[-449732894,601032952,157396918,2056341829],\"sigBytes\":16}';\n _cr = _json.loads(data[\"_cr\"])\n _cr = b\"\".join(int.to_bytes(i, length=4, byteorder=\"big\", signed=True) for i in _cr[\"words\"])\n\n password = hashlib.pbkdf2_hmac(\"sha1\", _cs.encode(\"utf8\"), _cr, 1, dklen=32).hex()\n\n encrypted_stores = data['context']['dispatcher']['stores']\n encrypted_stores = b64decode(encrypted_stores)\n assert encrypted_stores[0:8] == b\"Salted__\"\n salt = encrypted_stores[8:16]\n encrypted_stores = encrypted_stores[16:]\n\n key, iv = _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\")\n\n if usePycryptodome:\n cipher = AES.new(key, AES.MODE_CBC, iv=iv)\n plaintext = cipher.decrypt(encrypted_stores)\n plaintext = unpad(plaintext, 16, style=\"pkcs7\")\n else:\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv))\n decryptor = cipher.decryptor()\n plaintext = decryptor.update(encrypted_stores) + decryptor.finalize()\n unpadder = padding.PKCS7(128).unpadder()\n plaintext = unpadder.update(plaintext) + unpadder.finalize()\n plaintext = plaintext.decode(\"utf-8\")\n\n return plaintext\n\ndef _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\") -> tuple:\n \"\"\"OpenSSL EVP Key Derivation Function\n Args:\n password (Union[str, bytes, bytearray]): Password to generate key from.\n salt (Union[bytes, bytearray]): Salt to use.\n keySize (int, optional): Output key length in bytes. Defaults to 32.\n ivSize (int, optional): Output Initialization Vector (IV) length in bytes. Defaults to 16.\n iterations (int, optional): Number of iterations to perform. Defaults to 1.\n hashAlgorithm (str, optional): Hash algorithm to use for the KDF. 
Defaults to 'md5'.\n Returns:\n key, iv: Derived key and Initialization Vector (IV) bytes.\n\n Taken from: https://gist.github.com/rafiibrahim8/0cd0f8c46896cafef6486cb1a50a16d3\n OpenSSL original code: https://github.com/openssl/openssl/blob/master/crypto/evp/evp_key.c#L78\n \"\"\"\n\n assert iterations > 0, \"Iterations can not be less than 1.\"\n\n if isinstance(password, str):\n password = password.encode(\"utf-8\")\n\n final_length = keySize + ivSize\n key_iv = b\"\"\n block = None\n\n while len(key_iv) < final_length:\n hasher = hashlib.new(hashAlgorithm)\n if block:\n hasher.update(block)\n hasher.update(password)\n hasher.update(salt)\n block = hasher.digest()\n for _ in range(1, iterations):\n block = hashlib.new(hashAlgorithm, block).digest()\n key_iv += block\n\n key, iv = key_iv[:keySize], key_iv[keySize:final_length]\n return key, iv\n\n\ndef get_json(url, proxy=None, session=None):\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n\n if not \"root.App.main =\" in html:\n return {}\n\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)\n\n if \"_cs\" in data and \"_cr\" in data:\n data_stores = _json.loads(decrypt_cryptojs_stores(data))\n else:\n if \"context\" in data and \"dispatcher\" in data[\"context\"]:\n # Keep old code, just in case\n data_stores = data['context']['dispatcher']['stores']\n else:\n data_stores = data\n\n if not 'QuoteSummaryStore' in data_stores:\n # Problem in data. Either delisted, or Yahoo spam triggered\n return {}\n\n data = data_stores['QuoteSummaryStore']\n # add data about Shares Outstanding for companies' tickers if they are available\n try:\n data['annualBasicAverageShares'] = \\\n data_stores['QuoteTimeSeriesStore']['timeSeries']['annualBasicAverageShares']\n except Exception:\n pass\n\n # return data\n new_data = _json.dumps(data).replace('{}', 'null')\n new_data = _re.sub(\n r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\n\n return _json.loads(new_data)\n\n\ndef camel2title(o):\n return [_re.sub(\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", i).title() for i in o]\n\n\ndef _parse_user_dt(dt, exchange_tz):\n if isinstance(dt, int):\n ## Should already be epoch, test with conversion:\n _datetime.datetime.fromtimestamp(dt)\n else:\n # Convert str/date -> datetime, set tzinfo=exchange, get timestamp:\n if isinstance(dt, str):\n dt = _datetime.datetime.strptime(str(dt), '%Y-%m-%d')\n if isinstance(dt, _datetime.date) and not isinstance(dt, _datetime.datetime):\n dt = _datetime.datetime.combine(dt, _datetime.time(0))\n if isinstance(dt, _datetime.datetime) and dt.tzinfo is None:\n # Assume user is referring to exchange's timezone\n dt = _tz.timezone(exchange_tz).localize(dt)\n dt = int(dt.timestamp())\n return dt\n\n\ndef auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef back_adjust(data):\n \"\"\" back-adjusted data to mimic true historical prices \"\"\"\n\n df = data.copy()\n ratio = df[\"Adj 
Close\"] / df[\"Close\"]\n df[\"Adj Open\"] = df[\"Open\"] * ratio\n df[\"Adj High\"] = df[\"High\"] * ratio\n df[\"Adj Low\"] = df[\"Low\"] * ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Adj Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\"\n }, inplace=True)\n\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef parse_quotes(data):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n \"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n\n return quotes\n\n\ndef parse_actions(data):\n dividends = _pd.DataFrame(\n columns=[\"Dividends\"], index=_pd.DatetimeIndex([]))\n splits = _pd.DataFrame(\n columns=[\"Stock Splits\"], index=_pd.DatetimeIndex([]))\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n\n dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n\ndef fix_Yahoo_dst_issue(df, interval):\n if interval in [\"1d\",\"1w\",\"1wk\"]:\n # These intervals should start at time 00:00. But for some combinations of date and timezone, \n # Yahoo has time off by few hours (e.g. Brazil 23:00 around Jan-2022). Suspect DST problem.\n # The clue is (a) minutes=0 and (b) hour near 0. 
\n # Obviously Yahoo meant 00:00, so ensure this doesn't affect date conversion:\n f_pre_midnight = (df.index.minute == 0) & (df.index.hour.isin([22,23]))\n dst_error_hours = _np.array([0]*df.shape[0])\n dst_error_hours[f_pre_midnight] = 24-df.index[f_pre_midnight].hour\n df.index += _pd.TimedeltaIndex(dst_error_hours, 'h')\n return df\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n\n\n# Simple file cache of ticker->timezone:\n_cache_dp = None\ndef get_cache_dirpath():\n if _cache_dp is None:\n dp = _os.path.join(_ad.user_cache_dir(), \"py-yfinance\")\n else:\n dp = _os.path.join(_cache_dp, \"py-yfinance\")\n return dp\ndef set_tz_cache_location(dp):\n global _cache_dp\n _cache_dp = dp\n\ndef cache_lookup_tkr_tz(tkr):\n fp = _os.path.join(get_cache_dirpath(), \"tkr-tz.csv\")\n if not _os.path.isfile(fp):\n return None\n\n mutex.acquire()\n df = _pd.read_csv(fp, index_col=\"Ticker\", on_bad_lines=\"skip\")\n mutex.release()\n if tkr in df.index:\n return df.loc[tkr,\"Tz\"]\n else:\n return None\ndef cache_store_tkr_tz(tkr,tz):\n\n dp = get_cache_dirpath()\n fp = _os.path.join(dp, \"tkr-tz.csv\")\n mutex.acquire()\n if not _os.path.isdir(dp):\n _os.makedirs(dp)\n if (not _os.path.isfile(fp)) and (tz is not None):\n df = _pd.DataFrame({\"Tz\":[tz]}, index=[tkr])\n df.index.name = \"Ticker\"\n df.to_csv(fp)\n\n else:\n df = _pd.read_csv(fp, index_col=\"Ticker\", on_bad_lines=\"skip\")\n if tz is None:\n # Delete if in cache:\n if tkr in df.index:\n df.drop(tkr).to_csv(fp)\n else:\n if tkr in df.index:\n raise Exception(\"Tkr {} tz already in cache\".format(tkr))\n df.loc[tkr,\"Tz\"] = tz\n df.to_csv(fp)\n \n mutex.release()\n\n", "path": "yfinance/utils.py"}]} |
gh_patches_debug_1464 | rasdani/github-patches | git_diff | kubeflow__pipelines-4118 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow output artifact store configuration (vs hard coded)
It seems like the output artifacts are always stored in a specific MinIO service, with a hard-coded port, namespace, bucket, secrets, etc. (`minio-service.kubeflow:9000`).
see: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148
It would be great to make this flexible, e.g. to allow using S3 or to change the namespace or bucket names.
I suggest making it configurable; I can open such a PR if we agree it's needed.
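As an illustration of the kind of flexibility being requested, the hard-coded values could be grouped into one configuration object resolved from the environment. Everything below is a hypothetical sketch (the class name, env variable names, and the bucket/secret defaults are made up for illustration; only the endpoint default comes from the issue itself) and not existing kfp API:

```python
import os
from dataclasses import dataclass


@dataclass
class ArtifactStoreConfig:
    # Hypothetical grouping of the values the issue describes as hard-coded.
    # Only the endpoint default is taken from the issue text; the rest are
    # placeholders.
    endpoint: str = 'minio-service.kubeflow:9000'
    bucket: str = 'mlpipeline'
    secret_name: str = 'minio-secret'

    @classmethod
    def from_env(cls) -> 'ArtifactStoreConfig':
        return cls(
            endpoint=os.environ.get('KFP_ARTIFACT_ENDPOINT', cls.endpoint),
            bucket=os.environ.get('KFP_ARTIFACT_BUCKET', cls.bucket),
            secret_name=os.environ.get('KFP_ARTIFACT_SECRET', cls.secret_name),
        )


print(ArtifactStoreConfig.from_env())
```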
flexible pipeline service (host) path in client SDK
When creating an SDK `Client()`, the path to the `ml-pipeline` API service is loaded from a hard-coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicates a specific k8s namespace. It would be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:
`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`
to:
`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`
Also note that when a user provides the `host` parameter, the IPython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`); this seems like a potential bug.
If it's acceptable, I can submit a PR for the line change above.
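A runnable sketch of the proposed fallback order (explicit `host` argument, then the `ML_PIPELINE_DNS_NAME` env variable, then the in-cluster default). The standalone helper is only illustrative; in the SDK the one-liner above would sit inside `Client.__init__` in `_client.py`:

```python
import os

IN_CLUSTER_DNS_NAME = 'ml-pipeline.kubeflow.svc.cluster.local:8888'


def resolve_pipeline_host(host=None):
    # Mirrors the proposed one-line change: explicit argument wins,
    # then the ML_PIPELINE_DNS_NAME env variable, then the default.
    return host or os.environ.get('ML_PIPELINE_DNS_NAME', IN_CLUSTER_DNS_NAME)


assert resolve_pipeline_host('localhost:8888') == 'localhost:8888'
print(resolve_pipeline_host())  # env variable if set, otherwise the in-cluster default
```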
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samples/core/iris/iris.py`
Content:
```
1 #!/usr/bin/env python3
2 # Copyright 2020 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Iris flowers example using TFX. Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py"""
16
17 from __future__ import absolute_import
18 from __future__ import division
19 from __future__ import print_function
20
21 import os
22 import kfp
23 from typing import Text
24
25 import absl
26 import tensorflow_model_analysis as tfma
27
28 from tfx.components import CsvExampleGen
29 from tfx.components import Evaluator
30 from tfx.components import ExampleValidator
31 from tfx.components import Pusher
32 from tfx.components import ResolverNode
33 from tfx.components import SchemaGen
34 from tfx.components import StatisticsGen
35 from tfx.components import Trainer
36 from tfx.components import Transform
37 from tfx.components.base import executor_spec
38 from tfx.components.trainer.executor import GenericExecutor
39 from tfx.dsl.experimental import latest_blessed_model_resolver
40 from tfx.orchestration import data_types
41 from tfx.orchestration import pipeline
42 from tfx.orchestration.kubeflow import kubeflow_dag_runner
43 from tfx.proto import trainer_pb2
44 from tfx.proto import pusher_pb2
45 from tfx.types import Channel
46 from tfx.types.standard_artifacts import Model
47 from tfx.types.standard_artifacts import ModelBlessing
48 from tfx.utils.dsl_utils import external_input
49
50 _pipeline_name = 'iris_native_keras'
51
52 # This example assumes that Iris flowers data is stored in GCS and the
53 # utility function is in iris_utils.py. Feel free to customize as needed.
54 _data_root_param = data_types.RuntimeParameter(
55 name='data-root',
56 default='gs://ml-pipeline/sample-data/iris/data',
57 ptype=Text,
58 )
59
60 # Python module file to inject customized logic into the TFX components. The
61 # Transform and Trainer both require user-defined functions to run successfully.
62 # This file is fork from https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_utils_native_keras.py
63 # and baked into the TFX image used in the pipeline.
64 _module_file_param = data_types.RuntimeParameter(
65 name='module-file',
66 default=
67 '/tfx-src/tfx/examples/iris/iris_utils_native_keras.py',
68 ptype=Text,
69 )
70
71 # Directory and data locations. This example assumes all of the flowers
72 # example code and metadata library is relative to a GCS path.
73 # Note: if one deployed KFP from GKE marketplace, it's possible to leverage
74 # the following magic placeholder to auto-populate the default GCS bucket
75 # associated with KFP deployment. Otherwise you'll need to replace it with your
76 # actual bucket name here or when creating a run.
77 _pipeline_root = os.path.join(
78 'gs://{{kfp-default-bucket}}', 'tfx_iris', kfp.dsl.RUN_ID_PLACEHOLDER
79 )
80
81
82 def _create_pipeline(
83 pipeline_name: Text, pipeline_root: Text
84 ) -> pipeline.Pipeline:
85 """Implements the Iris flowers pipeline with TFX."""
86 examples = external_input(_data_root_param)
87
88 # Brings data into the pipeline or otherwise joins/converts training data.
89 example_gen = CsvExampleGen(input=examples)
90
91 # Computes statistics over data for visualization and example validation.
92 statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
93
94 # Generates schema based on statistics files.
95 infer_schema = SchemaGen(
96 statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True
97 )
98
99 # Performs anomaly detection based on statistics and data schema.
100 validate_stats = ExampleValidator(
101 statistics=statistics_gen.outputs['statistics'],
102 schema=infer_schema.outputs['schema']
103 )
104
105 # Performs transformations and feature engineering in training and serving.
106 transform = Transform(
107 examples=example_gen.outputs['examples'],
108 schema=infer_schema.outputs['schema'],
109 module_file=_module_file_param
110 )
111
112 # Uses user-provided Python function that implements a model using Keras.
113 trainer = Trainer(
114 module_file=_module_file_param,
115 custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
116 examples=transform.outputs['transformed_examples'],
117 transform_graph=transform.outputs['transform_graph'],
118 schema=infer_schema.outputs['schema'],
119 train_args=trainer_pb2.TrainArgs(num_steps=100),
120 eval_args=trainer_pb2.EvalArgs(num_steps=50)
121 )
122
123 # Get the latest blessed model for model validation.
124 model_resolver = ResolverNode(
125 instance_name='latest_blessed_model_resolver',
126 resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
127 model=Channel(type=Model),
128 model_blessing=Channel(type=ModelBlessing)
129 )
130
131 # Uses TFMA to compute an evaluation statistics over features of a model and
132 # perform quality validation of a candidate model (compared to a baseline).
133 # Note: to compile this successfully you'll need TFMA at >= 0.21.5
134 eval_config = tfma.EvalConfig(
135 model_specs=[
136 tfma.ModelSpec(name='candidate', label_key='variety'),
137 tfma.ModelSpec(
138 name='baseline', label_key='variety', is_baseline=True
139 )
140 ],
141 slicing_specs=[
142 tfma.SlicingSpec(),
143 # Data can be sliced along a feature column. Required by TFMA visualization.
144 tfma.SlicingSpec(feature_keys=['sepal_length'])],
145 metrics_specs=[
146 tfma.MetricsSpec(
147 metrics=[
148 tfma.MetricConfig(
149 class_name='SparseCategoricalAccuracy',
150 threshold=tfma.config.MetricThreshold(
151 value_threshold=tfma.GenericValueThreshold(
152 lower_bound={'value': 0.9}
153 ),
154 change_threshold=tfma.GenericChangeThreshold(
155 direction=tfma.MetricDirection.HIGHER_IS_BETTER,
156 absolute={'value': -1e-10}
157 )
158 )
159 )
160 ]
161 )
162 ]
163 )
164
165 # Uses TFMA to compute a evaluation statistics over features of a model.
166 model_analyzer = Evaluator(
167 examples=example_gen.outputs['examples'],
168 model=trainer.outputs['model'],
169 baseline_model=model_resolver.outputs['model'],
170 # Change threshold will be ignored if there is no baseline (first run).
171 eval_config=eval_config
172 )
173
174 # Checks whether the model passed the validation steps and pushes the model
175 # to a file destination if check passed.
176 pusher = Pusher(
177 model=trainer.outputs['model'],
178 model_blessing=model_analyzer.outputs['blessing'],
179 push_destination=pusher_pb2.PushDestination(
180 filesystem=pusher_pb2.PushDestination.Filesystem(
181 base_directory=os.path.
182 join(str(pipeline.ROOT_PARAMETER), 'model_serving')
183 )
184 )
185 )
186
187 return pipeline.Pipeline(
188 pipeline_name=pipeline_name,
189 pipeline_root=pipeline_root,
190 components=[
191 example_gen, statistics_gen, infer_schema, validate_stats, transform,
192 trainer, model_resolver, model_analyzer, pusher
193 ],
194 enable_cache=True,
195 )
196
197
198 if __name__ == '__main__':
199 absl.logging.set_verbosity(absl.logging.INFO)
200 # Make sure the version of TFX image used is consistent with the version of
201 # TFX SDK. Here we use tfx:0.22.0 image.
202 config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
203 kubeflow_metadata_config=kubeflow_dag_runner.
204 get_default_kubeflow_metadata_config(),
205 tfx_image='gcr.io/tfx-oss-public/tfx:0.22.0',
206 )
207 kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(
208 output_filename=__file__ + '.yaml', config=config
209 )
210 kfp_runner.run(
211 _create_pipeline(
212 pipeline_name=_pipeline_name, pipeline_root=_pipeline_root
213 )
214 )
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/samples/core/iris/iris.py b/samples/core/iris/iris.py
--- a/samples/core/iris/iris.py
+++ b/samples/core/iris/iris.py
@@ -14,10 +14,6 @@
# limitations under the License.
"""Iris flowers example using TFX. Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
import os
import kfp
from typing import Text
| {"golden_diff": "diff --git a/samples/core/iris/iris.py b/samples/core/iris/iris.py\n--- a/samples/core/iris/iris.py\n+++ b/samples/core/iris/iris.py\n@@ -14,10 +14,6 @@\n # limitations under the License.\n \"\"\"Iris flowers example using TFX. Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py\"\"\"\n \n-from __future__ import absolute_import\n-from __future__ import division\n-from __future__ import print_function\n-\n import os\n import kfp\n from typing import Text\n", "issue": "allow output artifact store configuration (vs hard coded)\nit seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`). \r\n\r\nsee: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148\r\n\r\nit would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.\r\ni suggest making it configurable, i can do such PR if we agree its needed. \nflexible pipeline service (host) path in client SDK \nwhen creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:\r\n\r\n`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`\r\n\r\nto:\r\n\r\n`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`\r\n\r\nalso note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug\r\n\r\nif its acceptable i can submit a PR for the line change above\r\n \n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Iris flowers example using TFX. 
Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport kfp\nfrom typing import Text\n\nimport absl\nimport tensorflow_model_analysis as tfma\n\nfrom tfx.components import CsvExampleGen\nfrom tfx.components import Evaluator\nfrom tfx.components import ExampleValidator\nfrom tfx.components import Pusher\nfrom tfx.components import ResolverNode\nfrom tfx.components import SchemaGen\nfrom tfx.components import StatisticsGen\nfrom tfx.components import Trainer\nfrom tfx.components import Transform\nfrom tfx.components.base import executor_spec\nfrom tfx.components.trainer.executor import GenericExecutor\nfrom tfx.dsl.experimental import latest_blessed_model_resolver\nfrom tfx.orchestration import data_types\nfrom tfx.orchestration import pipeline\nfrom tfx.orchestration.kubeflow import kubeflow_dag_runner\nfrom tfx.proto import trainer_pb2\nfrom tfx.proto import pusher_pb2\nfrom tfx.types import Channel\nfrom tfx.types.standard_artifacts import Model\nfrom tfx.types.standard_artifacts import ModelBlessing\nfrom tfx.utils.dsl_utils import external_input\n\n_pipeline_name = 'iris_native_keras'\n\n# This example assumes that Iris flowers data is stored in GCS and the\n# utility function is in iris_utils.py. Feel free to customize as needed.\n_data_root_param = data_types.RuntimeParameter(\n name='data-root',\n default='gs://ml-pipeline/sample-data/iris/data',\n ptype=Text,\n)\n\n# Python module file to inject customized logic into the TFX components. The\n# Transform and Trainer both require user-defined functions to run successfully.\n# This file is fork from https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_utils_native_keras.py\n# and baked into the TFX image used in the pipeline.\n_module_file_param = data_types.RuntimeParameter(\n name='module-file',\n default=\n '/tfx-src/tfx/examples/iris/iris_utils_native_keras.py',\n ptype=Text,\n)\n\n# Directory and data locations. This example assumes all of the flowers\n# example code and metadata library is relative to a GCS path.\n# Note: if one deployed KFP from GKE marketplace, it's possible to leverage\n# the following magic placeholder to auto-populate the default GCS bucket\n# associated with KFP deployment. 
Otherwise you'll need to replace it with your\n# actual bucket name here or when creating a run.\n_pipeline_root = os.path.join(\n 'gs://{{kfp-default-bucket}}', 'tfx_iris', kfp.dsl.RUN_ID_PLACEHOLDER\n)\n\n\ndef _create_pipeline(\n pipeline_name: Text, pipeline_root: Text\n) -> pipeline.Pipeline:\n \"\"\"Implements the Iris flowers pipeline with TFX.\"\"\"\n examples = external_input(_data_root_param)\n\n # Brings data into the pipeline or otherwise joins/converts training data.\n example_gen = CsvExampleGen(input=examples)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n infer_schema = SchemaGen(\n statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True\n )\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=infer_schema.outputs['schema']\n )\n\n # Performs transformations and feature engineering in training and serving.\n transform = Transform(\n examples=example_gen.outputs['examples'],\n schema=infer_schema.outputs['schema'],\n module_file=_module_file_param\n )\n\n # Uses user-provided Python function that implements a model using Keras.\n trainer = Trainer(\n module_file=_module_file_param,\n custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),\n examples=transform.outputs['transformed_examples'],\n transform_graph=transform.outputs['transform_graph'],\n schema=infer_schema.outputs['schema'],\n train_args=trainer_pb2.TrainArgs(num_steps=100),\n eval_args=trainer_pb2.EvalArgs(num_steps=50)\n )\n\n # Get the latest blessed model for model validation.\n model_resolver = ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing)\n )\n\n # Uses TFMA to compute an evaluation statistics over features of a model and\n # perform quality validation of a candidate model (compared to a baseline).\n # Note: to compile this successfully you'll need TFMA at >= 0.21.5\n eval_config = tfma.EvalConfig(\n model_specs=[\n tfma.ModelSpec(name='candidate', label_key='variety'),\n tfma.ModelSpec(\n name='baseline', label_key='variety', is_baseline=True\n )\n ],\n slicing_specs=[\n tfma.SlicingSpec(),\n # Data can be sliced along a feature column. 
Required by TFMA visualization.\n tfma.SlicingSpec(feature_keys=['sepal_length'])],\n metrics_specs=[\n tfma.MetricsSpec(\n metrics=[\n tfma.MetricConfig(\n class_name='SparseCategoricalAccuracy',\n threshold=tfma.config.MetricThreshold(\n value_threshold=tfma.GenericValueThreshold(\n lower_bound={'value': 0.9}\n ),\n change_threshold=tfma.GenericChangeThreshold(\n direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n absolute={'value': -1e-10}\n )\n )\n )\n ]\n )\n ]\n )\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_analyzer = Evaluator(\n examples=example_gen.outputs['examples'],\n model=trainer.outputs['model'],\n baseline_model=model_resolver.outputs['model'],\n # Change threshold will be ignored if there is no baseline (first run).\n eval_config=eval_config\n )\n\n # Checks whether the model passed the validation steps and pushes the model\n # to a file destination if check passed.\n pusher = Pusher(\n model=trainer.outputs['model'],\n model_blessing=model_analyzer.outputs['blessing'],\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.\n join(str(pipeline.ROOT_PARAMETER), 'model_serving')\n )\n )\n )\n\n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, statistics_gen, infer_schema, validate_stats, transform,\n trainer, model_resolver, model_analyzer, pusher\n ],\n enable_cache=True,\n )\n\n\nif __name__ == '__main__':\n absl.logging.set_verbosity(absl.logging.INFO)\n # Make sure the version of TFX image used is consistent with the version of\n # TFX SDK. Here we use tfx:0.22.0 image.\n config = kubeflow_dag_runner.KubeflowDagRunnerConfig(\n kubeflow_metadata_config=kubeflow_dag_runner.\n get_default_kubeflow_metadata_config(),\n tfx_image='gcr.io/tfx-oss-public/tfx:0.22.0',\n )\n kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(\n output_filename=__file__ + '.yaml', config=config\n )\n kfp_runner.run(\n _create_pipeline(\n pipeline_name=_pipeline_name, pipeline_root=_pipeline_root\n )\n )\n", "path": "samples/core/iris/iris.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Iris flowers example using TFX. 
Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py\"\"\"\n\nimport os\nimport kfp\nfrom typing import Text\n\nimport absl\nimport tensorflow_model_analysis as tfma\n\nfrom tfx.components import CsvExampleGen\nfrom tfx.components import Evaluator\nfrom tfx.components import ExampleValidator\nfrom tfx.components import Pusher\nfrom tfx.components import ResolverNode\nfrom tfx.components import SchemaGen\nfrom tfx.components import StatisticsGen\nfrom tfx.components import Trainer\nfrom tfx.components import Transform\nfrom tfx.components.base import executor_spec\nfrom tfx.components.trainer.executor import GenericExecutor\nfrom tfx.dsl.experimental import latest_blessed_model_resolver\nfrom tfx.orchestration import data_types\nfrom tfx.orchestration import pipeline\nfrom tfx.orchestration.kubeflow import kubeflow_dag_runner\nfrom tfx.proto import trainer_pb2\nfrom tfx.proto import pusher_pb2\nfrom tfx.types import Channel\nfrom tfx.types.standard_artifacts import Model\nfrom tfx.types.standard_artifacts import ModelBlessing\nfrom tfx.utils.dsl_utils import external_input\n\n_pipeline_name = 'iris_native_keras'\n\n# This example assumes that Iris flowers data is stored in GCS and the\n# utility function is in iris_utils.py. Feel free to customize as needed.\n_data_root_param = data_types.RuntimeParameter(\n name='data-root',\n default='gs://ml-pipeline/sample-data/iris/data',\n ptype=Text,\n)\n\n# Python module file to inject customized logic into the TFX components. The\n# Transform and Trainer both require user-defined functions to run successfully.\n# This file is fork from https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_utils_native_keras.py\n# and baked into the TFX image used in the pipeline.\n_module_file_param = data_types.RuntimeParameter(\n name='module-file',\n default=\n '/tfx-src/tfx/examples/iris/iris_utils_native_keras.py',\n ptype=Text,\n)\n\n# Directory and data locations. This example assumes all of the flowers\n# example code and metadata library is relative to a GCS path.\n# Note: if one deployed KFP from GKE marketplace, it's possible to leverage\n# the following magic placeholder to auto-populate the default GCS bucket\n# associated with KFP deployment. 
Otherwise you'll need to replace it with your\n# actual bucket name here or when creating a run.\n_pipeline_root = os.path.join(\n 'gs://{{kfp-default-bucket}}', 'tfx_iris', kfp.dsl.RUN_ID_PLACEHOLDER\n)\n\n\ndef _create_pipeline(\n pipeline_name: Text, pipeline_root: Text\n) -> pipeline.Pipeline:\n \"\"\"Implements the Iris flowers pipeline with TFX.\"\"\"\n examples = external_input(_data_root_param)\n\n # Brings data into the pipeline or otherwise joins/converts training data.\n example_gen = CsvExampleGen(input=examples)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n infer_schema = SchemaGen(\n statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True\n )\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=infer_schema.outputs['schema']\n )\n\n # Performs transformations and feature engineering in training and serving.\n transform = Transform(\n examples=example_gen.outputs['examples'],\n schema=infer_schema.outputs['schema'],\n module_file=_module_file_param\n )\n\n # Uses user-provided Python function that implements a model using Keras.\n trainer = Trainer(\n module_file=_module_file_param,\n custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),\n examples=transform.outputs['transformed_examples'],\n transform_graph=transform.outputs['transform_graph'],\n schema=infer_schema.outputs['schema'],\n train_args=trainer_pb2.TrainArgs(num_steps=100),\n eval_args=trainer_pb2.EvalArgs(num_steps=50)\n )\n\n # Get the latest blessed model for model validation.\n model_resolver = ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing)\n )\n\n # Uses TFMA to compute an evaluation statistics over features of a model and\n # perform quality validation of a candidate model (compared to a baseline).\n # Note: to compile this successfully you'll need TFMA at >= 0.21.5\n eval_config = tfma.EvalConfig(\n model_specs=[\n tfma.ModelSpec(name='candidate', label_key='variety'),\n tfma.ModelSpec(\n name='baseline', label_key='variety', is_baseline=True\n )\n ],\n slicing_specs=[\n tfma.SlicingSpec(),\n # Data can be sliced along a feature column. 
Required by TFMA visualization.\n tfma.SlicingSpec(feature_keys=['sepal_length'])],\n metrics_specs=[\n tfma.MetricsSpec(\n metrics=[\n tfma.MetricConfig(\n class_name='SparseCategoricalAccuracy',\n threshold=tfma.config.MetricThreshold(\n value_threshold=tfma.GenericValueThreshold(\n lower_bound={'value': 0.9}\n ),\n change_threshold=tfma.GenericChangeThreshold(\n direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n absolute={'value': -1e-10}\n )\n )\n )\n ]\n )\n ]\n )\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_analyzer = Evaluator(\n examples=example_gen.outputs['examples'],\n model=trainer.outputs['model'],\n baseline_model=model_resolver.outputs['model'],\n # Change threshold will be ignored if there is no baseline (first run).\n eval_config=eval_config\n )\n\n # Checks whether the model passed the validation steps and pushes the model\n # to a file destination if check passed.\n pusher = Pusher(\n model=trainer.outputs['model'],\n model_blessing=model_analyzer.outputs['blessing'],\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.\n join(str(pipeline.ROOT_PARAMETER), 'model_serving')\n )\n )\n )\n\n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, statistics_gen, infer_schema, validate_stats, transform,\n trainer, model_resolver, model_analyzer, pusher\n ],\n enable_cache=True,\n )\n\n\nif __name__ == '__main__':\n absl.logging.set_verbosity(absl.logging.INFO)\n # Make sure the version of TFX image used is consistent with the version of\n # TFX SDK. Here we use tfx:0.21.2 image.\n config = kubeflow_dag_runner.KubeflowDagRunnerConfig(\n kubeflow_metadata_config=kubeflow_dag_runner.\n get_default_kubeflow_metadata_config(),\n tfx_image='gcr.io/tfx-oss-public/tfx:0.21.2',\n )\n kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(\n output_filename=__file__ + '.yaml', config=config\n )\n kfp_runner.run(\n _create_pipeline(\n pipeline_name=_pipeline_name, pipeline_root=_pipeline_root\n )\n )\n", "path": "samples/core/iris/iris.py"}]} |
gh_patches_debug_1465 | rasdani/github-patches | git_diff | pallets__click-1839 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
urllib.unquote() no longer exists
In [_termui_impl.py](https://github.com/pallets/click/blob/972becff259e4ffcd220a6cad5096f36a89fdd6d/src/click/_termui_impl.py#L556) `urllib.unquote()` is called. But [urllib](https://docs.python.org/3/library/urllib.html) is a package now. Equivalent functionality is available in the urllib.parse module.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/click/_termui_impl.py`
Content:
```
1 """
2 This module contains implementations for the termui module. To keep the
3 import time of Click down, some infrequently used functionality is
4 placed in this module and only imported as needed.
5 """
6 import contextlib
7 import math
8 import os
9 import sys
10 import time
11 from gettext import gettext as _
12
13 from ._compat import _default_text_stdout
14 from ._compat import CYGWIN
15 from ._compat import get_best_encoding
16 from ._compat import isatty
17 from ._compat import open_stream
18 from ._compat import strip_ansi
19 from ._compat import term_len
20 from ._compat import WIN
21 from .exceptions import ClickException
22 from .utils import echo
23
24 if os.name == "nt":
25 BEFORE_BAR = "\r"
26 AFTER_BAR = "\n"
27 else:
28 BEFORE_BAR = "\r\033[?25l"
29 AFTER_BAR = "\033[?25h\n"
30
31
32 def _length_hint(obj):
33 """Returns the length hint of an object."""
34 try:
35 return len(obj)
36 except (AttributeError, TypeError):
37 try:
38 get_hint = type(obj).__length_hint__
39 except AttributeError:
40 return None
41 try:
42 hint = get_hint(obj)
43 except TypeError:
44 return None
45 if hint is NotImplemented or not isinstance(hint, int) or hint < 0:
46 return None
47 return hint
48
49
50 class ProgressBar:
51 def __init__(
52 self,
53 iterable,
54 length=None,
55 fill_char="#",
56 empty_char=" ",
57 bar_template="%(bar)s",
58 info_sep=" ",
59 show_eta=True,
60 show_percent=None,
61 show_pos=False,
62 item_show_func=None,
63 label=None,
64 file=None,
65 color=None,
66 update_min_steps=1,
67 width=30,
68 ):
69 self.fill_char = fill_char
70 self.empty_char = empty_char
71 self.bar_template = bar_template
72 self.info_sep = info_sep
73 self.show_eta = show_eta
74 self.show_percent = show_percent
75 self.show_pos = show_pos
76 self.item_show_func = item_show_func
77 self.label = label or ""
78 if file is None:
79 file = _default_text_stdout()
80 self.file = file
81 self.color = color
82 self.update_min_steps = update_min_steps
83 self._completed_intervals = 0
84 self.width = width
85 self.autowidth = width == 0
86
87 if length is None:
88 length = _length_hint(iterable)
89 if iterable is None:
90 if length is None:
91 raise TypeError("iterable or length is required")
92 iterable = range(length)
93 self.iter = iter(iterable)
94 self.length = length
95 self.length_known = length is not None
96 self.pos = 0
97 self.avg = []
98 self.start = self.last_eta = time.time()
99 self.eta_known = False
100 self.finished = False
101 self.max_width = None
102 self.entered = False
103 self.current_item = None
104 self.is_hidden = not isatty(self.file)
105 self._last_line = None
106
107 def __enter__(self):
108 self.entered = True
109 self.render_progress()
110 return self
111
112 def __exit__(self, exc_type, exc_value, tb):
113 self.render_finish()
114
115 def __iter__(self):
116 if not self.entered:
117 raise RuntimeError("You need to use progress bars in a with block.")
118 self.render_progress()
119 return self.generator()
120
121 def __next__(self):
122 # Iteration is defined in terms of a generator function,
123 # returned by iter(self); use that to define next(). This works
124 # because `self.iter` is an iterable consumed by that generator,
125 # so it is re-entry safe. Calling `next(self.generator())`
126 # twice works and does "what you want".
127 return next(iter(self))
128
129 def render_finish(self):
130 if self.is_hidden:
131 return
132 self.file.write(AFTER_BAR)
133 self.file.flush()
134
135 @property
136 def pct(self):
137 if self.finished:
138 return 1.0
139 return min(self.pos / (float(self.length) or 1), 1.0)
140
141 @property
142 def time_per_iteration(self):
143 if not self.avg:
144 return 0.0
145 return sum(self.avg) / float(len(self.avg))
146
147 @property
148 def eta(self):
149 if self.length_known and not self.finished:
150 return self.time_per_iteration * (self.length - self.pos)
151 return 0.0
152
153 def format_eta(self):
154 if self.eta_known:
155 t = int(self.eta)
156 seconds = t % 60
157 t //= 60
158 minutes = t % 60
159 t //= 60
160 hours = t % 24
161 t //= 24
162 if t > 0:
163 return f"{t}d {hours:02}:{minutes:02}:{seconds:02}"
164 else:
165 return f"{hours:02}:{minutes:02}:{seconds:02}"
166 return ""
167
168 def format_pos(self):
169 pos = str(self.pos)
170 if self.length_known:
171 pos += f"/{self.length}"
172 return pos
173
174 def format_pct(self):
175 return f"{int(self.pct * 100): 4}%"[1:]
176
177 def format_bar(self):
178 if self.length_known:
179 bar_length = int(self.pct * self.width)
180 bar = self.fill_char * bar_length
181 bar += self.empty_char * (self.width - bar_length)
182 elif self.finished:
183 bar = self.fill_char * self.width
184 else:
185 bar = list(self.empty_char * (self.width or 1))
186 if self.time_per_iteration != 0:
187 bar[
188 int(
189 (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)
190 * self.width
191 )
192 ] = self.fill_char
193 bar = "".join(bar)
194 return bar
195
196 def format_progress_line(self):
197 show_percent = self.show_percent
198
199 info_bits = []
200 if self.length_known and show_percent is None:
201 show_percent = not self.show_pos
202
203 if self.show_pos:
204 info_bits.append(self.format_pos())
205 if show_percent:
206 info_bits.append(self.format_pct())
207 if self.show_eta and self.eta_known and not self.finished:
208 info_bits.append(self.format_eta())
209 if self.item_show_func is not None:
210 item_info = self.item_show_func(self.current_item)
211 if item_info is not None:
212 info_bits.append(item_info)
213
214 return (
215 self.bar_template
216 % {
217 "label": self.label,
218 "bar": self.format_bar(),
219 "info": self.info_sep.join(info_bits),
220 }
221 ).rstrip()
222
223 def render_progress(self):
224 import shutil
225
226 if self.is_hidden:
227 # Only output the label as it changes if the output is not a
228 # TTY. Use file=stderr if you expect to be piping stdout.
229 if self._last_line != self.label:
230 self._last_line = self.label
231 echo(self.label, file=self.file, color=self.color)
232
233 return
234
235 buf = []
236 # Update width in case the terminal has been resized
237 if self.autowidth:
238 old_width = self.width
239 self.width = 0
240 clutter_length = term_len(self.format_progress_line())
241 new_width = max(0, shutil.get_terminal_size().columns - clutter_length)
242 if new_width < old_width:
243 buf.append(BEFORE_BAR)
244 buf.append(" " * self.max_width)
245 self.max_width = new_width
246 self.width = new_width
247
248 clear_width = self.width
249 if self.max_width is not None:
250 clear_width = self.max_width
251
252 buf.append(BEFORE_BAR)
253 line = self.format_progress_line()
254 line_len = term_len(line)
255 if self.max_width is None or self.max_width < line_len:
256 self.max_width = line_len
257
258 buf.append(line)
259 buf.append(" " * (clear_width - line_len))
260 line = "".join(buf)
261 # Render the line only if it changed.
262
263 if line != self._last_line:
264 self._last_line = line
265 echo(line, file=self.file, color=self.color, nl=False)
266 self.file.flush()
267
268 def make_step(self, n_steps):
269 self.pos += n_steps
270 if self.length_known and self.pos >= self.length:
271 self.finished = True
272
273 if (time.time() - self.last_eta) < 1.0:
274 return
275
276 self.last_eta = time.time()
277
278 # self.avg is a rolling list of length <= 7 of steps where steps are
279 # defined as time elapsed divided by the total progress through
280 # self.length.
281 if self.pos:
282 step = (time.time() - self.start) / self.pos
283 else:
284 step = time.time() - self.start
285
286 self.avg = self.avg[-6:] + [step]
287
288 self.eta_known = self.length_known
289
290 def update(self, n_steps, current_item=None):
291 """Update the progress bar by advancing a specified number of
292 steps, and optionally set the ``current_item`` for this new
293 position.
294
295 :param n_steps: Number of steps to advance.
296 :param current_item: Optional item to set as ``current_item``
297 for the updated position.
298
299 .. versionchanged:: 8.0
300 Added the ``current_item`` optional parameter.
301
302 .. versionchanged:: 8.0
303 Only render when the number of steps meets the
304 ``update_min_steps`` threshold.
305 """
306 if current_item is not None:
307 self.current_item = current_item
308
309 self._completed_intervals += n_steps
310
311 if self._completed_intervals >= self.update_min_steps:
312 self.make_step(self._completed_intervals)
313 self.render_progress()
314 self._completed_intervals = 0
315
316 def finish(self):
317 self.eta_known = 0
318 self.current_item = None
319 self.finished = True
320
321 def generator(self):
322 """Return a generator which yields the items added to the bar
323 during construction, and updates the progress bar *after* the
324 yielded block returns.
325 """
326 # WARNING: the iterator interface for `ProgressBar` relies on
327 # this and only works because this is a simple generator which
328 # doesn't create or manage additional state. If this function
329 # changes, the impact should be evaluated both against
330 # `iter(bar)` and `next(bar)`. `next()` in particular may call
331 # `self.generator()` repeatedly, and this must remain safe in
332 # order for that interface to work.
333 if not self.entered:
334 raise RuntimeError("You need to use progress bars in a with block.")
335
336 if self.is_hidden:
337 yield from self.iter
338 else:
339 for rv in self.iter:
340 self.current_item = rv
341
342 # This allows show_item_func to be updated before the
343 # item is processed. Only trigger at the beginning of
344 # the update interval.
345 if self._completed_intervals == 0:
346 self.render_progress()
347
348 yield rv
349 self.update(1)
350
351 self.finish()
352 self.render_progress()
353
354
355 def pager(generator, color=None):
356 """Decide what method to use for paging through text."""
357 stdout = _default_text_stdout()
358 if not isatty(sys.stdin) or not isatty(stdout):
359 return _nullpager(stdout, generator, color)
360 pager_cmd = (os.environ.get("PAGER", None) or "").strip()
361 if pager_cmd:
362 if WIN:
363 return _tempfilepager(generator, pager_cmd, color)
364 return _pipepager(generator, pager_cmd, color)
365 if os.environ.get("TERM") in ("dumb", "emacs"):
366 return _nullpager(stdout, generator, color)
367 if WIN or sys.platform.startswith("os2"):
368 return _tempfilepager(generator, "more <", color)
369 if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0:
370 return _pipepager(generator, "less", color)
371
372 import tempfile
373
374 fd, filename = tempfile.mkstemp()
375 os.close(fd)
376 try:
377 if hasattr(os, "system") and os.system(f'more "{filename}"') == 0:
378 return _pipepager(generator, "more", color)
379 return _nullpager(stdout, generator, color)
380 finally:
381 os.unlink(filename)
382
383
384 def _pipepager(generator, cmd, color):
385 """Page through text by feeding it to another program. Invoking a
386 pager through this might support colors.
387 """
388 import subprocess
389
390 env = dict(os.environ)
391
392 # If we're piping to less we might support colors under the
393 # condition that
394 cmd_detail = cmd.rsplit("/", 1)[-1].split()
395 if color is None and cmd_detail[0] == "less":
396 less_flags = f"{os.environ.get('LESS', '')}{' '.join(cmd_detail[1:])}"
397 if not less_flags:
398 env["LESS"] = "-R"
399 color = True
400 elif "r" in less_flags or "R" in less_flags:
401 color = True
402
403 c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)
404 encoding = get_best_encoding(c.stdin)
405 try:
406 for text in generator:
407 if not color:
408 text = strip_ansi(text)
409
410 c.stdin.write(text.encode(encoding, "replace"))
411 except (OSError, KeyboardInterrupt):
412 pass
413 else:
414 c.stdin.close()
415
416 # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
417 # search or other commands inside less).
418 #
419 # That means when the user hits ^C, the parent process (click) terminates,
420 # but less is still alive, paging the output and messing up the terminal.
421 #
422 # If the user wants to make the pager exit on ^C, they should set
423 # `LESS='-K'`. It's not our decision to make.
424 while True:
425 try:
426 c.wait()
427 except KeyboardInterrupt:
428 pass
429 else:
430 break
431
432
433 def _tempfilepager(generator, cmd, color):
434 """Page through text by invoking a program on a temporary file."""
435 import tempfile
436
437 filename = tempfile.mkstemp()
438 # TODO: This never terminates if the passed generator never terminates.
439 text = "".join(generator)
440 if not color:
441 text = strip_ansi(text)
442 encoding = get_best_encoding(sys.stdout)
443 with open_stream(filename, "wb")[0] as f:
444 f.write(text.encode(encoding))
445 try:
446 os.system(f'{cmd} "{filename}"')
447 finally:
448 os.unlink(filename)
449
450
451 def _nullpager(stream, generator, color):
452 """Simply print unformatted text. This is the ultimate fallback."""
453 for text in generator:
454 if not color:
455 text = strip_ansi(text)
456 stream.write(text)
457
458
459 class Editor:
460 def __init__(self, editor=None, env=None, require_save=True, extension=".txt"):
461 self.editor = editor
462 self.env = env
463 self.require_save = require_save
464 self.extension = extension
465
466 def get_editor(self):
467 if self.editor is not None:
468 return self.editor
469 for key in "VISUAL", "EDITOR":
470 rv = os.environ.get(key)
471 if rv:
472 return rv
473 if WIN:
474 return "notepad"
475 for editor in "sensible-editor", "vim", "nano":
476 if os.system(f"which {editor} >/dev/null 2>&1") == 0:
477 return editor
478 return "vi"
479
480 def edit_file(self, filename):
481 import subprocess
482
483 editor = self.get_editor()
484 if self.env:
485 environ = os.environ.copy()
486 environ.update(self.env)
487 else:
488 environ = None
489 try:
490 c = subprocess.Popen(f'{editor} "{filename}"', env=environ, shell=True)
491 exit_code = c.wait()
492 if exit_code != 0:
493 raise ClickException(
494 _("{editor}: Editing failed").format(editor=editor)
495 )
496 except OSError as e:
497 raise ClickException(
498 _("{editor}: Editing failed: {e}").format(editor=editor, e=e)
499 )
500
501 def edit(self, text):
502 import tempfile
503
504 if not text:
505 text = ""
506
507 is_bytes = isinstance(text, (bytes, bytearray))
508
509 if not is_bytes:
510 if text and not text.endswith("\n"):
511 text += "\n"
512
513 if WIN:
514 text = text.replace("\n", "\r\n").encode("utf-8-sig")
515 else:
516 text = text.encode("utf-8")
517
518 fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension)
519
520 try:
521 with os.fdopen(fd, "wb") as f:
522 f.write(text)
523
524 # If the filesystem resolution is 1 second, like Mac OS
525 # 10.12 Extended, or 2 seconds, like FAT32, and the editor
526 # closes very fast, require_save can fail. Set the modified
527 # time to be 2 seconds in the past to work around this.
528 os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2))
529 # Depending on the resolution, the exact value might not be
530 # recorded, so get the new recorded value.
531 timestamp = os.path.getmtime(name)
532
533 self.edit_file(name)
534
535 if self.require_save and os.path.getmtime(name) == timestamp:
536 return None
537
538 with open(name, "rb") as f:
539 rv = f.read()
540
541 if is_bytes:
542 return rv
543
544 return rv.decode("utf-8-sig").replace("\r\n", "\n")
545 finally:
546 os.unlink(name)
547
548
549 def open_url(url, wait=False, locate=False):
550 import subprocess
551
552 def _unquote_file(url):
553 import urllib
554
555 if url.startswith("file://"):
556 url = urllib.unquote(url[7:])
557 return url
558
559 if sys.platform == "darwin":
560 args = ["open"]
561 if wait:
562 args.append("-W")
563 if locate:
564 args.append("-R")
565 args.append(_unquote_file(url))
566 null = open("/dev/null", "w")
567 try:
568 return subprocess.Popen(args, stderr=null).wait()
569 finally:
570 null.close()
571 elif WIN:
572 if locate:
573 url = _unquote_file(url.replace('"', ""))
574 args = f'explorer /select,"{url}"'
575 else:
576 url = url.replace('"', "")
577 wait = "/WAIT" if wait else ""
578 args = f'start {wait} "" "{url}"'
579 return os.system(args)
580 elif CYGWIN:
581 if locate:
582 url = os.path.dirname(_unquote_file(url).replace('"', ""))
583 args = f'cygstart "{url}"'
584 else:
585 url = url.replace('"', "")
586 wait = "-w" if wait else ""
587 args = f'cygstart {wait} "{url}"'
588 return os.system(args)
589
590 try:
591 if locate:
592 url = os.path.dirname(_unquote_file(url)) or "."
593 else:
594 url = _unquote_file(url)
595 c = subprocess.Popen(["xdg-open", url])
596 if wait:
597 return c.wait()
598 return 0
599 except OSError:
600 if url.startswith(("http://", "https://")) and not locate and not wait:
601 import webbrowser
602
603 webbrowser.open(url)
604 return 0
605 return 1
606
607
608 def _translate_ch_to_exc(ch):
609 if ch == "\x03":
610 raise KeyboardInterrupt()
611 if ch == "\x04" and not WIN: # Unix-like, Ctrl+D
612 raise EOFError()
613 if ch == "\x1a" and WIN: # Windows, Ctrl+Z
614 raise EOFError()
615
616
617 if WIN:
618 import msvcrt
619
620 @contextlib.contextmanager
621 def raw_terminal():
622 yield
623
624 def getchar(echo):
625 # The function `getch` will return a bytes object corresponding to
626 # the pressed character. Since Windows 10 build 1803, it will also
627 # return \x00 when called a second time after pressing a regular key.
628 #
629 # `getwch` does not share this probably-bugged behavior. Moreover, it
630 # returns a Unicode object by default, which is what we want.
631 #
632 # Either of these functions will return \x00 or \xe0 to indicate
633 # a special key, and you need to call the same function again to get
634 # the "rest" of the code. The fun part is that \u00e0 is
635 # "latin small letter a with grave", so if you type that on a French
636 # keyboard, you _also_ get a \xe0.
637 # E.g., consider the Up arrow. This returns \xe0 and then \x48. The
638 # resulting Unicode string reads as "a with grave" + "capital H".
639 # This is indistinguishable from when the user actually types
640 # "a with grave" and then "capital H".
641 #
642 # When \xe0 is returned, we assume it's part of a special-key sequence
643 # and call `getwch` again, but that means that when the user types
644 # the \u00e0 character, `getchar` doesn't return until a second
645 # character is typed.
646 # The alternative is returning immediately, but that would mess up
647 # cross-platform handling of arrow keys and others that start with
648 # \xe0. Another option is using `getch`, but then we can't reliably
649 # read non-ASCII characters, because return values of `getch` are
650 # limited to the current 8-bit codepage.
651 #
652 # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`
653 # is doing the right thing in more situations than with `getch`.
654 if echo:
655 func = msvcrt.getwche
656 else:
657 func = msvcrt.getwch
658
659 rv = func()
660 if rv in ("\x00", "\xe0"):
661 # \x00 and \xe0 are control characters that indicate special key,
662 # see above.
663 rv += func()
664 _translate_ch_to_exc(rv)
665 return rv
666
667
668 else:
669 import tty
670 import termios
671
672 @contextlib.contextmanager
673 def raw_terminal():
674 if not isatty(sys.stdin):
675 f = open("/dev/tty")
676 fd = f.fileno()
677 else:
678 fd = sys.stdin.fileno()
679 f = None
680 try:
681 old_settings = termios.tcgetattr(fd)
682 try:
683 tty.setraw(fd)
684 yield fd
685 finally:
686 termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
687 sys.stdout.flush()
688 if f is not None:
689 f.close()
690 except termios.error:
691 pass
692
693 def getchar(echo):
694 with raw_terminal() as fd:
695 ch = os.read(fd, 32)
696 ch = ch.decode(get_best_encoding(sys.stdin), "replace")
697 if echo and isatty(sys.stdout):
698 sys.stdout.write(ch)
699 _translate_ch_to_exc(ch)
700 return ch
701
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/click/_termui_impl.py b/src/click/_termui_impl.py
--- a/src/click/_termui_impl.py
+++ b/src/click/_termui_impl.py
@@ -549,11 +549,12 @@
 def open_url(url, wait=False, locate=False):
     import subprocess
 
-    def _unquote_file(url):
-        import urllib
+    def _unquote_file(url: str) -> str:
+        from urllib.parse import unquote
 
         if url.startswith("file://"):
-            url = urllib.unquote(url[7:])
+            url = unquote(url[7:])
+
         return url
 
     if sys.platform == "darwin":
| {"golden_diff": "diff --git a/src/click/_termui_impl.py b/src/click/_termui_impl.py\n--- a/src/click/_termui_impl.py\n+++ b/src/click/_termui_impl.py\n@@ -549,11 +549,12 @@\n def open_url(url, wait=False, locate=False):\n import subprocess\n \n- def _unquote_file(url):\n- import urllib\n+ def _unquote_file(url: str) -> str:\n+ from urllib.parse import unquote\n \n if url.startswith(\"file://\"):\n- url = urllib.unquote(url[7:])\n+ url = unquote(url[7:])\n+\n return url\n \n if sys.platform == \"darwin\":\n", "issue": "urllib.unquote() no longer exists\nIn [_termui_impl.py](https://github.com/pallets/click/blob/972becff259e4ffcd220a6cad5096f36a89fdd6d/src/click/_termui_impl.py#L556) `urllib.unquote()` is called. But [urllib](https://docs.python.org/3/library/urllib.html) is a package now. Equivalent functionality is available in the urllib.parse module.\n", "before_files": [{"content": "\"\"\"\nThis module contains implementations for the termui module. To keep the\nimport time of Click down, some infrequently used functionality is\nplaced in this module and only imported as needed.\n\"\"\"\nimport contextlib\nimport math\nimport os\nimport sys\nimport time\nfrom gettext import gettext as _\n\nfrom ._compat import _default_text_stdout\nfrom ._compat import CYGWIN\nfrom ._compat import get_best_encoding\nfrom ._compat import isatty\nfrom ._compat import open_stream\nfrom ._compat import strip_ansi\nfrom ._compat import term_len\nfrom ._compat import WIN\nfrom .exceptions import ClickException\nfrom .utils import echo\n\nif os.name == \"nt\":\n BEFORE_BAR = \"\\r\"\n AFTER_BAR = \"\\n\"\nelse:\n BEFORE_BAR = \"\\r\\033[?25l\"\n AFTER_BAR = \"\\033[?25h\\n\"\n\n\ndef _length_hint(obj):\n \"\"\"Returns the length hint of an object.\"\"\"\n try:\n return len(obj)\n except (AttributeError, TypeError):\n try:\n get_hint = type(obj).__length_hint__\n except AttributeError:\n return None\n try:\n hint = get_hint(obj)\n except TypeError:\n return None\n if hint is NotImplemented or not isinstance(hint, int) or hint < 0:\n return None\n return hint\n\n\nclass ProgressBar:\n def __init__(\n self,\n iterable,\n length=None,\n fill_char=\"#\",\n empty_char=\" \",\n bar_template=\"%(bar)s\",\n info_sep=\" \",\n show_eta=True,\n show_percent=None,\n show_pos=False,\n item_show_func=None,\n label=None,\n file=None,\n color=None,\n update_min_steps=1,\n width=30,\n ):\n self.fill_char = fill_char\n self.empty_char = empty_char\n self.bar_template = bar_template\n self.info_sep = info_sep\n self.show_eta = show_eta\n self.show_percent = show_percent\n self.show_pos = show_pos\n self.item_show_func = item_show_func\n self.label = label or \"\"\n if file is None:\n file = _default_text_stdout()\n self.file = file\n self.color = color\n self.update_min_steps = update_min_steps\n self._completed_intervals = 0\n self.width = width\n self.autowidth = width == 0\n\n if length is None:\n length = _length_hint(iterable)\n if iterable is None:\n if length is None:\n raise TypeError(\"iterable or length is required\")\n iterable = range(length)\n self.iter = iter(iterable)\n self.length = length\n self.length_known = length is not None\n self.pos = 0\n self.avg = []\n self.start = self.last_eta = time.time()\n self.eta_known = False\n self.finished = False\n self.max_width = None\n self.entered = False\n self.current_item = None\n self.is_hidden = not isatty(self.file)\n self._last_line = None\n\n def __enter__(self):\n self.entered = True\n self.render_progress()\n return self\n\n def __exit__(self, exc_type, 
exc_value, tb):\n self.render_finish()\n\n def __iter__(self):\n if not self.entered:\n raise RuntimeError(\"You need to use progress bars in a with block.\")\n self.render_progress()\n return self.generator()\n\n def __next__(self):\n # Iteration is defined in terms of a generator function,\n # returned by iter(self); use that to define next(). This works\n # because `self.iter` is an iterable consumed by that generator,\n # so it is re-entry safe. Calling `next(self.generator())`\n # twice works and does \"what you want\".\n return next(iter(self))\n\n def render_finish(self):\n if self.is_hidden:\n return\n self.file.write(AFTER_BAR)\n self.file.flush()\n\n @property\n def pct(self):\n if self.finished:\n return 1.0\n return min(self.pos / (float(self.length) or 1), 1.0)\n\n @property\n def time_per_iteration(self):\n if not self.avg:\n return 0.0\n return sum(self.avg) / float(len(self.avg))\n\n @property\n def eta(self):\n if self.length_known and not self.finished:\n return self.time_per_iteration * (self.length - self.pos)\n return 0.0\n\n def format_eta(self):\n if self.eta_known:\n t = int(self.eta)\n seconds = t % 60\n t //= 60\n minutes = t % 60\n t //= 60\n hours = t % 24\n t //= 24\n if t > 0:\n return f\"{t}d {hours:02}:{minutes:02}:{seconds:02}\"\n else:\n return f\"{hours:02}:{minutes:02}:{seconds:02}\"\n return \"\"\n\n def format_pos(self):\n pos = str(self.pos)\n if self.length_known:\n pos += f\"/{self.length}\"\n return pos\n\n def format_pct(self):\n return f\"{int(self.pct * 100): 4}%\"[1:]\n\n def format_bar(self):\n if self.length_known:\n bar_length = int(self.pct * self.width)\n bar = self.fill_char * bar_length\n bar += self.empty_char * (self.width - bar_length)\n elif self.finished:\n bar = self.fill_char * self.width\n else:\n bar = list(self.empty_char * (self.width or 1))\n if self.time_per_iteration != 0:\n bar[\n int(\n (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)\n * self.width\n )\n ] = self.fill_char\n bar = \"\".join(bar)\n return bar\n\n def format_progress_line(self):\n show_percent = self.show_percent\n\n info_bits = []\n if self.length_known and show_percent is None:\n show_percent = not self.show_pos\n\n if self.show_pos:\n info_bits.append(self.format_pos())\n if show_percent:\n info_bits.append(self.format_pct())\n if self.show_eta and self.eta_known and not self.finished:\n info_bits.append(self.format_eta())\n if self.item_show_func is not None:\n item_info = self.item_show_func(self.current_item)\n if item_info is not None:\n info_bits.append(item_info)\n\n return (\n self.bar_template\n % {\n \"label\": self.label,\n \"bar\": self.format_bar(),\n \"info\": self.info_sep.join(info_bits),\n }\n ).rstrip()\n\n def render_progress(self):\n import shutil\n\n if self.is_hidden:\n # Only output the label as it changes if the output is not a\n # TTY. 
Use file=stderr if you expect to be piping stdout.\n if self._last_line != self.label:\n self._last_line = self.label\n echo(self.label, file=self.file, color=self.color)\n\n return\n\n buf = []\n # Update width in case the terminal has been resized\n if self.autowidth:\n old_width = self.width\n self.width = 0\n clutter_length = term_len(self.format_progress_line())\n new_width = max(0, shutil.get_terminal_size().columns - clutter_length)\n if new_width < old_width:\n buf.append(BEFORE_BAR)\n buf.append(\" \" * self.max_width)\n self.max_width = new_width\n self.width = new_width\n\n clear_width = self.width\n if self.max_width is not None:\n clear_width = self.max_width\n\n buf.append(BEFORE_BAR)\n line = self.format_progress_line()\n line_len = term_len(line)\n if self.max_width is None or self.max_width < line_len:\n self.max_width = line_len\n\n buf.append(line)\n buf.append(\" \" * (clear_width - line_len))\n line = \"\".join(buf)\n # Render the line only if it changed.\n\n if line != self._last_line:\n self._last_line = line\n echo(line, file=self.file, color=self.color, nl=False)\n self.file.flush()\n\n def make_step(self, n_steps):\n self.pos += n_steps\n if self.length_known and self.pos >= self.length:\n self.finished = True\n\n if (time.time() - self.last_eta) < 1.0:\n return\n\n self.last_eta = time.time()\n\n # self.avg is a rolling list of length <= 7 of steps where steps are\n # defined as time elapsed divided by the total progress through\n # self.length.\n if self.pos:\n step = (time.time() - self.start) / self.pos\n else:\n step = time.time() - self.start\n\n self.avg = self.avg[-6:] + [step]\n\n self.eta_known = self.length_known\n\n def update(self, n_steps, current_item=None):\n \"\"\"Update the progress bar by advancing a specified number of\n steps, and optionally set the ``current_item`` for this new\n position.\n\n :param n_steps: Number of steps to advance.\n :param current_item: Optional item to set as ``current_item``\n for the updated position.\n\n .. versionchanged:: 8.0\n Added the ``current_item`` optional parameter.\n\n .. versionchanged:: 8.0\n Only render when the number of steps meets the\n ``update_min_steps`` threshold.\n \"\"\"\n if current_item is not None:\n self.current_item = current_item\n\n self._completed_intervals += n_steps\n\n if self._completed_intervals >= self.update_min_steps:\n self.make_step(self._completed_intervals)\n self.render_progress()\n self._completed_intervals = 0\n\n def finish(self):\n self.eta_known = 0\n self.current_item = None\n self.finished = True\n\n def generator(self):\n \"\"\"Return a generator which yields the items added to the bar\n during construction, and updates the progress bar *after* the\n yielded block returns.\n \"\"\"\n # WARNING: the iterator interface for `ProgressBar` relies on\n # this and only works because this is a simple generator which\n # doesn't create or manage additional state. If this function\n # changes, the impact should be evaluated both against\n # `iter(bar)` and `next(bar)`. `next()` in particular may call\n # `self.generator()` repeatedly, and this must remain safe in\n # order for that interface to work.\n if not self.entered:\n raise RuntimeError(\"You need to use progress bars in a with block.\")\n\n if self.is_hidden:\n yield from self.iter\n else:\n for rv in self.iter:\n self.current_item = rv\n\n # This allows show_item_func to be updated before the\n # item is processed. 
Only trigger at the beginning of\n # the update interval.\n if self._completed_intervals == 0:\n self.render_progress()\n\n yield rv\n self.update(1)\n\n self.finish()\n self.render_progress()\n\n\ndef pager(generator, color=None):\n \"\"\"Decide what method to use for paging through text.\"\"\"\n stdout = _default_text_stdout()\n if not isatty(sys.stdin) or not isatty(stdout):\n return _nullpager(stdout, generator, color)\n pager_cmd = (os.environ.get(\"PAGER\", None) or \"\").strip()\n if pager_cmd:\n if WIN:\n return _tempfilepager(generator, pager_cmd, color)\n return _pipepager(generator, pager_cmd, color)\n if os.environ.get(\"TERM\") in (\"dumb\", \"emacs\"):\n return _nullpager(stdout, generator, color)\n if WIN or sys.platform.startswith(\"os2\"):\n return _tempfilepager(generator, \"more <\", color)\n if hasattr(os, \"system\") and os.system(\"(less) 2>/dev/null\") == 0:\n return _pipepager(generator, \"less\", color)\n\n import tempfile\n\n fd, filename = tempfile.mkstemp()\n os.close(fd)\n try:\n if hasattr(os, \"system\") and os.system(f'more \"{filename}\"') == 0:\n return _pipepager(generator, \"more\", color)\n return _nullpager(stdout, generator, color)\n finally:\n os.unlink(filename)\n\n\ndef _pipepager(generator, cmd, color):\n \"\"\"Page through text by feeding it to another program. Invoking a\n pager through this might support colors.\n \"\"\"\n import subprocess\n\n env = dict(os.environ)\n\n # If we're piping to less we might support colors under the\n # condition that\n cmd_detail = cmd.rsplit(\"/\", 1)[-1].split()\n if color is None and cmd_detail[0] == \"less\":\n less_flags = f\"{os.environ.get('LESS', '')}{' '.join(cmd_detail[1:])}\"\n if not less_flags:\n env[\"LESS\"] = \"-R\"\n color = True\n elif \"r\" in less_flags or \"R\" in less_flags:\n color = True\n\n c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)\n encoding = get_best_encoding(c.stdin)\n try:\n for text in generator:\n if not color:\n text = strip_ansi(text)\n\n c.stdin.write(text.encode(encoding, \"replace\"))\n except (OSError, KeyboardInterrupt):\n pass\n else:\n c.stdin.close()\n\n # Less doesn't respect ^C, but catches it for its own UI purposes (aborting\n # search or other commands inside less).\n #\n # That means when the user hits ^C, the parent process (click) terminates,\n # but less is still alive, paging the output and messing up the terminal.\n #\n # If the user wants to make the pager exit on ^C, they should set\n # `LESS='-K'`. It's not our decision to make.\n while True:\n try:\n c.wait()\n except KeyboardInterrupt:\n pass\n else:\n break\n\n\ndef _tempfilepager(generator, cmd, color):\n \"\"\"Page through text by invoking a program on a temporary file.\"\"\"\n import tempfile\n\n filename = tempfile.mkstemp()\n # TODO: This never terminates if the passed generator never terminates.\n text = \"\".join(generator)\n if not color:\n text = strip_ansi(text)\n encoding = get_best_encoding(sys.stdout)\n with open_stream(filename, \"wb\")[0] as f:\n f.write(text.encode(encoding))\n try:\n os.system(f'{cmd} \"{filename}\"')\n finally:\n os.unlink(filename)\n\n\ndef _nullpager(stream, generator, color):\n \"\"\"Simply print unformatted text. 
This is the ultimate fallback.\"\"\"\n for text in generator:\n if not color:\n text = strip_ansi(text)\n stream.write(text)\n\n\nclass Editor:\n def __init__(self, editor=None, env=None, require_save=True, extension=\".txt\"):\n self.editor = editor\n self.env = env\n self.require_save = require_save\n self.extension = extension\n\n def get_editor(self):\n if self.editor is not None:\n return self.editor\n for key in \"VISUAL\", \"EDITOR\":\n rv = os.environ.get(key)\n if rv:\n return rv\n if WIN:\n return \"notepad\"\n for editor in \"sensible-editor\", \"vim\", \"nano\":\n if os.system(f\"which {editor} >/dev/null 2>&1\") == 0:\n return editor\n return \"vi\"\n\n def edit_file(self, filename):\n import subprocess\n\n editor = self.get_editor()\n if self.env:\n environ = os.environ.copy()\n environ.update(self.env)\n else:\n environ = None\n try:\n c = subprocess.Popen(f'{editor} \"{filename}\"', env=environ, shell=True)\n exit_code = c.wait()\n if exit_code != 0:\n raise ClickException(\n _(\"{editor}: Editing failed\").format(editor=editor)\n )\n except OSError as e:\n raise ClickException(\n _(\"{editor}: Editing failed: {e}\").format(editor=editor, e=e)\n )\n\n def edit(self, text):\n import tempfile\n\n if not text:\n text = \"\"\n\n is_bytes = isinstance(text, (bytes, bytearray))\n\n if not is_bytes:\n if text and not text.endswith(\"\\n\"):\n text += \"\\n\"\n\n if WIN:\n text = text.replace(\"\\n\", \"\\r\\n\").encode(\"utf-8-sig\")\n else:\n text = text.encode(\"utf-8\")\n\n fd, name = tempfile.mkstemp(prefix=\"editor-\", suffix=self.extension)\n\n try:\n with os.fdopen(fd, \"wb\") as f:\n f.write(text)\n\n # If the filesystem resolution is 1 second, like Mac OS\n # 10.12 Extended, or 2 seconds, like FAT32, and the editor\n # closes very fast, require_save can fail. 
Set the modified\n # time to be 2 seconds in the past to work around this.\n os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2))\n # Depending on the resolution, the exact value might not be\n # recorded, so get the new recorded value.\n timestamp = os.path.getmtime(name)\n\n self.edit_file(name)\n\n if self.require_save and os.path.getmtime(name) == timestamp:\n return None\n\n with open(name, \"rb\") as f:\n rv = f.read()\n\n if is_bytes:\n return rv\n\n return rv.decode(\"utf-8-sig\").replace(\"\\r\\n\", \"\\n\")\n finally:\n os.unlink(name)\n\n\ndef open_url(url, wait=False, locate=False):\n import subprocess\n\n def _unquote_file(url):\n import urllib\n\n if url.startswith(\"file://\"):\n url = urllib.unquote(url[7:])\n return url\n\n if sys.platform == \"darwin\":\n args = [\"open\"]\n if wait:\n args.append(\"-W\")\n if locate:\n args.append(\"-R\")\n args.append(_unquote_file(url))\n null = open(\"/dev/null\", \"w\")\n try:\n return subprocess.Popen(args, stderr=null).wait()\n finally:\n null.close()\n elif WIN:\n if locate:\n url = _unquote_file(url.replace('\"', \"\"))\n args = f'explorer /select,\"{url}\"'\n else:\n url = url.replace('\"', \"\")\n wait = \"/WAIT\" if wait else \"\"\n args = f'start {wait} \"\" \"{url}\"'\n return os.system(args)\n elif CYGWIN:\n if locate:\n url = os.path.dirname(_unquote_file(url).replace('\"', \"\"))\n args = f'cygstart \"{url}\"'\n else:\n url = url.replace('\"', \"\")\n wait = \"-w\" if wait else \"\"\n args = f'cygstart {wait} \"{url}\"'\n return os.system(args)\n\n try:\n if locate:\n url = os.path.dirname(_unquote_file(url)) or \".\"\n else:\n url = _unquote_file(url)\n c = subprocess.Popen([\"xdg-open\", url])\n if wait:\n return c.wait()\n return 0\n except OSError:\n if url.startswith((\"http://\", \"https://\")) and not locate and not wait:\n import webbrowser\n\n webbrowser.open(url)\n return 0\n return 1\n\n\ndef _translate_ch_to_exc(ch):\n if ch == \"\\x03\":\n raise KeyboardInterrupt()\n if ch == \"\\x04\" and not WIN: # Unix-like, Ctrl+D\n raise EOFError()\n if ch == \"\\x1a\" and WIN: # Windows, Ctrl+Z\n raise EOFError()\n\n\nif WIN:\n import msvcrt\n\n @contextlib.contextmanager\n def raw_terminal():\n yield\n\n def getchar(echo):\n # The function `getch` will return a bytes object corresponding to\n # the pressed character. Since Windows 10 build 1803, it will also\n # return \\x00 when called a second time after pressing a regular key.\n #\n # `getwch` does not share this probably-bugged behavior. Moreover, it\n # returns a Unicode object by default, which is what we want.\n #\n # Either of these functions will return \\x00 or \\xe0 to indicate\n # a special key, and you need to call the same function again to get\n # the \"rest\" of the code. The fun part is that \\u00e0 is\n # \"latin small letter a with grave\", so if you type that on a French\n # keyboard, you _also_ get a \\xe0.\n # E.g., consider the Up arrow. This returns \\xe0 and then \\x48. 
The\n # resulting Unicode string reads as \"a with grave\" + \"capital H\".\n # This is indistinguishable from when the user actually types\n # \"a with grave\" and then \"capital H\".\n #\n # When \\xe0 is returned, we assume it's part of a special-key sequence\n # and call `getwch` again, but that means that when the user types\n # the \\u00e0 character, `getchar` doesn't return until a second\n # character is typed.\n # The alternative is returning immediately, but that would mess up\n # cross-platform handling of arrow keys and others that start with\n # \\xe0. Another option is using `getch`, but then we can't reliably\n # read non-ASCII characters, because return values of `getch` are\n # limited to the current 8-bit codepage.\n #\n # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`\n # is doing the right thing in more situations than with `getch`.\n if echo:\n func = msvcrt.getwche\n else:\n func = msvcrt.getwch\n\n rv = func()\n if rv in (\"\\x00\", \"\\xe0\"):\n # \\x00 and \\xe0 are control characters that indicate special key,\n # see above.\n rv += func()\n _translate_ch_to_exc(rv)\n return rv\n\n\nelse:\n import tty\n import termios\n\n @contextlib.contextmanager\n def raw_terminal():\n if not isatty(sys.stdin):\n f = open(\"/dev/tty\")\n fd = f.fileno()\n else:\n fd = sys.stdin.fileno()\n f = None\n try:\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(fd)\n yield fd\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n sys.stdout.flush()\n if f is not None:\n f.close()\n except termios.error:\n pass\n\n def getchar(echo):\n with raw_terminal() as fd:\n ch = os.read(fd, 32)\n ch = ch.decode(get_best_encoding(sys.stdin), \"replace\")\n if echo and isatty(sys.stdout):\n sys.stdout.write(ch)\n _translate_ch_to_exc(ch)\n return ch\n", "path": "src/click/_termui_impl.py"}], "after_files": [{"content": "\"\"\"\nThis module contains implementations for the termui module. 
To keep the\nimport time of Click down, some infrequently used functionality is\nplaced in this module and only imported as needed.\n\"\"\"\nimport contextlib\nimport math\nimport os\nimport sys\nimport time\nfrom gettext import gettext as _\n\nfrom ._compat import _default_text_stdout\nfrom ._compat import CYGWIN\nfrom ._compat import get_best_encoding\nfrom ._compat import isatty\nfrom ._compat import open_stream\nfrom ._compat import strip_ansi\nfrom ._compat import term_len\nfrom ._compat import WIN\nfrom .exceptions import ClickException\nfrom .utils import echo\n\nif os.name == \"nt\":\n BEFORE_BAR = \"\\r\"\n AFTER_BAR = \"\\n\"\nelse:\n BEFORE_BAR = \"\\r\\033[?25l\"\n AFTER_BAR = \"\\033[?25h\\n\"\n\n\ndef _length_hint(obj):\n \"\"\"Returns the length hint of an object.\"\"\"\n try:\n return len(obj)\n except (AttributeError, TypeError):\n try:\n get_hint = type(obj).__length_hint__\n except AttributeError:\n return None\n try:\n hint = get_hint(obj)\n except TypeError:\n return None\n if hint is NotImplemented or not isinstance(hint, int) or hint < 0:\n return None\n return hint\n\n\nclass ProgressBar:\n def __init__(\n self,\n iterable,\n length=None,\n fill_char=\"#\",\n empty_char=\" \",\n bar_template=\"%(bar)s\",\n info_sep=\" \",\n show_eta=True,\n show_percent=None,\n show_pos=False,\n item_show_func=None,\n label=None,\n file=None,\n color=None,\n update_min_steps=1,\n width=30,\n ):\n self.fill_char = fill_char\n self.empty_char = empty_char\n self.bar_template = bar_template\n self.info_sep = info_sep\n self.show_eta = show_eta\n self.show_percent = show_percent\n self.show_pos = show_pos\n self.item_show_func = item_show_func\n self.label = label or \"\"\n if file is None:\n file = _default_text_stdout()\n self.file = file\n self.color = color\n self.update_min_steps = update_min_steps\n self._completed_intervals = 0\n self.width = width\n self.autowidth = width == 0\n\n if length is None:\n length = _length_hint(iterable)\n if iterable is None:\n if length is None:\n raise TypeError(\"iterable or length is required\")\n iterable = range(length)\n self.iter = iter(iterable)\n self.length = length\n self.length_known = length is not None\n self.pos = 0\n self.avg = []\n self.start = self.last_eta = time.time()\n self.eta_known = False\n self.finished = False\n self.max_width = None\n self.entered = False\n self.current_item = None\n self.is_hidden = not isatty(self.file)\n self._last_line = None\n\n def __enter__(self):\n self.entered = True\n self.render_progress()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.render_finish()\n\n def __iter__(self):\n if not self.entered:\n raise RuntimeError(\"You need to use progress bars in a with block.\")\n self.render_progress()\n return self.generator()\n\n def __next__(self):\n # Iteration is defined in terms of a generator function,\n # returned by iter(self); use that to define next(). This works\n # because `self.iter` is an iterable consumed by that generator,\n # so it is re-entry safe. 
Calling `next(self.generator())`\n # twice works and does \"what you want\".\n return next(iter(self))\n\n def render_finish(self):\n if self.is_hidden:\n return\n self.file.write(AFTER_BAR)\n self.file.flush()\n\n @property\n def pct(self):\n if self.finished:\n return 1.0\n return min(self.pos / (float(self.length) or 1), 1.0)\n\n @property\n def time_per_iteration(self):\n if not self.avg:\n return 0.0\n return sum(self.avg) / float(len(self.avg))\n\n @property\n def eta(self):\n if self.length_known and not self.finished:\n return self.time_per_iteration * (self.length - self.pos)\n return 0.0\n\n def format_eta(self):\n if self.eta_known:\n t = int(self.eta)\n seconds = t % 60\n t //= 60\n minutes = t % 60\n t //= 60\n hours = t % 24\n t //= 24\n if t > 0:\n return f\"{t}d {hours:02}:{minutes:02}:{seconds:02}\"\n else:\n return f\"{hours:02}:{minutes:02}:{seconds:02}\"\n return \"\"\n\n def format_pos(self):\n pos = str(self.pos)\n if self.length_known:\n pos += f\"/{self.length}\"\n return pos\n\n def format_pct(self):\n return f\"{int(self.pct * 100): 4}%\"[1:]\n\n def format_bar(self):\n if self.length_known:\n bar_length = int(self.pct * self.width)\n bar = self.fill_char * bar_length\n bar += self.empty_char * (self.width - bar_length)\n elif self.finished:\n bar = self.fill_char * self.width\n else:\n bar = list(self.empty_char * (self.width or 1))\n if self.time_per_iteration != 0:\n bar[\n int(\n (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)\n * self.width\n )\n ] = self.fill_char\n bar = \"\".join(bar)\n return bar\n\n def format_progress_line(self):\n show_percent = self.show_percent\n\n info_bits = []\n if self.length_known and show_percent is None:\n show_percent = not self.show_pos\n\n if self.show_pos:\n info_bits.append(self.format_pos())\n if show_percent:\n info_bits.append(self.format_pct())\n if self.show_eta and self.eta_known and not self.finished:\n info_bits.append(self.format_eta())\n if self.item_show_func is not None:\n item_info = self.item_show_func(self.current_item)\n if item_info is not None:\n info_bits.append(item_info)\n\n return (\n self.bar_template\n % {\n \"label\": self.label,\n \"bar\": self.format_bar(),\n \"info\": self.info_sep.join(info_bits),\n }\n ).rstrip()\n\n def render_progress(self):\n import shutil\n\n if self.is_hidden:\n # Only output the label as it changes if the output is not a\n # TTY. 
Use file=stderr if you expect to be piping stdout.\n if self._last_line != self.label:\n self._last_line = self.label\n echo(self.label, file=self.file, color=self.color)\n\n return\n\n buf = []\n # Update width in case the terminal has been resized\n if self.autowidth:\n old_width = self.width\n self.width = 0\n clutter_length = term_len(self.format_progress_line())\n new_width = max(0, shutil.get_terminal_size().columns - clutter_length)\n if new_width < old_width:\n buf.append(BEFORE_BAR)\n buf.append(\" \" * self.max_width)\n self.max_width = new_width\n self.width = new_width\n\n clear_width = self.width\n if self.max_width is not None:\n clear_width = self.max_width\n\n buf.append(BEFORE_BAR)\n line = self.format_progress_line()\n line_len = term_len(line)\n if self.max_width is None or self.max_width < line_len:\n self.max_width = line_len\n\n buf.append(line)\n buf.append(\" \" * (clear_width - line_len))\n line = \"\".join(buf)\n # Render the line only if it changed.\n\n if line != self._last_line:\n self._last_line = line\n echo(line, file=self.file, color=self.color, nl=False)\n self.file.flush()\n\n def make_step(self, n_steps):\n self.pos += n_steps\n if self.length_known and self.pos >= self.length:\n self.finished = True\n\n if (time.time() - self.last_eta) < 1.0:\n return\n\n self.last_eta = time.time()\n\n # self.avg is a rolling list of length <= 7 of steps where steps are\n # defined as time elapsed divided by the total progress through\n # self.length.\n if self.pos:\n step = (time.time() - self.start) / self.pos\n else:\n step = time.time() - self.start\n\n self.avg = self.avg[-6:] + [step]\n\n self.eta_known = self.length_known\n\n def update(self, n_steps, current_item=None):\n \"\"\"Update the progress bar by advancing a specified number of\n steps, and optionally set the ``current_item`` for this new\n position.\n\n :param n_steps: Number of steps to advance.\n :param current_item: Optional item to set as ``current_item``\n for the updated position.\n\n .. versionchanged:: 8.0\n Added the ``current_item`` optional parameter.\n\n .. versionchanged:: 8.0\n Only render when the number of steps meets the\n ``update_min_steps`` threshold.\n \"\"\"\n if current_item is not None:\n self.current_item = current_item\n\n self._completed_intervals += n_steps\n\n if self._completed_intervals >= self.update_min_steps:\n self.make_step(self._completed_intervals)\n self.render_progress()\n self._completed_intervals = 0\n\n def finish(self):\n self.eta_known = 0\n self.current_item = None\n self.finished = True\n\n def generator(self):\n \"\"\"Return a generator which yields the items added to the bar\n during construction, and updates the progress bar *after* the\n yielded block returns.\n \"\"\"\n # WARNING: the iterator interface for `ProgressBar` relies on\n # this and only works because this is a simple generator which\n # doesn't create or manage additional state. If this function\n # changes, the impact should be evaluated both against\n # `iter(bar)` and `next(bar)`. `next()` in particular may call\n # `self.generator()` repeatedly, and this must remain safe in\n # order for that interface to work.\n if not self.entered:\n raise RuntimeError(\"You need to use progress bars in a with block.\")\n\n if self.is_hidden:\n yield from self.iter\n else:\n for rv in self.iter:\n self.current_item = rv\n\n # This allows show_item_func to be updated before the\n # item is processed. 
Only trigger at the beginning of\n # the update interval.\n if self._completed_intervals == 0:\n self.render_progress()\n\n yield rv\n self.update(1)\n\n self.finish()\n self.render_progress()\n\n\ndef pager(generator, color=None):\n \"\"\"Decide what method to use for paging through text.\"\"\"\n stdout = _default_text_stdout()\n if not isatty(sys.stdin) or not isatty(stdout):\n return _nullpager(stdout, generator, color)\n pager_cmd = (os.environ.get(\"PAGER\", None) or \"\").strip()\n if pager_cmd:\n if WIN:\n return _tempfilepager(generator, pager_cmd, color)\n return _pipepager(generator, pager_cmd, color)\n if os.environ.get(\"TERM\") in (\"dumb\", \"emacs\"):\n return _nullpager(stdout, generator, color)\n if WIN or sys.platform.startswith(\"os2\"):\n return _tempfilepager(generator, \"more <\", color)\n if hasattr(os, \"system\") and os.system(\"(less) 2>/dev/null\") == 0:\n return _pipepager(generator, \"less\", color)\n\n import tempfile\n\n fd, filename = tempfile.mkstemp()\n os.close(fd)\n try:\n if hasattr(os, \"system\") and os.system(f'more \"{filename}\"') == 0:\n return _pipepager(generator, \"more\", color)\n return _nullpager(stdout, generator, color)\n finally:\n os.unlink(filename)\n\n\ndef _pipepager(generator, cmd, color):\n \"\"\"Page through text by feeding it to another program. Invoking a\n pager through this might support colors.\n \"\"\"\n import subprocess\n\n env = dict(os.environ)\n\n # If we're piping to less we might support colors under the\n # condition that\n cmd_detail = cmd.rsplit(\"/\", 1)[-1].split()\n if color is None and cmd_detail[0] == \"less\":\n less_flags = f\"{os.environ.get('LESS', '')}{' '.join(cmd_detail[1:])}\"\n if not less_flags:\n env[\"LESS\"] = \"-R\"\n color = True\n elif \"r\" in less_flags or \"R\" in less_flags:\n color = True\n\n c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)\n encoding = get_best_encoding(c.stdin)\n try:\n for text in generator:\n if not color:\n text = strip_ansi(text)\n\n c.stdin.write(text.encode(encoding, \"replace\"))\n except (OSError, KeyboardInterrupt):\n pass\n else:\n c.stdin.close()\n\n # Less doesn't respect ^C, but catches it for its own UI purposes (aborting\n # search or other commands inside less).\n #\n # That means when the user hits ^C, the parent process (click) terminates,\n # but less is still alive, paging the output and messing up the terminal.\n #\n # If the user wants to make the pager exit on ^C, they should set\n # `LESS='-K'`. It's not our decision to make.\n while True:\n try:\n c.wait()\n except KeyboardInterrupt:\n pass\n else:\n break\n\n\ndef _tempfilepager(generator, cmd, color):\n \"\"\"Page through text by invoking a program on a temporary file.\"\"\"\n import tempfile\n\n filename = tempfile.mkstemp()\n # TODO: This never terminates if the passed generator never terminates.\n text = \"\".join(generator)\n if not color:\n text = strip_ansi(text)\n encoding = get_best_encoding(sys.stdout)\n with open_stream(filename, \"wb\")[0] as f:\n f.write(text.encode(encoding))\n try:\n os.system(f'{cmd} \"{filename}\"')\n finally:\n os.unlink(filename)\n\n\ndef _nullpager(stream, generator, color):\n \"\"\"Simply print unformatted text. 
This is the ultimate fallback.\"\"\"\n for text in generator:\n if not color:\n text = strip_ansi(text)\n stream.write(text)\n\n\nclass Editor:\n def __init__(self, editor=None, env=None, require_save=True, extension=\".txt\"):\n self.editor = editor\n self.env = env\n self.require_save = require_save\n self.extension = extension\n\n def get_editor(self):\n if self.editor is not None:\n return self.editor\n for key in \"VISUAL\", \"EDITOR\":\n rv = os.environ.get(key)\n if rv:\n return rv\n if WIN:\n return \"notepad\"\n for editor in \"sensible-editor\", \"vim\", \"nano\":\n if os.system(f\"which {editor} >/dev/null 2>&1\") == 0:\n return editor\n return \"vi\"\n\n def edit_file(self, filename):\n import subprocess\n\n editor = self.get_editor()\n if self.env:\n environ = os.environ.copy()\n environ.update(self.env)\n else:\n environ = None\n try:\n c = subprocess.Popen(f'{editor} \"{filename}\"', env=environ, shell=True)\n exit_code = c.wait()\n if exit_code != 0:\n raise ClickException(\n _(\"{editor}: Editing failed\").format(editor=editor)\n )\n except OSError as e:\n raise ClickException(\n _(\"{editor}: Editing failed: {e}\").format(editor=editor, e=e)\n )\n\n def edit(self, text):\n import tempfile\n\n if not text:\n text = \"\"\n\n is_bytes = isinstance(text, (bytes, bytearray))\n\n if not is_bytes:\n if text and not text.endswith(\"\\n\"):\n text += \"\\n\"\n\n if WIN:\n text = text.replace(\"\\n\", \"\\r\\n\").encode(\"utf-8-sig\")\n else:\n text = text.encode(\"utf-8\")\n\n fd, name = tempfile.mkstemp(prefix=\"editor-\", suffix=self.extension)\n\n try:\n with os.fdopen(fd, \"wb\") as f:\n f.write(text)\n\n # If the filesystem resolution is 1 second, like Mac OS\n # 10.12 Extended, or 2 seconds, like FAT32, and the editor\n # closes very fast, require_save can fail. 
Set the modified\n # time to be 2 seconds in the past to work around this.\n os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2))\n # Depending on the resolution, the exact value might not be\n # recorded, so get the new recorded value.\n timestamp = os.path.getmtime(name)\n\n self.edit_file(name)\n\n if self.require_save and os.path.getmtime(name) == timestamp:\n return None\n\n with open(name, \"rb\") as f:\n rv = f.read()\n\n if is_bytes:\n return rv\n\n return rv.decode(\"utf-8-sig\").replace(\"\\r\\n\", \"\\n\")\n finally:\n os.unlink(name)\n\n\ndef open_url(url, wait=False, locate=False):\n import subprocess\n\n def _unquote_file(url: str) -> str:\n from urllib.parse import unquote\n\n if url.startswith(\"file://\"):\n url = unquote(url[7:])\n\n return url\n\n if sys.platform == \"darwin\":\n args = [\"open\"]\n if wait:\n args.append(\"-W\")\n if locate:\n args.append(\"-R\")\n args.append(_unquote_file(url))\n null = open(\"/dev/null\", \"w\")\n try:\n return subprocess.Popen(args, stderr=null).wait()\n finally:\n null.close()\n elif WIN:\n if locate:\n url = _unquote_file(url.replace('\"', \"\"))\n args = f'explorer /select,\"{url}\"'\n else:\n url = url.replace('\"', \"\")\n wait = \"/WAIT\" if wait else \"\"\n args = f'start {wait} \"\" \"{url}\"'\n return os.system(args)\n elif CYGWIN:\n if locate:\n url = os.path.dirname(_unquote_file(url).replace('\"', \"\"))\n args = f'cygstart \"{url}\"'\n else:\n url = url.replace('\"', \"\")\n wait = \"-w\" if wait else \"\"\n args = f'cygstart {wait} \"{url}\"'\n return os.system(args)\n\n try:\n if locate:\n url = os.path.dirname(_unquote_file(url)) or \".\"\n else:\n url = _unquote_file(url)\n c = subprocess.Popen([\"xdg-open\", url])\n if wait:\n return c.wait()\n return 0\n except OSError:\n if url.startswith((\"http://\", \"https://\")) and not locate and not wait:\n import webbrowser\n\n webbrowser.open(url)\n return 0\n return 1\n\n\ndef _translate_ch_to_exc(ch):\n if ch == \"\\x03\":\n raise KeyboardInterrupt()\n if ch == \"\\x04\" and not WIN: # Unix-like, Ctrl+D\n raise EOFError()\n if ch == \"\\x1a\" and WIN: # Windows, Ctrl+Z\n raise EOFError()\n\n\nif WIN:\n import msvcrt\n\n @contextlib.contextmanager\n def raw_terminal():\n yield\n\n def getchar(echo):\n # The function `getch` will return a bytes object corresponding to\n # the pressed character. Since Windows 10 build 1803, it will also\n # return \\x00 when called a second time after pressing a regular key.\n #\n # `getwch` does not share this probably-bugged behavior. Moreover, it\n # returns a Unicode object by default, which is what we want.\n #\n # Either of these functions will return \\x00 or \\xe0 to indicate\n # a special key, and you need to call the same function again to get\n # the \"rest\" of the code. The fun part is that \\u00e0 is\n # \"latin small letter a with grave\", so if you type that on a French\n # keyboard, you _also_ get a \\xe0.\n # E.g., consider the Up arrow. This returns \\xe0 and then \\x48. 
The\n # resulting Unicode string reads as \"a with grave\" + \"capital H\".\n # This is indistinguishable from when the user actually types\n # \"a with grave\" and then \"capital H\".\n #\n # When \\xe0 is returned, we assume it's part of a special-key sequence\n # and call `getwch` again, but that means that when the user types\n # the \\u00e0 character, `getchar` doesn't return until a second\n # character is typed.\n # The alternative is returning immediately, but that would mess up\n # cross-platform handling of arrow keys and others that start with\n # \\xe0. Another option is using `getch`, but then we can't reliably\n # read non-ASCII characters, because return values of `getch` are\n # limited to the current 8-bit codepage.\n #\n # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`\n # is doing the right thing in more situations than with `getch`.\n if echo:\n func = msvcrt.getwche\n else:\n func = msvcrt.getwch\n\n rv = func()\n if rv in (\"\\x00\", \"\\xe0\"):\n # \\x00 and \\xe0 are control characters that indicate special key,\n # see above.\n rv += func()\n _translate_ch_to_exc(rv)\n return rv\n\n\nelse:\n import tty\n import termios\n\n @contextlib.contextmanager\n def raw_terminal():\n if not isatty(sys.stdin):\n f = open(\"/dev/tty\")\n fd = f.fileno()\n else:\n fd = sys.stdin.fileno()\n f = None\n try:\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(fd)\n yield fd\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n sys.stdout.flush()\n if f is not None:\n f.close()\n except termios.error:\n pass\n\n def getchar(echo):\n with raw_terminal() as fd:\n ch = os.read(fd, 32)\n ch = ch.decode(get_best_encoding(sys.stdin), \"replace\")\n if echo and isatty(sys.stdout):\n sys.stdout.write(ch)\n _translate_ch_to_exc(ch)\n return ch\n", "path": "src/click/_termui_impl.py"}]} |
gh_patches_debug_1466 | rasdani/github-patches | git_diff | google__turbinia-637 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash when running locally
```
$ turbiniactl -t SSHDAnalysisTask -R rawdisk -l dfchecklist.img
[INFO] Turbinia version: 20190819
[INFO] Creating request 5d50f281e7fc4a24bd88993ad8bb34a9 with evidence dfchecklist.img
[INFO] Run command "turbiniactl status -r 5d50f281e7fc4a24bd88993ad8bb34a9" to see the status of this request and associated tasks
[INFO] Running Task SSHDAnalysisTask locally
Traceback (most recent call last):
File "/usr/local/google/home/romaing/venvs/turbinia/bin/turbiniactl", line 11, in <module>
load_entry_point('turbinia==20190819', 'console_scripts', 'turbiniactl')()
File "/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/turbiniactl.py", line 813, in main
result = client.run_local_task(args.task, request)
File "/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/client.py", line 1020, in run_local_task
result = task.run_wrapper(request.evidence[0])
File "/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/workers/__init__.py", line 705, in run_wrapper
evidence = evidence_decode(evidence)
File "/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/evidence.py", line 56, in evidence_decode
raise TurbiniaException(
turbinia.TurbiniaException: Evidence_dict is not a dictionary, type is <class 'turbinia.evidence.RawDisk'>
```
--- END ISSUE ---
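For context, the traceback shows that `TurbiniaTask.run_wrapper` received a `RawDisk` instance while `evidence_decode` only accepts a serialized dictionary. Below is a minimal, hypothetical sketch of the kind of change that would avoid the type mismatch; it assumes the `Evidence` base class exposes a `serialize()` method returning a plain dict (an assumption for illustration, not a confirmed fix).

```python
# Hypothetical sketch of BaseTurbiniaClient.run_local_task; not the project's
# confirmed fix. Assumes Evidence.serialize() returns the dictionary that
# evidence_decode() expects on the worker side.
def run_local_task(self, task_name, request):
    task = self.create_task(task_name)
    task.request_id = request.request_id
    task.base_output_dir = config.OUTPUT_DIR
    task.run_local = True
    if not request.evidence:
        raise TurbiniaException('TurbiniaRequest does not contain evidence.')
    log.info('Running Task {0:s} locally'.format(task_name))
    # Serialize the Evidence object before it crosses the task boundary so that
    # evidence_decode() receives a dict rather than a RawDisk instance.
    result = task.run_wrapper(request.evidence[0].serialize())
    return result
```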
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/client.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2017 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Client objects for Turbinia."""
16
17 from __future__ import unicode_literals
18
19 from collections import defaultdict
20 from datetime import datetime
21 from datetime import timedelta
22
23 import httplib2
24 import json
25 import logging
26 from operator import itemgetter
27 from operator import attrgetter
28 import os
29 import stat
30 import time
31 import subprocess
32 import codecs
33
34 from google import auth
35 from prometheus_client import start_http_server
36 from turbinia import config
37 from turbinia.config import logger
38 from turbinia.config import DATETIME_FORMAT
39 from turbinia import task_manager
40 from turbinia import TurbiniaException
41 from turbinia.lib import text_formatter as fmt
42 from turbinia.lib import docker_manager
43 from turbinia.jobs import manager as job_manager
44 from turbinia.workers import Priority
45 from turbinia.workers.artifact import FileArtifactExtractionTask
46 from turbinia.workers.analysis.wordpress import WordpressAccessLogAnalysisTask
47 from turbinia.workers.analysis.jenkins import JenkinsAnalysisTask
48 from turbinia.workers.analysis.jupyter import JupyterAnalysisTask
49 from turbinia.workers.finalize_request import FinalizeRequestTask
50 from turbinia.workers.docker import DockerContainersEnumerationTask
51 from turbinia.workers.grep import GrepTask
52 from turbinia.workers.hadoop import HadoopAnalysisTask
53 from turbinia.workers.hindsight import HindsightTask
54 from turbinia.workers.partitions import PartitionEnumerationTask
55 from turbinia.workers.plaso import PlasoTask
56 from turbinia.workers.psort import PsortTask
57 from turbinia.workers.redis import RedisAnalysisTask
58 from turbinia.workers.sshd import SSHDAnalysisTask
59 from turbinia.workers.strings import StringsAsciiTask
60 from turbinia.workers.strings import StringsUnicodeTask
61 from turbinia.workers.tomcat import TomcatAnalysisTask
62 from turbinia.workers.volatility import VolatilityTask
63 from turbinia.workers.worker_stat import StatTask
64 from turbinia.workers.binary_extractor import BinaryExtractorTask
65 from turbinia.workers.bulk_extractor import BulkExtractorTask
66 from turbinia.workers.photorec import PhotorecTask
67
68 MAX_RETRIES = 10
69 RETRY_SLEEP = 60
70
71 # TODO(aarontp): Remove this map after
72 # https://github.com/google/turbinia/issues/278 is fixed.
73 TASK_MAP = {
74 'fileartifactextractiontask': FileArtifactExtractionTask,
75 'wordpressaccessloganalysistask': WordpressAccessLogAnalysisTask,
76 'finalizerequesttask': FinalizeRequestTask,
77 'jenkinsanalysistask': JenkinsAnalysisTask,
78     'jupyteranalysistask': JupyterAnalysisTask,
79 'greptask': GrepTask,
80 'hadoopanalysistask': HadoopAnalysisTask,
81 'hindsighttask': HindsightTask,
82 'partitionenumerationtask': PartitionEnumerationTask,
83 'plasotask': PlasoTask,
84 'psorttask': PsortTask,
85 'redisanalysistask': RedisAnalysisTask,
86 'sshdanalysistask': SSHDAnalysisTask,
87 'stringsasciitask': StringsAsciiTask,
88 'stringsunicodetask': StringsUnicodeTask,
89 'tomcatanalysistask': TomcatAnalysisTask,
90 'volatilitytask': VolatilityTask,
91 'stattask': StatTask,
92 'binaryextractor': BinaryExtractorTask,
93 'bulkextractortask': BulkExtractorTask,
94 'dockertask': DockerContainersEnumerationTask,
95 'photorectask': PhotorecTask
96 }
97
98 config.LoadConfig()
99 if config.TASK_MANAGER.lower() == 'psq':
100 import psq
101
102 from google.cloud import exceptions
103 from google.cloud import datastore
104 from google.cloud import pubsub
105
106 from libcloudforensics.providers.gcp.internal import function as gcp_function
107 elif config.TASK_MANAGER.lower() == 'celery':
108 from turbinia.state_manager import RedisStateManager
109
110 log = logging.getLogger('turbinia')
111 logger.setup()
112
113
114 def get_turbinia_client(run_local=False):
115 """Return Turbinia client based on config.
116
117 Returns:
118 Initialized BaseTurbiniaClient or TurbiniaCeleryClient object.
119 """
120 config.LoadConfig()
121 # pylint: disable=no-else-return
122 if config.TASK_MANAGER.lower() == 'psq':
123 return BaseTurbiniaClient(run_local=run_local)
124 elif config.TASK_MANAGER.lower() == 'celery':
125 return TurbiniaCeleryClient(run_local=run_local)
126 else:
127 msg = 'Task Manager type "{0:s}" not implemented'.format(
128 config.TASK_MANAGER)
129 raise TurbiniaException(msg)
130
131
132 def check_docker_dependencies(dependencies):
133 """Checks docker dependencies.
134
135 Args:
136 dependencies(dict): dictionary of dependencies to check for.
137
138 Raises:
139 TurbiniaException: If dependency is not met.
140 """
141 #TODO(wyassine): may run into issues down the line when a docker image
142 # does not have bash or which installed. (no linux fs layer).
143 log.info('Performing docker dependency check.')
144 job_names = list(job_manager.JobsManager.GetJobNames())
145 images = docker_manager.DockerManager().list_images(return_filter='short_id')
146
147 # Iterate through list of jobs
148 for job, values in dependencies.items():
149 if job not in job_names:
150 log.warning(
151 'The job {0:s} was not found or has been disabled. Skipping '
152 'dependency check...'.format(job))
153 continue
154 docker_image = values.get('docker_image')
155 # short id only pulls the first 10 characters of image id.
156 if docker_image and len(docker_image) > 10:
157 docker_image = docker_image[0:10]
158
159 if docker_image in images:
160 for program in values['programs']:
161 cmd = 'type {0:s}'.format(program)
162 stdout, stderr, ret = docker_manager.ContainerManager(
163 values['docker_image']).execute_container(cmd, shell=True)
164 if ret != 0:
165 raise TurbiniaException(
166 'Job dependency {0:s} not found for job {1:s}. Please install '
167 'the dependency for the container or disable the job.'.format(
168 program, job))
169 job_manager.JobsManager.RegisterDockerImage(job, values['docker_image'])
170 elif docker_image:
171 raise TurbiniaException(
172 'Docker image {0:s} was not found for the job {1:s}. Please '
173 'update the config with the correct image id'.format(
174 values['docker_image'], job))
175
176
177 def check_system_dependencies(dependencies):
178 """Checks system dependencies.
179
180 Args:
181 dependencies(dict): dictionary of dependencies to check for.
182
183 Raises:
184 TurbiniaException: If dependency is not met.
185 """
186 log.info('Performing system dependency check.')
187 job_names = list(job_manager.JobsManager.GetJobNames())
188
189 # Iterate through list of jobs
190 for job, values in dependencies.items():
191 if job not in job_names:
192 log.warning(
193 'The job {0:s} was not found or has been disabled. Skipping '
194 'dependency check...'.format(job))
195 continue
196 elif not values.get('docker_image'):
197 for program in values['programs']:
198 cmd = 'type {0:s}'.format(program)
199 proc = subprocess.Popen(cmd, shell=True)
200 proc.communicate()
201 ret = proc.returncode
202 if ret != 0:
203 raise TurbiniaException(
204 'Job dependency {0:s} not found in $PATH for the job {1:s}. '
205 'Please install the dependency or disable the job.'.format(
206 program, job))
207
208
209 def check_directory(directory):
210 """Checks directory to make sure it exists and is writable.
211
212 Args:
213 directory (string): Path to directory
214
215 Raises:
216 TurbiniaException: When directory cannot be created or used.
217 """
218 if os.path.exists(directory) and not os.path.isdir(directory):
219 raise TurbiniaException(
220 'File {0:s} exists, but is not a directory'.format(directory))
221
222 if not os.path.exists(directory):
223 try:
224 os.makedirs(directory)
225 except OSError:
226 raise TurbiniaException(
227 'Can not create Directory {0:s}'.format(directory))
228
229 if not os.access(directory, os.W_OK):
230 try:
231 mode = os.stat(directory)[0]
232 os.chmod(directory, mode | stat.S_IWUSR)
233 except OSError:
234 raise TurbiniaException(
235 'Can not add write permissions to {0:s}'.format(directory))
236
237
238 class TurbiniaStats(object):
239 """Statistics for Turbinia task execution.
240
241 Attributes:
242 count(int): The number of tasks
243 min(datetime.timedelta): The minimum run time of all tasks
244 max(datetime.timedelta): The maximum run time of all tasks
245 mean(datetime.timedelta): The mean run time of all tasks
246 tasks(list): A list of tasks to calculate stats for
247 """
248
249 def __init__(self, description=None):
250 self.description = description
251 self.min = None
252 self.mean = None
253 self.max = None
254 self.tasks = []
255
256 def __str__(self):
257 return self.format_stats()
258
259 @property
260 def count(self):
261 """Gets a count of the tasks in this stats object.
262
263 Returns:
264 Int of task count.
265 """
266 return len(self.tasks)
267
268 def add_task(self, task):
269 """Add a task result dict.
270
271 Args:
272 task(dict): The task results we want to count stats for.
273 """
274 self.tasks.append(task)
275
276 def calculate_stats(self):
277 """Calculates statistics of the current tasks."""
278 if not self.tasks:
279 return
280
281 sorted_tasks = sorted(self.tasks, key=itemgetter('run_time'))
282 self.min = sorted_tasks[0]['run_time']
283 self.max = sorted_tasks[len(sorted_tasks) - 1]['run_time']
284 self.mean = sorted_tasks[len(sorted_tasks) // 2]['run_time']
285
286 # Remove the microseconds to keep things cleaner
287 self.min = self.min - timedelta(microseconds=self.min.microseconds)
288 self.max = self.max - timedelta(microseconds=self.max.microseconds)
289 self.mean = self.mean - timedelta(microseconds=self.mean.microseconds)
290
291 def format_stats(self):
292 """Formats statistics data.
293
294 Returns:
295 String of statistics data
296 """
297 return '{0:s}: Count: {1:d}, Min: {2!s}, Mean: {3!s}, Max: {4!s}'.format(
298 self.description, self.count, self.min, self.mean, self.max)
299
300 def format_stats_csv(self):
301 """Formats statistics data into CSV output.
302
303 Returns:
304 String of statistics data in CSV format
305 """
306 return '{0:s}, {1:d}, {2!s}, {3!s}, {4!s}'.format(
307 self.description, self.count, self.min, self.mean, self.max)
308
309
310 class BaseTurbiniaClient(object):
311 """Client class for Turbinia.
312
313 Attributes:
314 task_manager (TaskManager): Turbinia task manager
315 """
316
317 def __init__(self, run_local=False):
318 config.LoadConfig()
319 if run_local:
320 self.task_manager = None
321 else:
322 self.task_manager = task_manager.get_task_manager()
323 self.task_manager.setup(server=False)
324
325 def create_task(self, task_name):
326 """Creates a Turbinia Task by name.
327
328 Args:
329 task_name(string): Name of the Task we are going to run.
330
331 Returns:
332 TurbiniaTask: An instantiated Task object.
333
334 Raises:
335 TurbiniaException: When no Task object matching task_name is found.
336 """
337 task_obj = TASK_MAP.get(task_name.lower())
338 log.debug('Looking up Task {0:s} by name'.format(task_name))
339 if not task_obj:
340 raise TurbiniaException('No Task named {0:s} found'.format(task_name))
341 return task_obj()
342
343 def list_jobs(self):
344 """List the available jobs."""
345 # TODO(aarontp): Refactor this out so that we don't need to depend on
346 # the task manager from the client.
347 log.info('Available Jobs:')
348 for job in self.task_manager.jobs:
349 log.info('\t{0:s}'.format(job.NAME))
350
351 def wait_for_request(
352 self, instance, project, region, request_id=None, user=None,
353 poll_interval=60):
354 """Polls and waits for Turbinia Request to complete.
355
356 Args:
357 instance (string): The Turbinia instance name (by default the same as the
358 INSTANCE_ID in the config).
359 project (string): The name of the project.
360 region (string): The name of the region to execute in.
361 request_id (string): The Id of the request we want tasks for.
362 user (string): The user of the request we want tasks for.
363 poll_interval (int): Interval of seconds between polling cycles.
364 """
365 last_completed_count = -1
366 last_uncompleted_count = -1
367 while True:
368 task_results = self.get_task_data(
369 instance, project, region, request_id=request_id, user=user)
370 completed_tasks = []
371 uncompleted_tasks = []
372 for task in task_results:
373 if task.get('successful') is not None:
374 completed_tasks.append(task)
375 else:
376 uncompleted_tasks.append(task)
377
378 if completed_tasks and len(completed_tasks) == len(task_results):
379 break
380
381 completed_names = [t.get('name') for t in completed_tasks]
382 completed_names = ', '.join(sorted(completed_names))
383 uncompleted_names = [t.get('name') for t in uncompleted_tasks]
384 uncompleted_names = ', '.join(sorted(uncompleted_names))
385 total_count = len(completed_tasks) + len(uncompleted_tasks)
386 msg = (
387 'Tasks completed ({0:d}/{1:d}): [{2:s}], waiting for [{3:s}].'.format(
388 len(completed_tasks), total_count, completed_names,
389 uncompleted_names))
390 if (len(completed_tasks) > last_completed_count or
391 len(uncompleted_tasks) > last_uncompleted_count):
392 log.info(msg)
393 else:
394 log.debug(msg)
395
396 last_completed_count = len(completed_tasks)
397 last_uncompleted_count = len(uncompleted_tasks)
398 time.sleep(poll_interval)
399
400 log.info('All {0:d} Tasks completed'.format(len(task_results)))
401
402 def get_task_data(
403 self, instance, project, region, days=0, task_id=None, request_id=None,
404 user=None, function_name='gettasks', output_json=False):
405 """Gets task data from Google Cloud Functions.
406
407 Args:
408 instance (string): The Turbinia instance name (by default the same as the
409 INSTANCE_ID in the config).
410 project (string): The name of the project.
411 region (string): The name of the region to execute in.
412 days (int): The number of days we want history for.
413 task_id (string): The Id of the task.
414 request_id (string): The Id of the request we want tasks for.
415 user (string): The user of the request we want tasks for.
416 function_name (string): The GCF function we want to call.
417 output_json (bool): Whether to return JSON output.
418
419 Returns:
420 (List|JSON string) of Task dict objects
421 """
422 cloud_function = gcp_function.GoogleCloudFunction(project)
423 func_args = {'instance': instance, 'kind': 'TurbiniaTask'}
424
425 if days:
426 start_time = datetime.now() - timedelta(days=days)
427 # Format this like '1990-01-01T00:00:00z' so we can cast it directly to a
428 # javascript Date() object in the cloud function.
429 start_string = start_time.strftime(DATETIME_FORMAT)
430 func_args.update({'start_time': start_string})
431 elif task_id:
432 func_args.update({'task_id': task_id})
433 elif request_id:
434 func_args.update({'request_id': request_id})
435
436 if user:
437 func_args.update({'user': user})
438
439 response = None
440 retry_count = 0
441 credential_error_count = 0
442 while response is None and retry_count < MAX_RETRIES:
443 try:
444 response = cloud_function.ExecuteFunction(
445 function_name, region, func_args)
446 except auth.exceptions.RefreshError as exception:
447 if credential_error_count == 0:
448 log.info(
449 'GCP Credentials need to be refreshed, please refresh in another '
450 'terminal and this process will resume. Error: {0!s}'.format(
451 exception))
452 else:
453 log.debug(
454 'GCP Credentials need to be refreshed, please refresh in another '
455 'terminal and this process will resume. Attempt {0:d}. Error: '
456 '{1!s}'.format(credential_error_count + 1, exception))
457         # Note, we are intentionally not incrementing the retry_count here because
458 # we will retry indefinitely while we wait for the user to reauth.
459 credential_error_count += 1
460 except httplib2.ServerNotFoundError as exception:
461 log.info(
462 'Error connecting to server, will retry [{0:d} of {1:d} retries]: '
463 '{2!s}'.format(retry_count, MAX_RETRIES, exception))
464 retry_count += 1
465
466 if response is None:
467 time.sleep(RETRY_SLEEP)
468
469 if 'result' not in response:
470 log.error('No results found')
471 if response.get('error', '{}') != '{}':
472 msg = 'Error executing Cloud Function: [{0!s}].'.format(
473 response.get('error'))
474 log.error(msg)
475 log.debug('GCF response: {0!s}'.format(response))
476 raise TurbiniaException(
477 'Cloud Function {0:s} returned no results.'.format(function_name))
478
479 try:
480 results = json.loads(response['result'])
481 except (TypeError, ValueError) as e:
482 raise TurbiniaException(
483 'Could not deserialize result [{0!s}] from GCF: [{1!s}]'.format(
484 response.get('result'), e))
485
486 task_data = results[0]
487 if output_json:
488 try:
489 json_data = json.dumps(task_data)
490 except (TypeError, ValueError) as e:
491 raise TurbiniaException(
492 'Could not re-serialize result [{0!s}] from GCF: [{1!s}]'.format(
493 str(task_data), e))
494 return json_data
495
496 # Convert run_time/last_update back into datetime objects
497 for task in task_data:
498 if task.get('run_time'):
499 task['run_time'] = timedelta(seconds=task['run_time'])
500 if task.get('last_update'):
501 task['last_update'] = datetime.strptime(
502 task['last_update'], DATETIME_FORMAT)
503
504 return task_data
505
506 def format_task_detail(self, task, show_files=False):
507 """Formats a single task in detail.
508
509 Args:
510 task (dict): The task to format data for
511 show_files (bool): Whether we want to print out log file paths
512
513 Returns:
514 list: Formatted task data
515 """
516 report = []
517 saved_paths = task.get('saved_paths') or []
518 status = task.get('status') or 'No task status'
519
520 report.append(fmt.heading2(task.get('name')))
521 line = '{0:s} {1:s}'.format(fmt.bold('Status:'), status)
522 report.append(fmt.bullet(line))
523 report.append(fmt.bullet('Task Id: {0:s}'.format(task.get('id'))))
524 report.append(
525 fmt.bullet('Executed on worker {0:s}'.format(task.get('worker_name'))))
526 if task.get('report_data'):
527 report.append('')
528 report.append(fmt.heading3('Task Reported Data'))
529 report.extend(task.get('report_data').splitlines())
530 if show_files:
531 report.append('')
532 report.append(fmt.heading3('Saved Task Files:'))
533 for path in saved_paths:
534 report.append(fmt.bullet(fmt.code(path)))
535 report.append('')
536 return report
537
538 def format_worker_task(self, task):
539 """Formats a single task for Worker view.
540
541 Args:
542 task (dict): The task to format data for
543 Returns:
544 list: Formatted task data
545 """
546 report = []
547 report.append(
548 fmt.bullet('{0:s} - {1:s}'.format(task['task_id'], task['task_name'])))
549 report.append(
550 fmt.bullet(
551 'Last Update: {0:s}'.format(
552 task['last_update'].strftime(DATETIME_FORMAT)), level=2))
553 report.append(fmt.bullet('Status: {0:s}'.format(task['status']), level=2))
554 report.append(
555 fmt.bullet('Run Time: {0:s}'.format(str(task['run_time'])), level=2))
556 report.append('')
557 return report
558
559 def format_task(self, task, show_files=False):
560 """Formats a single task in short form.
561
562 Args:
563 task (dict): The task to format data for
564 show_files (bool): Whether we want to print out log file paths
565
566 Returns:
567 list: Formatted task data
568 """
569 report = []
570 saved_paths = task.get('saved_paths') or []
571 status = task.get('status') or 'No task status'
572 report.append(fmt.bullet('{0:s}: {1:s}'.format(task.get('name'), status)))
573 if show_files:
574 for path in saved_paths:
575 report.append(fmt.bullet(fmt.code(path), level=2))
576 report.append('')
577 return report
578
579 def get_task_statistics(
580 self, instance, project, region, days=0, task_id=None, request_id=None,
581 user=None):
582 """Gathers statistics for Turbinia execution data.
583
584 Args:
585 instance (string): The Turbinia instance name (by default the same as the
586 INSTANCE_ID in the config).
587 project (string): The name of the project.
588 region (string): The name of the zone to execute in.
589 days (int): The number of days we want history for.
590 task_id (string): The Id of the task.
591 request_id (string): The Id of the request we want tasks for.
592 user (string): The user of the request we want tasks for.
593
594 Returns:
595 task_stats(dict): Mapping of statistic names to values
596 """
597 task_results = self.get_task_data(
598 instance, project, region, days, task_id, request_id, user)
599 if not task_results:
600 return {}
601
602 task_stats = {
603 'all_tasks': TurbiniaStats('All Tasks'),
604 'successful_tasks': TurbiniaStats('Successful Tasks'),
605 'failed_tasks': TurbiniaStats('Failed Tasks'),
606 'requests': TurbiniaStats('Total Request Time'),
607 # The following are dicts mapping the user/worker/type names to their
608 # respective TurbiniaStats() objects.
609 # Total wall-time for all tasks of a given type
610 'tasks_per_type': {},
611 # Total wall-time for all tasks per Worker
612 'tasks_per_worker': {},
613 # Total wall-time for all tasks per User
614 'tasks_per_user': {},
615 }
616
617 # map of request ids to [min time, max time]
618 requests = {}
619
620 for task in task_results:
621 request_id = task.get('request_id')
622 task_type = task.get('name')
623 worker = task.get('worker_name')
624 user = task.get('requester')
625 if not task.get('run_time'):
626 log.debug(
627 'Ignoring task {0:s} in statistics because the run_time is not '
628 'set, and it is required to calculate stats'.format(
629 task.get('name')))
630 continue
631
632 # Stats for all/successful/failed tasks
633 task_stats['all_tasks'].add_task(task)
634 if task.get('successful') is True:
635 task_stats['successful_tasks'].add_task(task)
636 elif task.get('successful') is False:
637 task_stats['failed_tasks'].add_task(task)
638
639 # Stats for Tasks per Task type.
640 if task_type in task_stats['tasks_per_type']:
641 task_type_stats = task_stats['tasks_per_type'].get(task_type)
642 else:
643 task_type_stats = TurbiniaStats('Task type {0:s}'.format(task_type))
644 task_stats['tasks_per_type'][task_type] = task_type_stats
645 task_type_stats.add_task(task)
646
647 # Stats per worker.
648 if worker in task_stats['tasks_per_worker']:
649 worker_stats = task_stats['tasks_per_worker'].get(worker)
650 else:
651 worker_stats = TurbiniaStats('Worker {0:s}'.format(worker))
652 task_stats['tasks_per_worker'][worker] = worker_stats
653 worker_stats.add_task(task)
654
655 # Stats per submitting User.
656 if user in task_stats['tasks_per_user']:
657 user_stats = task_stats['tasks_per_user'].get(user)
658 else:
659 user_stats = TurbiniaStats('User {0:s}'.format(user))
660 task_stats['tasks_per_user'][user] = user_stats
661 user_stats.add_task(task)
662
663 # Stats for the total request. This will, for each request, calculate the
664 # start time of the earliest task and the stop time of the latest task.
665 # This will give the overall run time covering all tasks in the request.
666 task_start_time = task['last_update'] - task['run_time']
667 task_stop_time = task['last_update']
668 if request_id in requests:
669 start_time, stop_time = requests[request_id]
670 if task_start_time < start_time:
671 requests[request_id][0] = task_start_time
672 if task_stop_time > stop_time:
673 requests[request_id][1] = task_stop_time
674 else:
675 requests[request_id] = [task_start_time, task_stop_time]
676
677 # Add a fake task result for each request with our calculated times to the
678 # stats module
679 for min_time, max_time in requests.values():
680 task = {}
681 task['run_time'] = max_time - min_time
682 task_stats['requests'].add_task(task)
683
684 # Go over all stat objects and calculate them
685 for stat_obj in task_stats.values():
686 if isinstance(stat_obj, dict):
687 for inner_stat_obj in stat_obj.values():
688 inner_stat_obj.calculate_stats()
689 else:
690 stat_obj.calculate_stats()
691
692 return task_stats
693
694 def format_task_statistics(
695 self, instance, project, region, days=0, task_id=None, request_id=None,
696 user=None, csv=False):
697 """Formats statistics for Turbinia execution data.
698
699 Args:
700 instance (string): The Turbinia instance name (by default the same as the
701 INSTANCE_ID in the config).
702 project (string): The name of the project.
703 region (string): The name of the zone to execute in.
704 days (int): The number of days we want history for.
705 task_id (string): The Id of the task.
706 request_id (string): The Id of the request we want tasks for.
707 user (string): The user of the request we want tasks for.
708 csv (bool): Whether we want the output in CSV format.
709
710 Returns:
711 String of task statistics report
712 """
713 task_stats = self.get_task_statistics(
714 instance, project, region, days, task_id, request_id, user)
715 if not task_stats:
716 return 'No tasks found'
717
718 stats_order = [
719 'all_tasks', 'successful_tasks', 'failed_tasks', 'requests',
720 'tasks_per_type', 'tasks_per_worker', 'tasks_per_user'
721 ]
722
723 if csv:
724 report = ['stat_type, count, min, mean, max']
725 else:
726 report = ['Execution time statistics for Turbinia:', '']
727 for stat_name in stats_order:
728 stat_obj = task_stats[stat_name]
729 if isinstance(stat_obj, dict):
730 # Sort by description so that we get consistent report output
731 inner_stat_objs = sorted(
732 stat_obj.values(), key=attrgetter('description'))
733 for inner_stat_obj in inner_stat_objs:
734 if csv:
735 report.append(inner_stat_obj.format_stats_csv())
736 else:
737 report.append(inner_stat_obj.format_stats())
738 else:
739 if csv:
740 report.append(stat_obj.format_stats_csv())
741 else:
742 report.append(stat_obj.format_stats())
743
744 report.append('')
745 return '\n'.join(report)
746
747 def format_worker_status(
748 self, instance, project, region, days=0, all_fields=False):
749 """Formats the recent history for Turbinia Workers.
750
751 Args:
752 instance (string): The Turbinia instance name (by default the same as the
753 INSTANCE_ID in the config).
754 project (string): The name of the project.
755 region (string): The name of the zone to execute in.
756 days (int): The number of days we want history for.
757 all_fields (bool): Include historical Task information for the worker.
758 Returns:
759 String of Request status
760 """
761 # Set number of days to retrieve data
762 num_days = 7
763 if days != 0:
764 num_days = days
765 task_results = self.get_task_data(instance, project, region, days=num_days)
766 if not task_results:
767 return ''
768
769 # Sort task_results by last updated timestamp.
770 task_results = sorted(
771 task_results, key=itemgetter('last_update'), reverse=True)
772
773 # Create dictionary of worker_node: {{task_id, task_update,
774 # task_name, task_status}}
775 workers_dict = {}
776 scheduled_counter = 0
777 for result in task_results:
778 worker_node = result.get('worker_name')
779 status = result.get('status')
780 status = status if status else 'No task status'
781 if worker_node and worker_node not in workers_dict:
782 workers_dict[worker_node] = []
783 if worker_node:
784 task_dict = {}
785 task_dict['task_id'] = result.get('id')
786 task_dict['last_update'] = result.get('last_update')
787 task_dict['task_name'] = result.get('name')
788 task_dict['status'] = status
789 # Check status for anything that is running.
790 if 'running' in status:
791 run_time = (datetime.now() -
792 result.get('last_update')).total_seconds()
793 run_time = timedelta(seconds=run_time)
794 task_dict['run_time'] = run_time
795 else:
796 run_time = result.get('run_time')
797 task_dict['run_time'] = run_time if run_time else 'No run time.'
798 workers_dict[worker_node].append(task_dict)
799 else:
800 # Track scheduled/unassigned Tasks for reporting.
801 scheduled_counter += 1
802
803 # Generate report header
804 report = []
805 report.append(
806 fmt.heading1(
807 'Turbinia report for Worker activity within {0:d} days'.format(
808 num_days)))
809 report.append(
810 fmt.bullet('{0:d} Worker(s) found.'.format(len(workers_dict.keys()))))
811 report.append(
812 fmt.bullet(
813 '{0:d} Task(s) unassigned or scheduled and pending Worker assignment.'
814 .format(scheduled_counter)))
815 for worker_node, tasks in workers_dict.items():
816 report.append('')
817 report.append(fmt.heading2('Worker Node: {0:s}'.format(worker_node)))
818 # Append the statuses chronologically
819 run_status, queued_status, other_status = [], [], []
820 for task in tasks:
821 if 'running' in task['status']:
822 run_status.extend(self.format_worker_task(task))
823 elif 'queued' in task['status']:
824 queued_status.extend(self.format_worker_task(task))
825 else:
826 other_status.extend(self.format_worker_task(task))
827 # Add each of the status lists back to report list
828 not_found = [fmt.bullet('No Tasks found.')]
829 report.append(fmt.heading3('Running Tasks'))
830 report.extend(run_status if run_status else not_found)
831 report.append('')
832 report.append(fmt.heading3('Queued Tasks'))
833 report.extend(queued_status if queued_status else not_found)
834 # Add Historical Tasks
835 if all_fields:
836 report.append('')
837 report.append(fmt.heading3('Finished Tasks'))
838 report.extend(other_status if other_status else not_found)
839 return '\n'.join(report)
840
841 def format_request_status(
842 self, instance, project, region, days=0, all_fields=False):
843 """Formats the recent history for Turbinia Requests.
844
845 Args:
846 instance (string): The Turbinia instance name (by default the same as the
847 INSTANCE_ID in the config).
848 project (string): The name of the project.
849 region (string): The name of the zone to execute in.
850 days (int): The number of days we want history for.
851 all_fields (bool): Include all fields for the Request, which includes,
852 saved file paths.
853 Returns:
854 String of Request status
855 """
856 # Set number of days to retrieve data
857 num_days = 7
858 if days != 0:
859 num_days = days
860 task_results = self.get_task_data(instance, project, region, days=num_days)
861 if not task_results:
862 return ''
863
864 # Sort task_results by last updated timestamp.
865 task_results = sorted(
866 task_results, key=itemgetter('last_update'), reverse=True)
867
868 # Create dictionary of request_id: {saved_paths, last_update, requester,
869 # task_id}
870 request_dict = {}
871 for result in task_results:
872 request_id = result.get('request_id')
873 saved_paths = result.get('saved_paths')
874 if request_id not in request_dict:
875 saved_paths = set(saved_paths) if saved_paths else set()
876 request_dict[request_id] = {}
877 request_dict[request_id]['saved_paths'] = saved_paths
878 request_dict[request_id]['last_update'] = result.get('last_update')
879 request_dict[request_id]['requester'] = result.get('requester')
880 request_dict[request_id]['task_id'] = set([result.get('id')])
881 else:
882 if saved_paths:
883 request_dict[request_id]['saved_paths'].update(saved_paths)
884 request_dict[request_id]['task_id'].update([result.get('id')])
885
886 # Generate report header
887 report = []
888 report.append(
889 fmt.heading1(
890 'Turbinia report for Requests made within {0:d} days'.format(
891 num_days)))
892 report.append(
893 fmt.bullet(
894 '{0:d} requests were made within this timeframe.'.format(
895 len(request_dict.keys()))))
896 # Print report data for Requests
897 for request_id, values in request_dict.items():
898 report.append('')
899 report.append(fmt.heading2('Request ID: {0:s}'.format(request_id)))
900 report.append(
901 fmt.bullet(
902 'Last Update: {0:s}'.format(
903 values['last_update'].strftime(DATETIME_FORMAT))))
904 report.append(fmt.bullet('Requester: {0:s}'.format(values['requester'])))
905 report.append(
906 fmt.bullet('Task Count: {0:d}'.format(len(values['task_id']))))
907 if all_fields:
908 report.append(fmt.bullet('Associated Evidence:'))
909 # Append all saved paths in request
910 for path in sorted(values['saved_paths']):
911 report.append(fmt.bullet(fmt.code(path), level=2))
912 report.append('')
913 return '\n'.join(report)
914
915 def format_task_status(
916 self, instance, project, region, days=0, task_id=None, request_id=None,
917 user=None, all_fields=False, full_report=False,
918 priority_filter=Priority.HIGH, output_json=False):
919 """Formats the recent history for Turbinia Tasks.
920
921 Args:
922 instance (string): The Turbinia instance name (by default the same as the
923 INSTANCE_ID in the config).
924 project (string): The name of the project.
925 region (string): The name of the zone to execute in.
926 days (int): The number of days we want history for.
927 task_id (string): The Id of the task.
928 request_id (string): The Id of the request we want tasks for.
929 user (string): The user of the request we want tasks for.
930 all_fields (bool): Include all fields for the task, including task,
931 request ids and saved file paths.
932 full_report (bool): Generate a full markdown report instead of just a
933 summary.
934 priority_filter (int): Output only a summary for Tasks with a value
935 greater than the priority_filter.
936 output_json (bool): Whether to return JSON output.
937
938 Returns:
939 String of task status in JSON or human readable format.
940 """
941 if user and days == 0:
942 days = 1000
943 task_results = self.get_task_data(
944 instance, project, region, days, task_id, request_id, user,
945 output_json=output_json)
946 if not task_results:
947 return ''
948
949 if output_json:
950 return task_results
951
952 # Sort all tasks by the report_priority so that tasks with a higher
953 # priority are listed first in the report.
954 for result in task_results:
955 # 0 is a valid value, so checking against specific values
956 if result.get('report_priority') in (None, ''):
957 result['report_priority'] = Priority.LOW
958 task_results = sorted(task_results, key=itemgetter('report_priority'))
959 num_results = len(task_results)
960 if not num_results:
961 msg = 'No Turbinia Tasks found.'
962 log.info(msg)
963 return '\n{0:s}'.format(msg)
964
965 # Build up data
966 report = []
967 requester = task_results[0].get('requester')
968 request_id = task_results[0].get('request_id')
969 success_types = ['Successful', 'Failed', 'Scheduled or Running']
970 success_values = [True, False, None]
971 # Reverse mapping values to types
972 success_map = dict(zip(success_values, success_types))
973 task_map = defaultdict(list)
974 success_types.insert(0, 'High Priority')
975 for task in task_results:
976 if task.get('report_priority') <= priority_filter:
977 task_map['High Priority'].append(task)
978 else:
979 task_map[success_map[task.get('successful')]].append(task)
980
981 # Generate report header
982 report.append('\n')
983 report.append(fmt.heading1('Turbinia report {0:s}'.format(request_id)))
984 report.append(
985 fmt.bullet(
986 'Processed {0:d} Tasks for user {1:s}'.format(
987 num_results, requester)))
988
989 # Print report data for tasks
990 for success_type in success_types:
991 report.append('')
992 report.append(fmt.heading1('{0:s} Tasks'.format(success_type)))
993 if not task_map[success_type]:
994 report.append(fmt.bullet('None'))
995 for task in task_map[success_type]:
996 if full_report and success_type == success_types[0]:
997 report.extend(self.format_task_detail(task, show_files=all_fields))
998 else:
999 report.extend(self.format_task(task, show_files=all_fields))
1000
1001 return '\n'.join(report)
1002
1003 def run_local_task(self, task_name, request):
1004 """Runs a Turbinia Task locally.
1005
1006 Args:
1007 task_name(string): Name of the Task we are going to run.
1008 request (TurbiniaRequest): Object containing request and evidence info.
1009
1010 Returns:
1011 TurbiniaTaskResult: The result returned by the Task Execution.
1012 """
1013 task = self.create_task(task_name)
1014 task.request_id = request.request_id
1015 task.base_output_dir = config.OUTPUT_DIR
1016 task.run_local = True
1017 if not request.evidence:
1018 raise TurbiniaException('TurbiniaRequest does not contain evidence.')
1019 log.info('Running Task {0:s} locally'.format(task_name))
1020 result = task.run_wrapper(request.evidence[0])
1021 return result
1022
1023 def send_request(self, request):
1024 """Sends a TurbiniaRequest message.
1025
1026 Args:
1027 request: A TurbiniaRequest object.
1028 """
1029 self.task_manager.server_pubsub.send_request(request)
1030
1031 def close_tasks(
1032 self, instance, project, region, request_id=None, task_id=None, user=None,
1033 requester=None):
1034 """Close Turbinia Tasks based on Request ID.
1035
1036 Args:
1037 instance (string): The Turbinia instance name (by default the same as the
1038 INSTANCE_ID in the config).
1039 project (string): The name of the project.
1040 region (string): The name of the zone to execute in.
1041 request_id (string): The Id of the request we want tasks for.
1042 task_id (string): The Id of the request we want task for.
1043 user (string): The user of the request we want tasks for.
1044 requester (string): The user making the request to close tasks.
1045
1046 Returns: String of closed Task IDs.
1047 """
1048 cloud_function = gcp_function.GoogleCloudFunction(project)
1049 func_args = {
1050 'instance': instance,
1051 'kind': 'TurbiniaTask',
1052 'request_id': request_id,
1053 'task_id': task_id,
1054 'user': user,
1055 'requester': requester
1056 }
1057 response = cloud_function.ExecuteFunction('closetasks', region, func_args)
1058 return 'Closed Task IDs: %s' % response.get('result')
1059
1060
1061 class TurbiniaCeleryClient(BaseTurbiniaClient):
1062 """Client class for Turbinia (Celery).
1063
1064 Overriding some things specific to Celery operation.
1065
1066 Attributes:
1067 redis (RedisStateManager): Redis datastore object
1068 """
1069
1070 def __init__(self, *args, **kwargs):
1071 super(TurbiniaCeleryClient, self).__init__(*args, **kwargs)
1072 self.redis = RedisStateManager()
1073
1074 def send_request(self, request):
1075 """Sends a TurbiniaRequest message.
1076
1077 Args:
1078 request: A TurbiniaRequest object.
1079 """
1080 self.task_manager.kombu.send_request(request)
1081
1082 # pylint: disable=arguments-differ
1083 def get_task_data(
1084 self, instance, _, __, days=0, task_id=None, request_id=None,
1085 function_name=None, output_json=False):
1086 """Gets task data from Redis.
1087
1088 We keep the same function signature, but ignore arguments passed for GCP.
1089
1090 Args:
1091 instance (string): The Turbinia instance name (by default the same as the
1092 INSTANCE_ID in the config).
1093 days (int): The number of days we want history for.
1094 task_id (string): The Id of the task.
1095 request_id (string): The Id of the request we want tasks for.
1096
1097 Returns:
1098 List of Task dict objects.
1099 """
1100 return self.redis.get_task_data(instance, days, task_id, request_id)
1101
1102
1103 class TurbiniaServer(object):
1104 """Turbinia Server class.
1105
1106 Attributes:
1107 task_manager (TaskManager): An object to manage turbinia tasks.
1108 """
1109
1110 def __init__(self, jobs_denylist=None, jobs_allowlist=None):
1111 """Initializes Turbinia Server.
1112
1113 Args:
1114 jobs_denylist (Optional[list[str]]): Jobs we will exclude from running
1115 jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run
1116 """
1117 config.LoadConfig()
1118 self.task_manager = task_manager.get_task_manager()
1119 self.task_manager.setup(jobs_denylist, jobs_allowlist)
1120
1121 def start(self):
1122 """Start Turbinia Server."""
1123 log.info('Starting Prometheus endpoint.')
1124 start_http_server(port=config.PROMETHEUS_PORT, addr=config.PROMETHEUS_ADDR)
1125 log.info('Running Turbinia Server.')
1126 self.task_manager.run()
1127
1128 def add_evidence(self, evidence_):
1129 """Add evidence to be processed."""
1130 self.task_manager.add_evidence(evidence_)
1131
1132
1133 class TurbiniaCeleryWorker(BaseTurbiniaClient):
1134 """Turbinia Celery Worker class.
1135
1136 Attributes:
1137 worker (celery.app): Celery worker app
1138 """
1139
1140 def __init__(self, jobs_denylist=None, jobs_allowlist=None):
1141 """Initialization for celery worker.
1142
1143 Args:
1144 jobs_denylist (Optional[list[str]]): Jobs we will exclude from running
1145 jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run
1146 """
1147 super(TurbiniaCeleryWorker, self).__init__()
1148 # Deregister jobs from denylist/allowlist.
1149 job_manager.JobsManager.DeregisterJobs(jobs_denylist, jobs_allowlist)
1150 disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []
1151 disabled_jobs = [j.lower() for j in disabled_jobs]
1152 # Only actually disable jobs that have not been allowlisted.
1153 if jobs_allowlist:
1154 disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))
1155 if disabled_jobs:
1156 log.info(
1157 'Disabling non-allowlisted jobs configured to be disabled in the '
1158 'config file: {0:s}'.format(', '.join(disabled_jobs)))
1159 job_manager.JobsManager.DeregisterJobs(jobs_denylist=disabled_jobs)
1160
1161 # Check for valid dependencies/directories.
1162 dependencies = config.ParseDependencies()
1163 if config.DOCKER_ENABLED:
1164 check_docker_dependencies(dependencies)
1165 check_system_dependencies(dependencies)
1166 check_directory(config.MOUNT_DIR_PREFIX)
1167 check_directory(config.OUTPUT_DIR)
1168 check_directory(config.TMP_DIR)
1169
1170 jobs = job_manager.JobsManager.GetJobNames()
1171 log.info(
1172 'Dependency check complete. The following jobs will be enabled '
1173 'for this worker: {0:s}'.format(','.join(jobs)))
1174 self.worker = self.task_manager.celery.app
1175
1176 def start(self):
1177 """Start Turbinia Celery Worker."""
1178 log.info('Running Turbinia Celery Worker.')
1179 self.worker.task(task_manager.task_runner, name='task_runner')
1180 argv = ['celery', 'worker', '--loglevel=info', '--pool=solo']
1181 self.worker.start(argv)
1182
1183
1184 class TurbiniaPsqWorker(object):
1185 """Turbinia PSQ Worker class.
1186
1187 Attributes:
1188 worker (psq.Worker): PSQ Worker object
1189 psq (psq.Queue): A Task queue object
1190
1191 Raises:
1192 TurbiniaException: When errors occur
1193 """
1194
1195 def __init__(self, jobs_denylist=None, jobs_allowlist=None):
1196 """Initialization for PSQ Worker.
1197
1198 Args:
1199 jobs_denylist (Optional[list[str]]): Jobs we will exclude from running
1200 jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run
1201 """
1202 config.LoadConfig()
1203 psq_publisher = pubsub.PublisherClient()
1204 psq_subscriber = pubsub.SubscriberClient()
1205 datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)
1206 try:
1207 self.psq = psq.Queue(
1208 psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,
1209 name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))
1210 except exceptions.GoogleCloudError as e:
1211 msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))
1212 log.error(msg)
1213 raise TurbiniaException(msg)
1214
1215 # Deregister jobs from denylist/allowlist.
1216 job_manager.JobsManager.DeregisterJobs(jobs_denylist, jobs_allowlist)
1217 disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []
1218 disabled_jobs = [j.lower() for j in disabled_jobs]
1219 # Only actually disable jobs that have not been allowlisted.
1220 if jobs_allowlist:
1221 disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))
1222 if disabled_jobs:
1223 log.info(
1224 'Disabling non-allowlisted jobs configured to be disabled in the '
1225 'config file: {0:s}'.format(', '.join(disabled_jobs)))
1226 job_manager.JobsManager.DeregisterJobs(jobs_denylist=disabled_jobs)
1227
1228 # Check for valid dependencies/directories.
1229 dependencies = config.ParseDependencies()
1230 if config.DOCKER_ENABLED:
1231 check_docker_dependencies(dependencies)
1232 check_system_dependencies(dependencies)
1233 check_directory(config.MOUNT_DIR_PREFIX)
1234 check_directory(config.OUTPUT_DIR)
1235 check_directory(config.TMP_DIR)
1236
1237 jobs = job_manager.JobsManager.GetJobNames()
1238 log.info(
1239 'Dependency check complete. The following jobs are enabled '
1240 'for this worker: {0:s}'.format(','.join(jobs)))
1241 log.info('Starting PSQ listener on queue {0:s}'.format(self.psq.name))
1242 self.worker = psq.Worker(queue=self.psq)
1243
1244 def start(self):
1245 """Start Turbinia PSQ Worker."""
1246 log.info('Starting Prometheus endpoint.')
1247 start_http_server(port=config.PROMETHEUS_PORT, addr=config.PROMETHEUS_ADDR)
1248 log.info('Running Turbinia PSQ Worker.')
1249 self.worker.listen()
1250
```
--- END FILES ---
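A brief editorial pointer for localizing the bug before the patch instructions below: the traceback in the issue ends inside `evidence_decode()`, which rejects a `RawDisk` object because it is not a dictionary, while line 1020 of the listing above passes `request.evidence[0]` (an Evidence object) straight into `task.run_wrapper()`. The sketch below reproduces that failing shape; it is not part of the original row, the `source_path` keyword on `RawDisk` is an assumption, and `evidence_decode()` and `serialize()` are taken from the traceback and the reference patch further down.
```python
# Editorial sketch, not part of the original dataset row.
# Assumption: RawDisk(source_path=...) matches the actual constructor and can
# be built without the image file being present.
from turbinia import TurbiniaException
from turbinia.evidence import RawDisk, evidence_decode

evidence = RawDisk(source_path='dfchecklist.img')

try:
    # Effectively what happens when line 1020 hands the Evidence object to
    # run_wrapper(): evidence_decode() only accepts a dict.
    evidence_decode(evidence)
except TurbiniaException as exc:
    print(exc)  # "Evidence_dict is not a dictionary, type is <class '...RawDisk'>"

# The serialized (dict) form is what evidence_decode() expects.
decoded = evidence_decode(evidence.serialize())
```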
Please first localize the bug based on the issue statement, and then generate a patch in `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
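The row's reference patch follows this note. For readability, here is a hedged, abridged sketch of what `run_local_task()` looks like once that one-line change is applied; every attribute and call is taken from the listing above, and the wrapper function name itself is hypothetical.
```python
# Abridged mirror of lines 1013-1021 of the listing with the reference patch
# applied; the function name is hypothetical and base_output_dir/logging are
# omitted for brevity.
from turbinia import TurbiniaException


def run_local_task_patched(client, request, task_name):
    task = client.create_task(task_name)
    task.request_id = request.request_id
    task.run_local = True
    if not request.evidence:
        raise TurbiniaException('TurbiniaRequest does not contain evidence.')
    # The one-line fix: hand run_wrapper() the serialized (dict) form of the
    # evidence instead of the Evidence object itself.
    return task.run_wrapper(request.evidence[0].serialize())
```
Serializing keeps the worker-side contract intact: per the traceback, `run_wrapper()` passes its argument through `evidence_decode()`, which expects a plain dict and rebuilds the Evidence object from it.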
diff --git a/turbinia/client.py b/turbinia/client.py
--- a/turbinia/client.py
+++ b/turbinia/client.py
@@ -1017,7 +1017,7 @@
if not request.evidence:
raise TurbiniaException('TurbiniaRequest does not contain evidence.')
log.info('Running Task {0:s} locally'.format(task_name))
- result = task.run_wrapper(request.evidence[0])
+ result = task.run_wrapper(request.evidence[0].serialize())
return result
def send_request(self, request):
| {"golden_diff": "diff --git a/turbinia/client.py b/turbinia/client.py\n--- a/turbinia/client.py\n+++ b/turbinia/client.py\n@@ -1017,7 +1017,7 @@\n if not request.evidence:\n raise TurbiniaException('TurbiniaRequest does not contain evidence.')\n log.info('Running Task {0:s} locally'.format(task_name))\n- result = task.run_wrapper(request.evidence[0])\n+ result = task.run_wrapper(request.evidence[0].serialize())\n return result\n \n def send_request(self, request):\n", "issue": "Crash when running locally\n```\r\n$ turbiniactl -t SSHDAnalysisTask -R rawdisk -l dfchecklist.img \r\n[INFO] Turbinia version: 20190819\r\n[INFO] Creating request 5d50f281e7fc4a24bd88993ad8bb34a9 with evidence dfchecklist.img\r\n[INFO] Run command \"turbiniactl status -r 5d50f281e7fc4a24bd88993ad8bb34a9\" to see the status of this request and associated tasks\r\n[INFO] Running Task SSHDAnalysisTask locally\r\nTraceback (most recent call last):\r\n File \"/usr/local/google/home/romaing/venvs/turbinia/bin/turbiniactl\", line 11, in <module>\r\n load_entry_point('turbinia==20190819', 'console_scripts', 'turbiniactl')()\r\n File \"/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/turbiniactl.py\", line 813, in main\r\n result = client.run_local_task(args.task, request)\r\n File \"/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/client.py\", line 1020, in run_local_task\r\n result = task.run_wrapper(request.evidence[0])\r\n File \"/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/workers/__init__.py\", line 705, in run_wrapper\r\n evidence = evidence_decode(evidence)\r\n File \"/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/evidence.py\", line 56, in evidence_decode\r\n raise TurbiniaException(\r\nturbinia.TurbiniaException: Evidence_dict is not a dictionary, type is <class 'turbinia.evidence.RawDisk'>\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Client objects for Turbinia.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport httplib2\nimport json\nimport logging\nfrom operator import itemgetter\nfrom operator import attrgetter\nimport os\nimport stat\nimport time\nimport subprocess\nimport codecs\n\nfrom google import auth\nfrom prometheus_client import start_http_server\nfrom turbinia import config\nfrom turbinia.config import logger\nfrom turbinia.config import DATETIME_FORMAT\nfrom turbinia import task_manager\nfrom turbinia import TurbiniaException\nfrom turbinia.lib import text_formatter as fmt\nfrom turbinia.lib import docker_manager\nfrom turbinia.jobs import manager as job_manager\nfrom turbinia.workers import Priority\nfrom turbinia.workers.artifact 
import FileArtifactExtractionTask\nfrom turbinia.workers.analysis.wordpress import WordpressAccessLogAnalysisTask\nfrom turbinia.workers.analysis.jenkins import JenkinsAnalysisTask\nfrom turbinia.workers.analysis.jupyter import JupyterAnalysisTask\nfrom turbinia.workers.finalize_request import FinalizeRequestTask\nfrom turbinia.workers.docker import DockerContainersEnumerationTask\nfrom turbinia.workers.grep import GrepTask\nfrom turbinia.workers.hadoop import HadoopAnalysisTask\nfrom turbinia.workers.hindsight import HindsightTask\nfrom turbinia.workers.partitions import PartitionEnumerationTask\nfrom turbinia.workers.plaso import PlasoTask\nfrom turbinia.workers.psort import PsortTask\nfrom turbinia.workers.redis import RedisAnalysisTask\nfrom turbinia.workers.sshd import SSHDAnalysisTask\nfrom turbinia.workers.strings import StringsAsciiTask\nfrom turbinia.workers.strings import StringsUnicodeTask\nfrom turbinia.workers.tomcat import TomcatAnalysisTask\nfrom turbinia.workers.volatility import VolatilityTask\nfrom turbinia.workers.worker_stat import StatTask\nfrom turbinia.workers.binary_extractor import BinaryExtractorTask\nfrom turbinia.workers.bulk_extractor import BulkExtractorTask\nfrom turbinia.workers.photorec import PhotorecTask\n\nMAX_RETRIES = 10\nRETRY_SLEEP = 60\n\n# TODO(aarontp): Remove this map after\n# https://github.com/google/turbinia/issues/278 is fixed.\nTASK_MAP = {\n 'fileartifactextractiontask': FileArtifactExtractionTask,\n 'wordpressaccessloganalysistask': WordpressAccessLogAnalysisTask,\n 'finalizerequesttask': FinalizeRequestTask,\n 'jenkinsanalysistask': JenkinsAnalysisTask,\n 'JupyterAnalysisTask': JupyterAnalysisTask,\n 'greptask': GrepTask,\n 'hadoopanalysistask': HadoopAnalysisTask,\n 'hindsighttask': HindsightTask,\n 'partitionenumerationtask': PartitionEnumerationTask,\n 'plasotask': PlasoTask,\n 'psorttask': PsortTask,\n 'redisanalysistask': RedisAnalysisTask,\n 'sshdanalysistask': SSHDAnalysisTask,\n 'stringsasciitask': StringsAsciiTask,\n 'stringsunicodetask': StringsUnicodeTask,\n 'tomcatanalysistask': TomcatAnalysisTask,\n 'volatilitytask': VolatilityTask,\n 'stattask': StatTask,\n 'binaryextractor': BinaryExtractorTask,\n 'bulkextractortask': BulkExtractorTask,\n 'dockertask': DockerContainersEnumerationTask,\n 'photorectask': PhotorecTask\n}\n\nconfig.LoadConfig()\nif config.TASK_MANAGER.lower() == 'psq':\n import psq\n\n from google.cloud import exceptions\n from google.cloud import datastore\n from google.cloud import pubsub\n\n from libcloudforensics.providers.gcp.internal import function as gcp_function\nelif config.TASK_MANAGER.lower() == 'celery':\n from turbinia.state_manager import RedisStateManager\n\nlog = logging.getLogger('turbinia')\nlogger.setup()\n\n\ndef get_turbinia_client(run_local=False):\n \"\"\"Return Turbinia client based on config.\n\n Returns:\n Initialized BaseTurbiniaClient or TurbiniaCeleryClient object.\n \"\"\"\n config.LoadConfig()\n # pylint: disable=no-else-return\n if config.TASK_MANAGER.lower() == 'psq':\n return BaseTurbiniaClient(run_local=run_local)\n elif config.TASK_MANAGER.lower() == 'celery':\n return TurbiniaCeleryClient(run_local=run_local)\n else:\n msg = 'Task Manager type \"{0:s}\" not implemented'.format(\n config.TASK_MANAGER)\n raise TurbiniaException(msg)\n\n\ndef check_docker_dependencies(dependencies):\n \"\"\"Checks docker dependencies.\n\n Args:\n dependencies(dict): dictionary of dependencies to check for.\n\n Raises:\n TurbiniaException: If dependency is not met.\n \"\"\"\n #TODO(wyassine): 
may run into issues down the line when a docker image\n # does not have bash or which installed. (no linux fs layer).\n log.info('Performing docker dependency check.')\n job_names = list(job_manager.JobsManager.GetJobNames())\n images = docker_manager.DockerManager().list_images(return_filter='short_id')\n\n # Iterate through list of jobs\n for job, values in dependencies.items():\n if job not in job_names:\n log.warning(\n 'The job {0:s} was not found or has been disabled. Skipping '\n 'dependency check...'.format(job))\n continue\n docker_image = values.get('docker_image')\n # short id only pulls the first 10 characters of image id.\n if docker_image and len(docker_image) > 10:\n docker_image = docker_image[0:10]\n\n if docker_image in images:\n for program in values['programs']:\n cmd = 'type {0:s}'.format(program)\n stdout, stderr, ret = docker_manager.ContainerManager(\n values['docker_image']).execute_container(cmd, shell=True)\n if ret != 0:\n raise TurbiniaException(\n 'Job dependency {0:s} not found for job {1:s}. Please install '\n 'the dependency for the container or disable the job.'.format(\n program, job))\n job_manager.JobsManager.RegisterDockerImage(job, values['docker_image'])\n elif docker_image:\n raise TurbiniaException(\n 'Docker image {0:s} was not found for the job {1:s}. Please '\n 'update the config with the correct image id'.format(\n values['docker_image'], job))\n\n\ndef check_system_dependencies(dependencies):\n \"\"\"Checks system dependencies.\n\n Args:\n dependencies(dict): dictionary of dependencies to check for.\n\n Raises:\n TurbiniaException: If dependency is not met.\n \"\"\"\n log.info('Performing system dependency check.')\n job_names = list(job_manager.JobsManager.GetJobNames())\n\n # Iterate through list of jobs\n for job, values in dependencies.items():\n if job not in job_names:\n log.warning(\n 'The job {0:s} was not found or has been disabled. Skipping '\n 'dependency check...'.format(job))\n continue\n elif not values.get('docker_image'):\n for program in values['programs']:\n cmd = 'type {0:s}'.format(program)\n proc = subprocess.Popen(cmd, shell=True)\n proc.communicate()\n ret = proc.returncode\n if ret != 0:\n raise TurbiniaException(\n 'Job dependency {0:s} not found in $PATH for the job {1:s}. 
'\n 'Please install the dependency or disable the job.'.format(\n program, job))\n\n\ndef check_directory(directory):\n \"\"\"Checks directory to make sure it exists and is writable.\n\n Args:\n directory (string): Path to directory\n\n Raises:\n TurbiniaException: When directory cannot be created or used.\n \"\"\"\n if os.path.exists(directory) and not os.path.isdir(directory):\n raise TurbiniaException(\n 'File {0:s} exists, but is not a directory'.format(directory))\n\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except OSError:\n raise TurbiniaException(\n 'Can not create Directory {0:s}'.format(directory))\n\n if not os.access(directory, os.W_OK):\n try:\n mode = os.stat(directory)[0]\n os.chmod(directory, mode | stat.S_IWUSR)\n except OSError:\n raise TurbiniaException(\n 'Can not add write permissions to {0:s}'.format(directory))\n\n\nclass TurbiniaStats(object):\n \"\"\"Statistics for Turbinia task execution.\n\n Attributes:\n count(int): The number of tasks\n min(datetime.timedelta): The minimum run time of all tasks\n max(datetime.timedelta): The maximum run time of all tasks\n mean(datetime.timedelta): The mean run time of all tasks\n tasks(list): A list of tasks to calculate stats for\n \"\"\"\n\n def __init__(self, description=None):\n self.description = description\n self.min = None\n self.mean = None\n self.max = None\n self.tasks = []\n\n def __str__(self):\n return self.format_stats()\n\n @property\n def count(self):\n \"\"\"Gets a count of the tasks in this stats object.\n\n Returns:\n Int of task count.\n \"\"\"\n return len(self.tasks)\n\n def add_task(self, task):\n \"\"\"Add a task result dict.\n\n Args:\n task(dict): The task results we want to count stats for.\n \"\"\"\n self.tasks.append(task)\n\n def calculate_stats(self):\n \"\"\"Calculates statistics of the current tasks.\"\"\"\n if not self.tasks:\n return\n\n sorted_tasks = sorted(self.tasks, key=itemgetter('run_time'))\n self.min = sorted_tasks[0]['run_time']\n self.max = sorted_tasks[len(sorted_tasks) - 1]['run_time']\n self.mean = sorted_tasks[len(sorted_tasks) // 2]['run_time']\n\n # Remove the microseconds to keep things cleaner\n self.min = self.min - timedelta(microseconds=self.min.microseconds)\n self.max = self.max - timedelta(microseconds=self.max.microseconds)\n self.mean = self.mean - timedelta(microseconds=self.mean.microseconds)\n\n def format_stats(self):\n \"\"\"Formats statistics data.\n\n Returns:\n String of statistics data\n \"\"\"\n return '{0:s}: Count: {1:d}, Min: {2!s}, Mean: {3!s}, Max: {4!s}'.format(\n self.description, self.count, self.min, self.mean, self.max)\n\n def format_stats_csv(self):\n \"\"\"Formats statistics data into CSV output.\n\n Returns:\n String of statistics data in CSV format\n \"\"\"\n return '{0:s}, {1:d}, {2!s}, {3!s}, {4!s}'.format(\n self.description, self.count, self.min, self.mean, self.max)\n\n\nclass BaseTurbiniaClient(object):\n \"\"\"Client class for Turbinia.\n\n Attributes:\n task_manager (TaskManager): Turbinia task manager\n \"\"\"\n\n def __init__(self, run_local=False):\n config.LoadConfig()\n if run_local:\n self.task_manager = None\n else:\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(server=False)\n\n def create_task(self, task_name):\n \"\"\"Creates a Turbinia Task by name.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n\n Returns:\n TurbiniaTask: An instantiated Task object.\n\n Raises:\n TurbiniaException: When no Task object matching task_name is found.\n 
\"\"\"\n task_obj = TASK_MAP.get(task_name.lower())\n log.debug('Looking up Task {0:s} by name'.format(task_name))\n if not task_obj:\n raise TurbiniaException('No Task named {0:s} found'.format(task_name))\n return task_obj()\n\n def list_jobs(self):\n \"\"\"List the available jobs.\"\"\"\n # TODO(aarontp): Refactor this out so that we don't need to depend on\n # the task manager from the client.\n log.info('Available Jobs:')\n for job in self.task_manager.jobs:\n log.info('\\t{0:s}'.format(job.NAME))\n\n def wait_for_request(\n self, instance, project, region, request_id=None, user=None,\n poll_interval=60):\n \"\"\"Polls and waits for Turbinia Request to complete.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n poll_interval (int): Interval of seconds between polling cycles.\n \"\"\"\n last_completed_count = -1\n last_uncompleted_count = -1\n while True:\n task_results = self.get_task_data(\n instance, project, region, request_id=request_id, user=user)\n completed_tasks = []\n uncompleted_tasks = []\n for task in task_results:\n if task.get('successful') is not None:\n completed_tasks.append(task)\n else:\n uncompleted_tasks.append(task)\n\n if completed_tasks and len(completed_tasks) == len(task_results):\n break\n\n completed_names = [t.get('name') for t in completed_tasks]\n completed_names = ', '.join(sorted(completed_names))\n uncompleted_names = [t.get('name') for t in uncompleted_tasks]\n uncompleted_names = ', '.join(sorted(uncompleted_names))\n total_count = len(completed_tasks) + len(uncompleted_tasks)\n msg = (\n 'Tasks completed ({0:d}/{1:d}): [{2:s}], waiting for [{3:s}].'.format(\n len(completed_tasks), total_count, completed_names,\n uncompleted_names))\n if (len(completed_tasks) > last_completed_count or\n len(uncompleted_tasks) > last_uncompleted_count):\n log.info(msg)\n else:\n log.debug(msg)\n\n last_completed_count = len(completed_tasks)\n last_uncompleted_count = len(uncompleted_tasks)\n time.sleep(poll_interval)\n\n log.info('All {0:d} Tasks completed'.format(len(task_results)))\n\n def get_task_data(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, function_name='gettasks', output_json=False):\n \"\"\"Gets task data from Google Cloud Functions.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n function_name (string): The GCF function we want to call.\n output_json (bool): Whether to return JSON output.\n\n Returns:\n (List|JSON string) of Task dict objects\n \"\"\"\n cloud_function = gcp_function.GoogleCloudFunction(project)\n func_args = {'instance': instance, 'kind': 'TurbiniaTask'}\n\n if days:\n start_time = datetime.now() - timedelta(days=days)\n # Format this like '1990-01-01T00:00:00z' so we can cast it directly to a\n # javascript Date() object in the cloud function.\n start_string = start_time.strftime(DATETIME_FORMAT)\n 
func_args.update({'start_time': start_string})\n elif task_id:\n func_args.update({'task_id': task_id})\n elif request_id:\n func_args.update({'request_id': request_id})\n\n if user:\n func_args.update({'user': user})\n\n response = None\n retry_count = 0\n credential_error_count = 0\n while response is None and retry_count < MAX_RETRIES:\n try:\n response = cloud_function.ExecuteFunction(\n function_name, region, func_args)\n except auth.exceptions.RefreshError as exception:\n if credential_error_count == 0:\n log.info(\n 'GCP Credentials need to be refreshed, please refresh in another '\n 'terminal and this process will resume. Error: {0!s}'.format(\n exception))\n else:\n log.debug(\n 'GCP Credentials need to be refreshed, please refresh in another '\n 'terminal and this process will resume. Attempt {0:d}. Error: '\n '{1!s}'.format(credential_error_count + 1, exception))\n # Note, we are intentially not incrementing the retry_count here because\n # we will retry indefinitely while we wait for the user to reauth.\n credential_error_count += 1\n except httplib2.ServerNotFoundError as exception:\n log.info(\n 'Error connecting to server, will retry [{0:d} of {1:d} retries]: '\n '{2!s}'.format(retry_count, MAX_RETRIES, exception))\n retry_count += 1\n\n if response is None:\n time.sleep(RETRY_SLEEP)\n\n if 'result' not in response:\n log.error('No results found')\n if response.get('error', '{}') != '{}':\n msg = 'Error executing Cloud Function: [{0!s}].'.format(\n response.get('error'))\n log.error(msg)\n log.debug('GCF response: {0!s}'.format(response))\n raise TurbiniaException(\n 'Cloud Function {0:s} returned no results.'.format(function_name))\n\n try:\n results = json.loads(response['result'])\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not deserialize result [{0!s}] from GCF: [{1!s}]'.format(\n response.get('result'), e))\n\n task_data = results[0]\n if output_json:\n try:\n json_data = json.dumps(task_data)\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not re-serialize result [{0!s}] from GCF: [{1!s}]'.format(\n str(task_data), e))\n return json_data\n\n # Convert run_time/last_update back into datetime objects\n for task in task_data:\n if task.get('run_time'):\n task['run_time'] = timedelta(seconds=task['run_time'])\n if task.get('last_update'):\n task['last_update'] = datetime.strptime(\n task['last_update'], DATETIME_FORMAT)\n\n return task_data\n\n def format_task_detail(self, task, show_files=False):\n \"\"\"Formats a single task in detail.\n\n Args:\n task (dict): The task to format data for\n show_files (bool): Whether we want to print out log file paths\n\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n saved_paths = task.get('saved_paths') or []\n status = task.get('status') or 'No task status'\n\n report.append(fmt.heading2(task.get('name')))\n line = '{0:s} {1:s}'.format(fmt.bold('Status:'), status)\n report.append(fmt.bullet(line))\n report.append(fmt.bullet('Task Id: {0:s}'.format(task.get('id'))))\n report.append(\n fmt.bullet('Executed on worker {0:s}'.format(task.get('worker_name'))))\n if task.get('report_data'):\n report.append('')\n report.append(fmt.heading3('Task Reported Data'))\n report.extend(task.get('report_data').splitlines())\n if show_files:\n report.append('')\n report.append(fmt.heading3('Saved Task Files:'))\n for path in saved_paths:\n report.append(fmt.bullet(fmt.code(path)))\n report.append('')\n return report\n\n def format_worker_task(self, task):\n \"\"\"Formats a single 
task for Worker view.\n\n Args:\n task (dict): The task to format data for\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n report.append(\n fmt.bullet('{0:s} - {1:s}'.format(task['task_id'], task['task_name'])))\n report.append(\n fmt.bullet(\n 'Last Update: {0:s}'.format(\n task['last_update'].strftime(DATETIME_FORMAT)), level=2))\n report.append(fmt.bullet('Status: {0:s}'.format(task['status']), level=2))\n report.append(\n fmt.bullet('Run Time: {0:s}'.format(str(task['run_time'])), level=2))\n report.append('')\n return report\n\n def format_task(self, task, show_files=False):\n \"\"\"Formats a single task in short form.\n\n Args:\n task (dict): The task to format data for\n show_files (bool): Whether we want to print out log file paths\n\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n saved_paths = task.get('saved_paths') or []\n status = task.get('status') or 'No task status'\n report.append(fmt.bullet('{0:s}: {1:s}'.format(task.get('name'), status)))\n if show_files:\n for path in saved_paths:\n report.append(fmt.bullet(fmt.code(path), level=2))\n report.append('')\n return report\n\n def get_task_statistics(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None):\n \"\"\"Gathers statistics for Turbinia execution data.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n\n Returns:\n task_stats(dict): Mapping of statistic names to values\n \"\"\"\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user)\n if not task_results:\n return {}\n\n task_stats = {\n 'all_tasks': TurbiniaStats('All Tasks'),\n 'successful_tasks': TurbiniaStats('Successful Tasks'),\n 'failed_tasks': TurbiniaStats('Failed Tasks'),\n 'requests': TurbiniaStats('Total Request Time'),\n # The following are dicts mapping the user/worker/type names to their\n # respective TurbiniaStats() objects.\n # Total wall-time for all tasks of a given type\n 'tasks_per_type': {},\n # Total wall-time for all tasks per Worker\n 'tasks_per_worker': {},\n # Total wall-time for all tasks per User\n 'tasks_per_user': {},\n }\n\n # map of request ids to [min time, max time]\n requests = {}\n\n for task in task_results:\n request_id = task.get('request_id')\n task_type = task.get('name')\n worker = task.get('worker_name')\n user = task.get('requester')\n if not task.get('run_time'):\n log.debug(\n 'Ignoring task {0:s} in statistics because the run_time is not '\n 'set, and it is required to calculate stats'.format(\n task.get('name')))\n continue\n\n # Stats for all/successful/failed tasks\n task_stats['all_tasks'].add_task(task)\n if task.get('successful') is True:\n task_stats['successful_tasks'].add_task(task)\n elif task.get('successful') is False:\n task_stats['failed_tasks'].add_task(task)\n\n # Stats for Tasks per Task type.\n if task_type in task_stats['tasks_per_type']:\n task_type_stats = task_stats['tasks_per_type'].get(task_type)\n else:\n task_type_stats = TurbiniaStats('Task type {0:s}'.format(task_type))\n task_stats['tasks_per_type'][task_type] = task_type_stats\n task_type_stats.add_task(task)\n\n # Stats per worker.\n if worker in 
task_stats['tasks_per_worker']:\n worker_stats = task_stats['tasks_per_worker'].get(worker)\n else:\n worker_stats = TurbiniaStats('Worker {0:s}'.format(worker))\n task_stats['tasks_per_worker'][worker] = worker_stats\n worker_stats.add_task(task)\n\n # Stats per submitting User.\n if user in task_stats['tasks_per_user']:\n user_stats = task_stats['tasks_per_user'].get(user)\n else:\n user_stats = TurbiniaStats('User {0:s}'.format(user))\n task_stats['tasks_per_user'][user] = user_stats\n user_stats.add_task(task)\n\n # Stats for the total request. This will, for each request, calculate the\n # start time of the earliest task and the stop time of the latest task.\n # This will give the overall run time covering all tasks in the request.\n task_start_time = task['last_update'] - task['run_time']\n task_stop_time = task['last_update']\n if request_id in requests:\n start_time, stop_time = requests[request_id]\n if task_start_time < start_time:\n requests[request_id][0] = task_start_time\n if task_stop_time > stop_time:\n requests[request_id][1] = task_stop_time\n else:\n requests[request_id] = [task_start_time, task_stop_time]\n\n # Add a fake task result for each request with our calculated times to the\n # stats module\n for min_time, max_time in requests.values():\n task = {}\n task['run_time'] = max_time - min_time\n task_stats['requests'].add_task(task)\n\n # Go over all stat objects and calculate them\n for stat_obj in task_stats.values():\n if isinstance(stat_obj, dict):\n for inner_stat_obj in stat_obj.values():\n inner_stat_obj.calculate_stats()\n else:\n stat_obj.calculate_stats()\n\n return task_stats\n\n def format_task_statistics(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, csv=False):\n \"\"\"Formats statistics for Turbinia execution data.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n csv (bool): Whether we want the output in CSV format.\n\n Returns:\n String of task statistics report\n \"\"\"\n task_stats = self.get_task_statistics(\n instance, project, region, days, task_id, request_id, user)\n if not task_stats:\n return 'No tasks found'\n\n stats_order = [\n 'all_tasks', 'successful_tasks', 'failed_tasks', 'requests',\n 'tasks_per_type', 'tasks_per_worker', 'tasks_per_user'\n ]\n\n if csv:\n report = ['stat_type, count, min, mean, max']\n else:\n report = ['Execution time statistics for Turbinia:', '']\n for stat_name in stats_order:\n stat_obj = task_stats[stat_name]\n if isinstance(stat_obj, dict):\n # Sort by description so that we get consistent report output\n inner_stat_objs = sorted(\n stat_obj.values(), key=attrgetter('description'))\n for inner_stat_obj in inner_stat_objs:\n if csv:\n report.append(inner_stat_obj.format_stats_csv())\n else:\n report.append(inner_stat_obj.format_stats())\n else:\n if csv:\n report.append(stat_obj.format_stats_csv())\n else:\n report.append(stat_obj.format_stats())\n\n report.append('')\n return '\\n'.join(report)\n\n def format_worker_status(\n self, instance, project, region, days=0, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Workers.\n\n Args:\n instance (string): The Turbinia 
instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n all_fields (bool): Include historical Task information for the worker.\n Returns:\n String of Request status\n \"\"\"\n # Set number of days to retrieve data\n num_days = 7\n if days != 0:\n num_days = days\n task_results = self.get_task_data(instance, project, region, days=num_days)\n if not task_results:\n return ''\n\n # Sort task_results by last updated timestamp.\n task_results = sorted(\n task_results, key=itemgetter('last_update'), reverse=True)\n\n # Create dictionary of worker_node: {{task_id, task_update,\n # task_name, task_status}}\n workers_dict = {}\n scheduled_counter = 0\n for result in task_results:\n worker_node = result.get('worker_name')\n status = result.get('status')\n status = status if status else 'No task status'\n if worker_node and worker_node not in workers_dict:\n workers_dict[worker_node] = []\n if worker_node:\n task_dict = {}\n task_dict['task_id'] = result.get('id')\n task_dict['last_update'] = result.get('last_update')\n task_dict['task_name'] = result.get('name')\n task_dict['status'] = status\n # Check status for anything that is running.\n if 'running' in status:\n run_time = (datetime.now() -\n result.get('last_update')).total_seconds()\n run_time = timedelta(seconds=run_time)\n task_dict['run_time'] = run_time\n else:\n run_time = result.get('run_time')\n task_dict['run_time'] = run_time if run_time else 'No run time.'\n workers_dict[worker_node].append(task_dict)\n else:\n # Track scheduled/unassigned Tasks for reporting.\n scheduled_counter += 1\n\n # Generate report header\n report = []\n report.append(\n fmt.heading1(\n 'Turbinia report for Worker activity within {0:d} days'.format(\n num_days)))\n report.append(\n fmt.bullet('{0:d} Worker(s) found.'.format(len(workers_dict.keys()))))\n report.append(\n fmt.bullet(\n '{0:d} Task(s) unassigned or scheduled and pending Worker assignment.'\n .format(scheduled_counter)))\n for worker_node, tasks in workers_dict.items():\n report.append('')\n report.append(fmt.heading2('Worker Node: {0:s}'.format(worker_node)))\n # Append the statuses chronologically\n run_status, queued_status, other_status = [], [], []\n for task in tasks:\n if 'running' in task['status']:\n run_status.extend(self.format_worker_task(task))\n elif 'queued' in task['status']:\n queued_status.extend(self.format_worker_task(task))\n else:\n other_status.extend(self.format_worker_task(task))\n # Add each of the status lists back to report list\n not_found = [fmt.bullet('No Tasks found.')]\n report.append(fmt.heading3('Running Tasks'))\n report.extend(run_status if run_status else not_found)\n report.append('')\n report.append(fmt.heading3('Queued Tasks'))\n report.extend(queued_status if queued_status else not_found)\n # Add Historical Tasks\n if all_fields:\n report.append('')\n report.append(fmt.heading3('Finished Tasks'))\n report.extend(other_status if other_status else not_found)\n return '\\n'.join(report)\n\n def format_request_status(\n self, instance, project, region, days=0, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Requests.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want 
history for.\n all_fields (bool): Include all fields for the Request, which includes,\n saved file paths.\n Returns:\n String of Request status\n \"\"\"\n # Set number of days to retrieve data\n num_days = 7\n if days != 0:\n num_days = days\n task_results = self.get_task_data(instance, project, region, days=num_days)\n if not task_results:\n return ''\n\n # Sort task_results by last updated timestamp.\n task_results = sorted(\n task_results, key=itemgetter('last_update'), reverse=True)\n\n # Create dictionary of request_id: {saved_paths, last_update, requester,\n # task_id}\n request_dict = {}\n for result in task_results:\n request_id = result.get('request_id')\n saved_paths = result.get('saved_paths')\n if request_id not in request_dict:\n saved_paths = set(saved_paths) if saved_paths else set()\n request_dict[request_id] = {}\n request_dict[request_id]['saved_paths'] = saved_paths\n request_dict[request_id]['last_update'] = result.get('last_update')\n request_dict[request_id]['requester'] = result.get('requester')\n request_dict[request_id]['task_id'] = set([result.get('id')])\n else:\n if saved_paths:\n request_dict[request_id]['saved_paths'].update(saved_paths)\n request_dict[request_id]['task_id'].update([result.get('id')])\n\n # Generate report header\n report = []\n report.append(\n fmt.heading1(\n 'Turbinia report for Requests made within {0:d} days'.format(\n num_days)))\n report.append(\n fmt.bullet(\n '{0:d} requests were made within this timeframe.'.format(\n len(request_dict.keys()))))\n # Print report data for Requests\n for request_id, values in request_dict.items():\n report.append('')\n report.append(fmt.heading2('Request ID: {0:s}'.format(request_id)))\n report.append(\n fmt.bullet(\n 'Last Update: {0:s}'.format(\n values['last_update'].strftime(DATETIME_FORMAT))))\n report.append(fmt.bullet('Requester: {0:s}'.format(values['requester'])))\n report.append(\n fmt.bullet('Task Count: {0:d}'.format(len(values['task_id']))))\n if all_fields:\n report.append(fmt.bullet('Associated Evidence:'))\n # Append all saved paths in request\n for path in sorted(values['saved_paths']):\n report.append(fmt.bullet(fmt.code(path), level=2))\n report.append('')\n return '\\n'.join(report)\n\n def format_task_status(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, all_fields=False, full_report=False,\n priority_filter=Priority.HIGH, output_json=False):\n \"\"\"Formats the recent history for Turbinia Tasks.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n all_fields (bool): Include all fields for the task, including task,\n request ids and saved file paths.\n full_report (bool): Generate a full markdown report instead of just a\n summary.\n priority_filter (int): Output only a summary for Tasks with a value\n greater than the priority_filter.\n output_json (bool): Whether to return JSON output.\n\n Returns:\n String of task status in JSON or human readable format.\n \"\"\"\n if user and days == 0:\n days = 1000\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user,\n output_json=output_json)\n if not task_results:\n return ''\n\n 
if output_json:\n return task_results\n\n # Sort all tasks by the report_priority so that tasks with a higher\n # priority are listed first in the report.\n for result in task_results:\n # 0 is a valid value, so checking against specific values\n if result.get('report_priority') in (None, ''):\n result['report_priority'] = Priority.LOW\n task_results = sorted(task_results, key=itemgetter('report_priority'))\n num_results = len(task_results)\n if not num_results:\n msg = 'No Turbinia Tasks found.'\n log.info(msg)\n return '\\n{0:s}'.format(msg)\n\n # Build up data\n report = []\n requester = task_results[0].get('requester')\n request_id = task_results[0].get('request_id')\n success_types = ['Successful', 'Failed', 'Scheduled or Running']\n success_values = [True, False, None]\n # Reverse mapping values to types\n success_map = dict(zip(success_values, success_types))\n task_map = defaultdict(list)\n success_types.insert(0, 'High Priority')\n for task in task_results:\n if task.get('report_priority') <= priority_filter:\n task_map['High Priority'].append(task)\n else:\n task_map[success_map[task.get('successful')]].append(task)\n\n # Generate report header\n report.append('\\n')\n report.append(fmt.heading1('Turbinia report {0:s}'.format(request_id)))\n report.append(\n fmt.bullet(\n 'Processed {0:d} Tasks for user {1:s}'.format(\n num_results, requester)))\n\n # Print report data for tasks\n for success_type in success_types:\n report.append('')\n report.append(fmt.heading1('{0:s} Tasks'.format(success_type)))\n if not task_map[success_type]:\n report.append(fmt.bullet('None'))\n for task in task_map[success_type]:\n if full_report and success_type == success_types[0]:\n report.extend(self.format_task_detail(task, show_files=all_fields))\n else:\n report.extend(self.format_task(task, show_files=all_fields))\n\n return '\\n'.join(report)\n\n def run_local_task(self, task_name, request):\n \"\"\"Runs a Turbinia Task locally.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n request (TurbiniaRequest): Object containing request and evidence info.\n\n Returns:\n TurbiniaTaskResult: The result returned by the Task Execution.\n \"\"\"\n task = self.create_task(task_name)\n task.request_id = request.request_id\n task.base_output_dir = config.OUTPUT_DIR\n task.run_local = True\n if not request.evidence:\n raise TurbiniaException('TurbiniaRequest does not contain evidence.')\n log.info('Running Task {0:s} locally'.format(task_name))\n result = task.run_wrapper(request.evidence[0])\n return result\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.server_pubsub.send_request(request)\n\n def close_tasks(\n self, instance, project, region, request_id=None, task_id=None, user=None,\n requester=None):\n \"\"\"Close Turbinia Tasks based on Request ID.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n request_id (string): The Id of the request we want tasks for.\n task_id (string): The Id of the request we want task for.\n user (string): The user of the request we want tasks for.\n requester (string): The user making the request to close tasks.\n\n Returns: String of closed Task IDs.\n \"\"\"\n cloud_function = gcp_function.GoogleCloudFunction(project)\n func_args = {\n 'instance': instance,\n 'kind': 'TurbiniaTask',\n 
'request_id': request_id,\n 'task_id': task_id,\n 'user': user,\n 'requester': requester\n }\n response = cloud_function.ExecuteFunction('closetasks', region, func_args)\n return 'Closed Task IDs: %s' % response.get('result')\n\n\nclass TurbiniaCeleryClient(BaseTurbiniaClient):\n \"\"\"Client class for Turbinia (Celery).\n\n Overriding some things specific to Celery operation.\n\n Attributes:\n redis (RedisStateManager): Redis datastore object\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TurbiniaCeleryClient, self).__init__(*args, **kwargs)\n self.redis = RedisStateManager()\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.kombu.send_request(request)\n\n # pylint: disable=arguments-differ\n def get_task_data(\n self, instance, _, __, days=0, task_id=None, request_id=None,\n function_name=None, output_json=False):\n \"\"\"Gets task data from Redis.\n\n We keep the same function signature, but ignore arguments passed for GCP.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n\n Returns:\n List of Task dict objects.\n \"\"\"\n return self.redis.get_task_data(instance, days, task_id, request_id)\n\n\nclass TurbiniaServer(object):\n \"\"\"Turbinia Server class.\n\n Attributes:\n task_manager (TaskManager): An object to manage turbinia tasks.\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initializes Turbinia Server.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(jobs_denylist, jobs_allowlist)\n\n def start(self):\n \"\"\"Start Turbinia Server.\"\"\"\n log.info('Starting Prometheus endpoint.')\n start_http_server(port=config.PROMETHEUS_PORT, addr=config.PROMETHEUS_ADDR)\n log.info('Running Turbinia Server.')\n self.task_manager.run()\n\n def add_evidence(self, evidence_):\n \"\"\"Add evidence to be processed.\"\"\"\n self.task_manager.add_evidence(evidence_)\n\n\nclass TurbiniaCeleryWorker(BaseTurbiniaClient):\n \"\"\"Turbinia Celery Worker class.\n\n Attributes:\n worker (celery.app): Celery worker app\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initialization for celery worker.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n super(TurbiniaCeleryWorker, self).__init__()\n # Deregister jobs from denylist/allowlist.\n job_manager.JobsManager.DeregisterJobs(jobs_denylist, jobs_allowlist)\n disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []\n disabled_jobs = [j.lower() for j in disabled_jobs]\n # Only actually disable jobs that have not been allowlisted.\n if jobs_allowlist:\n disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))\n if disabled_jobs:\n log.info(\n 'Disabling non-allowlisted jobs configured to be disabled in the '\n 'config file: {0:s}'.format(', '.join(disabled_jobs)))\n job_manager.JobsManager.DeregisterJobs(jobs_denylist=disabled_jobs)\n\n # Check for valid dependencies/directories.\n dependencies = 
config.ParseDependencies()\n if config.DOCKER_ENABLED:\n check_docker_dependencies(dependencies)\n check_system_dependencies(dependencies)\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n jobs = job_manager.JobsManager.GetJobNames()\n log.info(\n 'Dependency check complete. The following jobs will be enabled '\n 'for this worker: {0:s}'.format(','.join(jobs)))\n self.worker = self.task_manager.celery.app\n\n def start(self):\n \"\"\"Start Turbinia Celery Worker.\"\"\"\n log.info('Running Turbinia Celery Worker.')\n self.worker.task(task_manager.task_runner, name='task_runner')\n argv = ['celery', 'worker', '--loglevel=info', '--pool=solo']\n self.worker.start(argv)\n\n\nclass TurbiniaPsqWorker(object):\n \"\"\"Turbinia PSQ Worker class.\n\n Attributes:\n worker (psq.Worker): PSQ Worker object\n psq (psq.Queue): A Task queue object\n\n Raises:\n TurbiniaException: When errors occur\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initialization for PSQ Worker.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n psq_publisher = pubsub.PublisherClient()\n psq_subscriber = pubsub.SubscriberClient()\n datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)\n try:\n self.psq = psq.Queue(\n psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,\n name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))\n except exceptions.GoogleCloudError as e:\n msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))\n log.error(msg)\n raise TurbiniaException(msg)\n\n # Deregister jobs from denylist/allowlist.\n job_manager.JobsManager.DeregisterJobs(jobs_denylist, jobs_allowlist)\n disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []\n disabled_jobs = [j.lower() for j in disabled_jobs]\n # Only actually disable jobs that have not been allowlisted.\n if jobs_allowlist:\n disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))\n if disabled_jobs:\n log.info(\n 'Disabling non-allowlisted jobs configured to be disabled in the '\n 'config file: {0:s}'.format(', '.join(disabled_jobs)))\n job_manager.JobsManager.DeregisterJobs(jobs_denylist=disabled_jobs)\n\n # Check for valid dependencies/directories.\n dependencies = config.ParseDependencies()\n if config.DOCKER_ENABLED:\n check_docker_dependencies(dependencies)\n check_system_dependencies(dependencies)\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n jobs = job_manager.JobsManager.GetJobNames()\n log.info(\n 'Dependency check complete. 
The following jobs are enabled '\n 'for this worker: {0:s}'.format(','.join(jobs)))\n log.info('Starting PSQ listener on queue {0:s}'.format(self.psq.name))\n self.worker = psq.Worker(queue=self.psq)\n\n def start(self):\n \"\"\"Start Turbinia PSQ Worker.\"\"\"\n log.info('Starting Prometheus endpoint.')\n start_http_server(port=config.PROMETHEUS_PORT, addr=config.PROMETHEUS_ADDR)\n log.info('Running Turbinia PSQ Worker.')\n self.worker.listen()\n", "path": "turbinia/client.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Client objects for Turbinia.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport httplib2\nimport json\nimport logging\nfrom operator import itemgetter\nfrom operator import attrgetter\nimport os\nimport stat\nimport time\nimport subprocess\nimport codecs\n\nfrom google import auth\nfrom prometheus_client import start_http_server\nfrom turbinia import config\nfrom turbinia.config import logger\nfrom turbinia.config import DATETIME_FORMAT\nfrom turbinia import task_manager\nfrom turbinia import TurbiniaException\nfrom turbinia.lib import text_formatter as fmt\nfrom turbinia.lib import docker_manager\nfrom turbinia.jobs import manager as job_manager\nfrom turbinia.workers import Priority\nfrom turbinia.workers.artifact import FileArtifactExtractionTask\nfrom turbinia.workers.analysis.wordpress import WordpressAccessLogAnalysisTask\nfrom turbinia.workers.analysis.jenkins import JenkinsAnalysisTask\nfrom turbinia.workers.analysis.jupyter import JupyterAnalysisTask\nfrom turbinia.workers.finalize_request import FinalizeRequestTask\nfrom turbinia.workers.docker import DockerContainersEnumerationTask\nfrom turbinia.workers.grep import GrepTask\nfrom turbinia.workers.hadoop import HadoopAnalysisTask\nfrom turbinia.workers.hindsight import HindsightTask\nfrom turbinia.workers.partitions import PartitionEnumerationTask\nfrom turbinia.workers.plaso import PlasoTask\nfrom turbinia.workers.psort import PsortTask\nfrom turbinia.workers.redis import RedisAnalysisTask\nfrom turbinia.workers.sshd import SSHDAnalysisTask\nfrom turbinia.workers.strings import StringsAsciiTask\nfrom turbinia.workers.strings import StringsUnicodeTask\nfrom turbinia.workers.tomcat import TomcatAnalysisTask\nfrom turbinia.workers.volatility import VolatilityTask\nfrom turbinia.workers.worker_stat import StatTask\nfrom turbinia.workers.binary_extractor import BinaryExtractorTask\nfrom turbinia.workers.bulk_extractor import BulkExtractorTask\nfrom turbinia.workers.photorec import PhotorecTask\n\nMAX_RETRIES = 10\nRETRY_SLEEP = 60\n\n# TODO(aarontp): Remove this map after\n# https://github.com/google/turbinia/issues/278 is fixed.\nTASK_MAP = {\n 'fileartifactextractiontask': FileArtifactExtractionTask,\n 'wordpressaccessloganalysistask': WordpressAccessLogAnalysisTask,\n 'finalizerequesttask': 
FinalizeRequestTask,\n 'jenkinsanalysistask': JenkinsAnalysisTask,\n 'JupyterAnalysisTask': JupyterAnalysisTask,\n 'greptask': GrepTask,\n 'hadoopanalysistask': HadoopAnalysisTask,\n 'hindsighttask': HindsightTask,\n 'partitionenumerationtask': PartitionEnumerationTask,\n 'plasotask': PlasoTask,\n 'psorttask': PsortTask,\n 'redisanalysistask': RedisAnalysisTask,\n 'sshdanalysistask': SSHDAnalysisTask,\n 'stringsasciitask': StringsAsciiTask,\n 'stringsunicodetask': StringsUnicodeTask,\n 'tomcatanalysistask': TomcatAnalysisTask,\n 'volatilitytask': VolatilityTask,\n 'stattask': StatTask,\n 'binaryextractor': BinaryExtractorTask,\n 'bulkextractortask': BulkExtractorTask,\n 'dockertask': DockerContainersEnumerationTask,\n 'photorectask': PhotorecTask\n}\n\nconfig.LoadConfig()\nif config.TASK_MANAGER.lower() == 'psq':\n import psq\n\n from google.cloud import exceptions\n from google.cloud import datastore\n from google.cloud import pubsub\n\n from libcloudforensics.providers.gcp.internal import function as gcp_function\nelif config.TASK_MANAGER.lower() == 'celery':\n from turbinia.state_manager import RedisStateManager\n\nlog = logging.getLogger('turbinia')\nlogger.setup()\n\n\ndef get_turbinia_client(run_local=False):\n \"\"\"Return Turbinia client based on config.\n\n Returns:\n Initialized BaseTurbiniaClient or TurbiniaCeleryClient object.\n \"\"\"\n config.LoadConfig()\n # pylint: disable=no-else-return\n if config.TASK_MANAGER.lower() == 'psq':\n return BaseTurbiniaClient(run_local=run_local)\n elif config.TASK_MANAGER.lower() == 'celery':\n return TurbiniaCeleryClient(run_local=run_local)\n else:\n msg = 'Task Manager type \"{0:s}\" not implemented'.format(\n config.TASK_MANAGER)\n raise TurbiniaException(msg)\n\n\ndef check_docker_dependencies(dependencies):\n \"\"\"Checks docker dependencies.\n\n Args:\n dependencies(dict): dictionary of dependencies to check for.\n\n Raises:\n TurbiniaException: If dependency is not met.\n \"\"\"\n #TODO(wyassine): may run into issues down the line when a docker image\n # does not have bash or which installed. (no linux fs layer).\n log.info('Performing docker dependency check.')\n job_names = list(job_manager.JobsManager.GetJobNames())\n images = docker_manager.DockerManager().list_images(return_filter='short_id')\n\n # Iterate through list of jobs\n for job, values in dependencies.items():\n if job not in job_names:\n log.warning(\n 'The job {0:s} was not found or has been disabled. Skipping '\n 'dependency check...'.format(job))\n continue\n docker_image = values.get('docker_image')\n # short id only pulls the first 10 characters of image id.\n if docker_image and len(docker_image) > 10:\n docker_image = docker_image[0:10]\n\n if docker_image in images:\n for program in values['programs']:\n cmd = 'type {0:s}'.format(program)\n stdout, stderr, ret = docker_manager.ContainerManager(\n values['docker_image']).execute_container(cmd, shell=True)\n if ret != 0:\n raise TurbiniaException(\n 'Job dependency {0:s} not found for job {1:s}. Please install '\n 'the dependency for the container or disable the job.'.format(\n program, job))\n job_manager.JobsManager.RegisterDockerImage(job, values['docker_image'])\n elif docker_image:\n raise TurbiniaException(\n 'Docker image {0:s} was not found for the job {1:s}. 
Please '\n 'update the config with the correct image id'.format(\n values['docker_image'], job))\n\n\ndef check_system_dependencies(dependencies):\n \"\"\"Checks system dependencies.\n\n Args:\n dependencies(dict): dictionary of dependencies to check for.\n\n Raises:\n TurbiniaException: If dependency is not met.\n \"\"\"\n log.info('Performing system dependency check.')\n job_names = list(job_manager.JobsManager.GetJobNames())\n\n # Iterate through list of jobs\n for job, values in dependencies.items():\n if job not in job_names:\n log.warning(\n 'The job {0:s} was not found or has been disabled. Skipping '\n 'dependency check...'.format(job))\n continue\n elif not values.get('docker_image'):\n for program in values['programs']:\n cmd = 'type {0:s}'.format(program)\n proc = subprocess.Popen(cmd, shell=True)\n proc.communicate()\n ret = proc.returncode\n if ret != 0:\n raise TurbiniaException(\n 'Job dependency {0:s} not found in $PATH for the job {1:s}. '\n 'Please install the dependency or disable the job.'.format(\n program, job))\n\n\ndef check_directory(directory):\n \"\"\"Checks directory to make sure it exists and is writable.\n\n Args:\n directory (string): Path to directory\n\n Raises:\n TurbiniaException: When directory cannot be created or used.\n \"\"\"\n if os.path.exists(directory) and not os.path.isdir(directory):\n raise TurbiniaException(\n 'File {0:s} exists, but is not a directory'.format(directory))\n\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except OSError:\n raise TurbiniaException(\n 'Can not create Directory {0:s}'.format(directory))\n\n if not os.access(directory, os.W_OK):\n try:\n mode = os.stat(directory)[0]\n os.chmod(directory, mode | stat.S_IWUSR)\n except OSError:\n raise TurbiniaException(\n 'Can not add write permissions to {0:s}'.format(directory))\n\n\nclass TurbiniaStats(object):\n \"\"\"Statistics for Turbinia task execution.\n\n Attributes:\n count(int): The number of tasks\n min(datetime.timedelta): The minimum run time of all tasks\n max(datetime.timedelta): The maximum run time of all tasks\n mean(datetime.timedelta): The mean run time of all tasks\n tasks(list): A list of tasks to calculate stats for\n \"\"\"\n\n def __init__(self, description=None):\n self.description = description\n self.min = None\n self.mean = None\n self.max = None\n self.tasks = []\n\n def __str__(self):\n return self.format_stats()\n\n @property\n def count(self):\n \"\"\"Gets a count of the tasks in this stats object.\n\n Returns:\n Int of task count.\n \"\"\"\n return len(self.tasks)\n\n def add_task(self, task):\n \"\"\"Add a task result dict.\n\n Args:\n task(dict): The task results we want to count stats for.\n \"\"\"\n self.tasks.append(task)\n\n def calculate_stats(self):\n \"\"\"Calculates statistics of the current tasks.\"\"\"\n if not self.tasks:\n return\n\n sorted_tasks = sorted(self.tasks, key=itemgetter('run_time'))\n self.min = sorted_tasks[0]['run_time']\n self.max = sorted_tasks[len(sorted_tasks) - 1]['run_time']\n self.mean = sorted_tasks[len(sorted_tasks) // 2]['run_time']\n\n # Remove the microseconds to keep things cleaner\n self.min = self.min - timedelta(microseconds=self.min.microseconds)\n self.max = self.max - timedelta(microseconds=self.max.microseconds)\n self.mean = self.mean - timedelta(microseconds=self.mean.microseconds)\n\n def format_stats(self):\n \"\"\"Formats statistics data.\n\n Returns:\n String of statistics data\n \"\"\"\n return '{0:s}: Count: {1:d}, Min: {2!s}, Mean: {3!s}, Max: {4!s}'.format(\n 
self.description, self.count, self.min, self.mean, self.max)\n\n def format_stats_csv(self):\n \"\"\"Formats statistics data into CSV output.\n\n Returns:\n String of statistics data in CSV format\n \"\"\"\n return '{0:s}, {1:d}, {2!s}, {3!s}, {4!s}'.format(\n self.description, self.count, self.min, self.mean, self.max)\n\n\nclass BaseTurbiniaClient(object):\n \"\"\"Client class for Turbinia.\n\n Attributes:\n task_manager (TaskManager): Turbinia task manager\n \"\"\"\n\n def __init__(self, run_local=False):\n config.LoadConfig()\n if run_local:\n self.task_manager = None\n else:\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(server=False)\n\n def create_task(self, task_name):\n \"\"\"Creates a Turbinia Task by name.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n\n Returns:\n TurbiniaTask: An instantiated Task object.\n\n Raises:\n TurbiniaException: When no Task object matching task_name is found.\n \"\"\"\n task_obj = TASK_MAP.get(task_name.lower())\n log.debug('Looking up Task {0:s} by name'.format(task_name))\n if not task_obj:\n raise TurbiniaException('No Task named {0:s} found'.format(task_name))\n return task_obj()\n\n def list_jobs(self):\n \"\"\"List the available jobs.\"\"\"\n # TODO(aarontp): Refactor this out so that we don't need to depend on\n # the task manager from the client.\n log.info('Available Jobs:')\n for job in self.task_manager.jobs:\n log.info('\\t{0:s}'.format(job.NAME))\n\n def wait_for_request(\n self, instance, project, region, request_id=None, user=None,\n poll_interval=60):\n \"\"\"Polls and waits for Turbinia Request to complete.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n poll_interval (int): Interval of seconds between polling cycles.\n \"\"\"\n last_completed_count = -1\n last_uncompleted_count = -1\n while True:\n task_results = self.get_task_data(\n instance, project, region, request_id=request_id, user=user)\n completed_tasks = []\n uncompleted_tasks = []\n for task in task_results:\n if task.get('successful') is not None:\n completed_tasks.append(task)\n else:\n uncompleted_tasks.append(task)\n\n if completed_tasks and len(completed_tasks) == len(task_results):\n break\n\n completed_names = [t.get('name') for t in completed_tasks]\n completed_names = ', '.join(sorted(completed_names))\n uncompleted_names = [t.get('name') for t in uncompleted_tasks]\n uncompleted_names = ', '.join(sorted(uncompleted_names))\n total_count = len(completed_tasks) + len(uncompleted_tasks)\n msg = (\n 'Tasks completed ({0:d}/{1:d}): [{2:s}], waiting for [{3:s}].'.format(\n len(completed_tasks), total_count, completed_names,\n uncompleted_names))\n if (len(completed_tasks) > last_completed_count or\n len(uncompleted_tasks) > last_uncompleted_count):\n log.info(msg)\n else:\n log.debug(msg)\n\n last_completed_count = len(completed_tasks)\n last_uncompleted_count = len(uncompleted_tasks)\n time.sleep(poll_interval)\n\n log.info('All {0:d} Tasks completed'.format(len(task_results)))\n\n def get_task_data(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, function_name='gettasks', output_json=False):\n \"\"\"Gets task data from Google Cloud Functions.\n\n Args:\n instance (string): The 
Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n function_name (string): The GCF function we want to call.\n output_json (bool): Whether to return JSON output.\n\n Returns:\n (List|JSON string) of Task dict objects\n \"\"\"\n cloud_function = gcp_function.GoogleCloudFunction(project)\n func_args = {'instance': instance, 'kind': 'TurbiniaTask'}\n\n if days:\n start_time = datetime.now() - timedelta(days=days)\n # Format this like '1990-01-01T00:00:00z' so we can cast it directly to a\n # javascript Date() object in the cloud function.\n start_string = start_time.strftime(DATETIME_FORMAT)\n func_args.update({'start_time': start_string})\n elif task_id:\n func_args.update({'task_id': task_id})\n elif request_id:\n func_args.update({'request_id': request_id})\n\n if user:\n func_args.update({'user': user})\n\n response = None\n retry_count = 0\n credential_error_count = 0\n while response is None and retry_count < MAX_RETRIES:\n try:\n response = cloud_function.ExecuteFunction(\n function_name, region, func_args)\n except auth.exceptions.RefreshError as exception:\n if credential_error_count == 0:\n log.info(\n 'GCP Credentials need to be refreshed, please refresh in another '\n 'terminal and this process will resume. Error: {0!s}'.format(\n exception))\n else:\n log.debug(\n 'GCP Credentials need to be refreshed, please refresh in another '\n 'terminal and this process will resume. Attempt {0:d}. Error: '\n '{1!s}'.format(credential_error_count + 1, exception))\n # Note, we are intentially not incrementing the retry_count here because\n # we will retry indefinitely while we wait for the user to reauth.\n credential_error_count += 1\n except httplib2.ServerNotFoundError as exception:\n log.info(\n 'Error connecting to server, will retry [{0:d} of {1:d} retries]: '\n '{2!s}'.format(retry_count, MAX_RETRIES, exception))\n retry_count += 1\n\n if response is None:\n time.sleep(RETRY_SLEEP)\n\n if 'result' not in response:\n log.error('No results found')\n if response.get('error', '{}') != '{}':\n msg = 'Error executing Cloud Function: [{0!s}].'.format(\n response.get('error'))\n log.error(msg)\n log.debug('GCF response: {0!s}'.format(response))\n raise TurbiniaException(\n 'Cloud Function {0:s} returned no results.'.format(function_name))\n\n try:\n results = json.loads(response['result'])\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not deserialize result [{0!s}] from GCF: [{1!s}]'.format(\n response.get('result'), e))\n\n task_data = results[0]\n if output_json:\n try:\n json_data = json.dumps(task_data)\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not re-serialize result [{0!s}] from GCF: [{1!s}]'.format(\n str(task_data), e))\n return json_data\n\n # Convert run_time/last_update back into datetime objects\n for task in task_data:\n if task.get('run_time'):\n task['run_time'] = timedelta(seconds=task['run_time'])\n if task.get('last_update'):\n task['last_update'] = datetime.strptime(\n task['last_update'], DATETIME_FORMAT)\n\n return task_data\n\n def format_task_detail(self, task, show_files=False):\n \"\"\"Formats a single task in detail.\n\n Args:\n task (dict): The task to format 
data for\n show_files (bool): Whether we want to print out log file paths\n\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n saved_paths = task.get('saved_paths') or []\n status = task.get('status') or 'No task status'\n\n report.append(fmt.heading2(task.get('name')))\n line = '{0:s} {1:s}'.format(fmt.bold('Status:'), status)\n report.append(fmt.bullet(line))\n report.append(fmt.bullet('Task Id: {0:s}'.format(task.get('id'))))\n report.append(\n fmt.bullet('Executed on worker {0:s}'.format(task.get('worker_name'))))\n if task.get('report_data'):\n report.append('')\n report.append(fmt.heading3('Task Reported Data'))\n report.extend(task.get('report_data').splitlines())\n if show_files:\n report.append('')\n report.append(fmt.heading3('Saved Task Files:'))\n for path in saved_paths:\n report.append(fmt.bullet(fmt.code(path)))\n report.append('')\n return report\n\n def format_worker_task(self, task):\n \"\"\"Formats a single task for Worker view.\n\n Args:\n task (dict): The task to format data for\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n report.append(\n fmt.bullet('{0:s} - {1:s}'.format(task['task_id'], task['task_name'])))\n report.append(\n fmt.bullet(\n 'Last Update: {0:s}'.format(\n task['last_update'].strftime(DATETIME_FORMAT)), level=2))\n report.append(fmt.bullet('Status: {0:s}'.format(task['status']), level=2))\n report.append(\n fmt.bullet('Run Time: {0:s}'.format(str(task['run_time'])), level=2))\n report.append('')\n return report\n\n def format_task(self, task, show_files=False):\n \"\"\"Formats a single task in short form.\n\n Args:\n task (dict): The task to format data for\n show_files (bool): Whether we want to print out log file paths\n\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n saved_paths = task.get('saved_paths') or []\n status = task.get('status') or 'No task status'\n report.append(fmt.bullet('{0:s}: {1:s}'.format(task.get('name'), status)))\n if show_files:\n for path in saved_paths:\n report.append(fmt.bullet(fmt.code(path), level=2))\n report.append('')\n return report\n\n def get_task_statistics(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None):\n \"\"\"Gathers statistics for Turbinia execution data.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n\n Returns:\n task_stats(dict): Mapping of statistic names to values\n \"\"\"\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user)\n if not task_results:\n return {}\n\n task_stats = {\n 'all_tasks': TurbiniaStats('All Tasks'),\n 'successful_tasks': TurbiniaStats('Successful Tasks'),\n 'failed_tasks': TurbiniaStats('Failed Tasks'),\n 'requests': TurbiniaStats('Total Request Time'),\n # The following are dicts mapping the user/worker/type names to their\n # respective TurbiniaStats() objects.\n # Total wall-time for all tasks of a given type\n 'tasks_per_type': {},\n # Total wall-time for all tasks per Worker\n 'tasks_per_worker': {},\n # Total wall-time for all tasks per User\n 'tasks_per_user': {},\n }\n\n # map of request ids to [min time, max time]\n requests = {}\n\n for task in task_results:\n 
request_id = task.get('request_id')\n task_type = task.get('name')\n worker = task.get('worker_name')\n user = task.get('requester')\n if not task.get('run_time'):\n log.debug(\n 'Ignoring task {0:s} in statistics because the run_time is not '\n 'set, and it is required to calculate stats'.format(\n task.get('name')))\n continue\n\n # Stats for all/successful/failed tasks\n task_stats['all_tasks'].add_task(task)\n if task.get('successful') is True:\n task_stats['successful_tasks'].add_task(task)\n elif task.get('successful') is False:\n task_stats['failed_tasks'].add_task(task)\n\n # Stats for Tasks per Task type.\n if task_type in task_stats['tasks_per_type']:\n task_type_stats = task_stats['tasks_per_type'].get(task_type)\n else:\n task_type_stats = TurbiniaStats('Task type {0:s}'.format(task_type))\n task_stats['tasks_per_type'][task_type] = task_type_stats\n task_type_stats.add_task(task)\n\n # Stats per worker.\n if worker in task_stats['tasks_per_worker']:\n worker_stats = task_stats['tasks_per_worker'].get(worker)\n else:\n worker_stats = TurbiniaStats('Worker {0:s}'.format(worker))\n task_stats['tasks_per_worker'][worker] = worker_stats\n worker_stats.add_task(task)\n\n # Stats per submitting User.\n if user in task_stats['tasks_per_user']:\n user_stats = task_stats['tasks_per_user'].get(user)\n else:\n user_stats = TurbiniaStats('User {0:s}'.format(user))\n task_stats['tasks_per_user'][user] = user_stats\n user_stats.add_task(task)\n\n # Stats for the total request. This will, for each request, calculate the\n # start time of the earliest task and the stop time of the latest task.\n # This will give the overall run time covering all tasks in the request.\n task_start_time = task['last_update'] - task['run_time']\n task_stop_time = task['last_update']\n if request_id in requests:\n start_time, stop_time = requests[request_id]\n if task_start_time < start_time:\n requests[request_id][0] = task_start_time\n if task_stop_time > stop_time:\n requests[request_id][1] = task_stop_time\n else:\n requests[request_id] = [task_start_time, task_stop_time]\n\n # Add a fake task result for each request with our calculated times to the\n # stats module\n for min_time, max_time in requests.values():\n task = {}\n task['run_time'] = max_time - min_time\n task_stats['requests'].add_task(task)\n\n # Go over all stat objects and calculate them\n for stat_obj in task_stats.values():\n if isinstance(stat_obj, dict):\n for inner_stat_obj in stat_obj.values():\n inner_stat_obj.calculate_stats()\n else:\n stat_obj.calculate_stats()\n\n return task_stats\n\n def format_task_statistics(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, csv=False):\n \"\"\"Formats statistics for Turbinia execution data.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n csv (bool): Whether we want the output in CSV format.\n\n Returns:\n String of task statistics report\n \"\"\"\n task_stats = self.get_task_statistics(\n instance, project, region, days, task_id, request_id, user)\n if not task_stats:\n return 'No tasks found'\n\n stats_order = [\n 'all_tasks', 'successful_tasks', 'failed_tasks', 'requests',\n 
'tasks_per_type', 'tasks_per_worker', 'tasks_per_user'\n ]\n\n if csv:\n report = ['stat_type, count, min, mean, max']\n else:\n report = ['Execution time statistics for Turbinia:', '']\n for stat_name in stats_order:\n stat_obj = task_stats[stat_name]\n if isinstance(stat_obj, dict):\n # Sort by description so that we get consistent report output\n inner_stat_objs = sorted(\n stat_obj.values(), key=attrgetter('description'))\n for inner_stat_obj in inner_stat_objs:\n if csv:\n report.append(inner_stat_obj.format_stats_csv())\n else:\n report.append(inner_stat_obj.format_stats())\n else:\n if csv:\n report.append(stat_obj.format_stats_csv())\n else:\n report.append(stat_obj.format_stats())\n\n report.append('')\n return '\\n'.join(report)\n\n def format_worker_status(\n self, instance, project, region, days=0, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Workers.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n all_fields (bool): Include historical Task information for the worker.\n Returns:\n String of Request status\n \"\"\"\n # Set number of days to retrieve data\n num_days = 7\n if days != 0:\n num_days = days\n task_results = self.get_task_data(instance, project, region, days=num_days)\n if not task_results:\n return ''\n\n # Sort task_results by last updated timestamp.\n task_results = sorted(\n task_results, key=itemgetter('last_update'), reverse=True)\n\n # Create dictionary of worker_node: {{task_id, task_update,\n # task_name, task_status}}\n workers_dict = {}\n scheduled_counter = 0\n for result in task_results:\n worker_node = result.get('worker_name')\n status = result.get('status')\n status = status if status else 'No task status'\n if worker_node and worker_node not in workers_dict:\n workers_dict[worker_node] = []\n if worker_node:\n task_dict = {}\n task_dict['task_id'] = result.get('id')\n task_dict['last_update'] = result.get('last_update')\n task_dict['task_name'] = result.get('name')\n task_dict['status'] = status\n # Check status for anything that is running.\n if 'running' in status:\n run_time = (datetime.now() -\n result.get('last_update')).total_seconds()\n run_time = timedelta(seconds=run_time)\n task_dict['run_time'] = run_time\n else:\n run_time = result.get('run_time')\n task_dict['run_time'] = run_time if run_time else 'No run time.'\n workers_dict[worker_node].append(task_dict)\n else:\n # Track scheduled/unassigned Tasks for reporting.\n scheduled_counter += 1\n\n # Generate report header\n report = []\n report.append(\n fmt.heading1(\n 'Turbinia report for Worker activity within {0:d} days'.format(\n num_days)))\n report.append(\n fmt.bullet('{0:d} Worker(s) found.'.format(len(workers_dict.keys()))))\n report.append(\n fmt.bullet(\n '{0:d} Task(s) unassigned or scheduled and pending Worker assignment.'\n .format(scheduled_counter)))\n for worker_node, tasks in workers_dict.items():\n report.append('')\n report.append(fmt.heading2('Worker Node: {0:s}'.format(worker_node)))\n # Append the statuses chronologically\n run_status, queued_status, other_status = [], [], []\n for task in tasks:\n if 'running' in task['status']:\n run_status.extend(self.format_worker_task(task))\n elif 'queued' in task['status']:\n queued_status.extend(self.format_worker_task(task))\n else:\n 
other_status.extend(self.format_worker_task(task))\n # Add each of the status lists back to report list\n not_found = [fmt.bullet('No Tasks found.')]\n report.append(fmt.heading3('Running Tasks'))\n report.extend(run_status if run_status else not_found)\n report.append('')\n report.append(fmt.heading3('Queued Tasks'))\n report.extend(queued_status if queued_status else not_found)\n # Add Historical Tasks\n if all_fields:\n report.append('')\n report.append(fmt.heading3('Finished Tasks'))\n report.extend(other_status if other_status else not_found)\n return '\\n'.join(report)\n\n def format_request_status(\n self, instance, project, region, days=0, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Requests.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n all_fields (bool): Include all fields for the Request, which includes,\n saved file paths.\n Returns:\n String of Request status\n \"\"\"\n # Set number of days to retrieve data\n num_days = 7\n if days != 0:\n num_days = days\n task_results = self.get_task_data(instance, project, region, days=num_days)\n if not task_results:\n return ''\n\n # Sort task_results by last updated timestamp.\n task_results = sorted(\n task_results, key=itemgetter('last_update'), reverse=True)\n\n # Create dictionary of request_id: {saved_paths, last_update, requester,\n # task_id}\n request_dict = {}\n for result in task_results:\n request_id = result.get('request_id')\n saved_paths = result.get('saved_paths')\n if request_id not in request_dict:\n saved_paths = set(saved_paths) if saved_paths else set()\n request_dict[request_id] = {}\n request_dict[request_id]['saved_paths'] = saved_paths\n request_dict[request_id]['last_update'] = result.get('last_update')\n request_dict[request_id]['requester'] = result.get('requester')\n request_dict[request_id]['task_id'] = set([result.get('id')])\n else:\n if saved_paths:\n request_dict[request_id]['saved_paths'].update(saved_paths)\n request_dict[request_id]['task_id'].update([result.get('id')])\n\n # Generate report header\n report = []\n report.append(\n fmt.heading1(\n 'Turbinia report for Requests made within {0:d} days'.format(\n num_days)))\n report.append(\n fmt.bullet(\n '{0:d} requests were made within this timeframe.'.format(\n len(request_dict.keys()))))\n # Print report data for Requests\n for request_id, values in request_dict.items():\n report.append('')\n report.append(fmt.heading2('Request ID: {0:s}'.format(request_id)))\n report.append(\n fmt.bullet(\n 'Last Update: {0:s}'.format(\n values['last_update'].strftime(DATETIME_FORMAT))))\n report.append(fmt.bullet('Requester: {0:s}'.format(values['requester'])))\n report.append(\n fmt.bullet('Task Count: {0:d}'.format(len(values['task_id']))))\n if all_fields:\n report.append(fmt.bullet('Associated Evidence:'))\n # Append all saved paths in request\n for path in sorted(values['saved_paths']):\n report.append(fmt.bullet(fmt.code(path), level=2))\n report.append('')\n return '\\n'.join(report)\n\n def format_task_status(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, all_fields=False, full_report=False,\n priority_filter=Priority.HIGH, output_json=False):\n \"\"\"Formats the recent history for Turbinia Tasks.\n\n Args:\n instance (string): The Turbinia instance name (by default the 
same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n all_fields (bool): Include all fields for the task, including task,\n request ids and saved file paths.\n full_report (bool): Generate a full markdown report instead of just a\n summary.\n priority_filter (int): Output only a summary for Tasks with a value\n greater than the priority_filter.\n output_json (bool): Whether to return JSON output.\n\n Returns:\n String of task status in JSON or human readable format.\n \"\"\"\n if user and days == 0:\n days = 1000\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user,\n output_json=output_json)\n if not task_results:\n return ''\n\n if output_json:\n return task_results\n\n # Sort all tasks by the report_priority so that tasks with a higher\n # priority are listed first in the report.\n for result in task_results:\n # 0 is a valid value, so checking against specific values\n if result.get('report_priority') in (None, ''):\n result['report_priority'] = Priority.LOW\n task_results = sorted(task_results, key=itemgetter('report_priority'))\n num_results = len(task_results)\n if not num_results:\n msg = 'No Turbinia Tasks found.'\n log.info(msg)\n return '\\n{0:s}'.format(msg)\n\n # Build up data\n report = []\n requester = task_results[0].get('requester')\n request_id = task_results[0].get('request_id')\n success_types = ['Successful', 'Failed', 'Scheduled or Running']\n success_values = [True, False, None]\n # Reverse mapping values to types\n success_map = dict(zip(success_values, success_types))\n task_map = defaultdict(list)\n success_types.insert(0, 'High Priority')\n for task in task_results:\n if task.get('report_priority') <= priority_filter:\n task_map['High Priority'].append(task)\n else:\n task_map[success_map[task.get('successful')]].append(task)\n\n # Generate report header\n report.append('\\n')\n report.append(fmt.heading1('Turbinia report {0:s}'.format(request_id)))\n report.append(\n fmt.bullet(\n 'Processed {0:d} Tasks for user {1:s}'.format(\n num_results, requester)))\n\n # Print report data for tasks\n for success_type in success_types:\n report.append('')\n report.append(fmt.heading1('{0:s} Tasks'.format(success_type)))\n if not task_map[success_type]:\n report.append(fmt.bullet('None'))\n for task in task_map[success_type]:\n if full_report and success_type == success_types[0]:\n report.extend(self.format_task_detail(task, show_files=all_fields))\n else:\n report.extend(self.format_task(task, show_files=all_fields))\n\n return '\\n'.join(report)\n\n def run_local_task(self, task_name, request):\n \"\"\"Runs a Turbinia Task locally.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n request (TurbiniaRequest): Object containing request and evidence info.\n\n Returns:\n TurbiniaTaskResult: The result returned by the Task Execution.\n \"\"\"\n task = self.create_task(task_name)\n task.request_id = request.request_id\n task.base_output_dir = config.OUTPUT_DIR\n task.run_local = True\n if not request.evidence:\n raise TurbiniaException('TurbiniaRequest does not contain evidence.')\n log.info('Running Task {0:s} locally'.format(task_name))\n result = task.run_wrapper(request.evidence[0].serialize())\n return 
result\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.server_pubsub.send_request(request)\n\n def close_tasks(\n self, instance, project, region, request_id=None, task_id=None, user=None,\n requester=None):\n \"\"\"Close Turbinia Tasks based on Request ID.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n request_id (string): The Id of the request we want tasks for.\n task_id (string): The Id of the request we want task for.\n user (string): The user of the request we want tasks for.\n requester (string): The user making the request to close tasks.\n\n Returns: String of closed Task IDs.\n \"\"\"\n cloud_function = gcp_function.GoogleCloudFunction(project)\n func_args = {\n 'instance': instance,\n 'kind': 'TurbiniaTask',\n 'request_id': request_id,\n 'task_id': task_id,\n 'user': user,\n 'requester': requester\n }\n response = cloud_function.ExecuteFunction('closetasks', region, func_args)\n return 'Closed Task IDs: %s' % response.get('result')\n\n\nclass TurbiniaCeleryClient(BaseTurbiniaClient):\n \"\"\"Client class for Turbinia (Celery).\n\n Overriding some things specific to Celery operation.\n\n Attributes:\n redis (RedisStateManager): Redis datastore object\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TurbiniaCeleryClient, self).__init__(*args, **kwargs)\n self.redis = RedisStateManager()\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.kombu.send_request(request)\n\n # pylint: disable=arguments-differ\n def get_task_data(\n self, instance, _, __, days=0, task_id=None, request_id=None,\n function_name=None, output_json=False):\n \"\"\"Gets task data from Redis.\n\n We keep the same function signature, but ignore arguments passed for GCP.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n\n Returns:\n List of Task dict objects.\n \"\"\"\n return self.redis.get_task_data(instance, days, task_id, request_id)\n\n\nclass TurbiniaServer(object):\n \"\"\"Turbinia Server class.\n\n Attributes:\n task_manager (TaskManager): An object to manage turbinia tasks.\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initializes Turbinia Server.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(jobs_denylist, jobs_allowlist)\n\n def start(self):\n \"\"\"Start Turbinia Server.\"\"\"\n log.info('Starting Prometheus endpoint.')\n start_http_server(port=config.PROMETHEUS_PORT, addr=config.PROMETHEUS_ADDR)\n log.info('Running Turbinia Server.')\n self.task_manager.run()\n\n def add_evidence(self, evidence_):\n \"\"\"Add evidence to be processed.\"\"\"\n self.task_manager.add_evidence(evidence_)\n\n\nclass TurbiniaCeleryWorker(BaseTurbiniaClient):\n \"\"\"Turbinia Celery Worker class.\n\n Attributes:\n worker (celery.app): Celery worker app\n \"\"\"\n\n 
def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initialization for celery worker.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n super(TurbiniaCeleryWorker, self).__init__()\n # Deregister jobs from denylist/allowlist.\n job_manager.JobsManager.DeregisterJobs(jobs_denylist, jobs_allowlist)\n disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []\n disabled_jobs = [j.lower() for j in disabled_jobs]\n # Only actually disable jobs that have not been allowlisted.\n if jobs_allowlist:\n disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))\n if disabled_jobs:\n log.info(\n 'Disabling non-allowlisted jobs configured to be disabled in the '\n 'config file: {0:s}'.format(', '.join(disabled_jobs)))\n job_manager.JobsManager.DeregisterJobs(jobs_denylist=disabled_jobs)\n\n # Check for valid dependencies/directories.\n dependencies = config.ParseDependencies()\n if config.DOCKER_ENABLED:\n check_docker_dependencies(dependencies)\n check_system_dependencies(dependencies)\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n jobs = job_manager.JobsManager.GetJobNames()\n log.info(\n 'Dependency check complete. The following jobs will be enabled '\n 'for this worker: {0:s}'.format(','.join(jobs)))\n self.worker = self.task_manager.celery.app\n\n def start(self):\n \"\"\"Start Turbinia Celery Worker.\"\"\"\n log.info('Running Turbinia Celery Worker.')\n self.worker.task(task_manager.task_runner, name='task_runner')\n argv = ['celery', 'worker', '--loglevel=info', '--pool=solo']\n self.worker.start(argv)\n\n\nclass TurbiniaPsqWorker(object):\n \"\"\"Turbinia PSQ Worker class.\n\n Attributes:\n worker (psq.Worker): PSQ Worker object\n psq (psq.Queue): A Task queue object\n\n Raises:\n TurbiniaException: When errors occur\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initialization for PSQ Worker.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n psq_publisher = pubsub.PublisherClient()\n psq_subscriber = pubsub.SubscriberClient()\n datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)\n try:\n self.psq = psq.Queue(\n psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,\n name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))\n except exceptions.GoogleCloudError as e:\n msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))\n log.error(msg)\n raise TurbiniaException(msg)\n\n # Deregister jobs from denylist/allowlist.\n job_manager.JobsManager.DeregisterJobs(jobs_denylist, jobs_allowlist)\n disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []\n disabled_jobs = [j.lower() for j in disabled_jobs]\n # Only actually disable jobs that have not been allowlisted.\n if jobs_allowlist:\n disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))\n if disabled_jobs:\n log.info(\n 'Disabling non-allowlisted jobs configured to be disabled in the '\n 'config file: {0:s}'.format(', '.join(disabled_jobs)))\n job_manager.JobsManager.DeregisterJobs(jobs_denylist=disabled_jobs)\n\n # Check for valid dependencies/directories.\n dependencies = config.ParseDependencies()\n if config.DOCKER_ENABLED:\n check_docker_dependencies(dependencies)\n 
check_system_dependencies(dependencies)\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n jobs = job_manager.JobsManager.GetJobNames()\n log.info(\n 'Dependency check complete. The following jobs are enabled '\n 'for this worker: {0:s}'.format(','.join(jobs)))\n log.info('Starting PSQ listener on queue {0:s}'.format(self.psq.name))\n self.worker = psq.Worker(queue=self.psq)\n\n def start(self):\n \"\"\"Start Turbinia PSQ Worker.\"\"\"\n log.info('Starting Prometheus endpoint.')\n start_http_server(port=config.PROMETHEUS_PORT, addr=config.PROMETHEUS_ADDR)\n log.info('Running Turbinia PSQ Worker.')\n self.worker.listen()\n", "path": "turbinia/client.py"}]} |
gh_patches_debug_1467 | rasdani/github-patches | git_diff | horovod__horovod-3002 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
【Elastic Horovod】Should we catch exceptions for state.sync()?
**Environment:**
1. Framework: (TensorFlow, Keras, PyTorch, MXNet): Pytorch
2. Framework version: 1.6.0
3. Horovod version: 0.21.3
4. MPI version: 4.0.3
5. CUDA version: 10.2
6. NCCL version: 2.7.6
7. Python version: 3.6
**Checklist:**
1. Did you search issues to find if somebody asked this question before? Yes.
2. If your question is about hang, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/running.rst)?
3. If your question is about docker, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/docker.rst)?
4. Did you check if your question is answered in the [troubleshooting guide](https://github.com/horovod/horovod/blob/master/docs/troubleshooting.rst)? Yes
**Bug report:**
When a new worker is added via the host discovery script, the old workers sync their state to the new one. But if any worker fails during that state synchronization, the elastic Horovod job fails outright instead of recovering, so elasticity does not help here:
```
[0]<stderr>:[2021-06-21 21:35:05.743047: E /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:640] Horovod background loop uncaught exception: [/pytorch/third_party/gloo/gloo/transport/tcp/pair.cc:575] Connection closed by peer [11.198.63.123]:50349
[0]<stdout>:[2021-06-21 21:35:05.773132: D /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:652] [0]: Shutting down background thread
[0]<stderr>:Traceback (most recent call last):
[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py", line 960, in synchronize
[0]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)[0]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
[0]<stderr>:
[0]<stderr>:During handling of the above exception, another exception occurred:[0]<stderr>:
[0]<stderr>:Traceback (most recent call last):[0]<stderr>: File "pytorch_synthetic_benchmark_elastic.py", line 140, in <module>
[0]<stderr>: run_benchmark(state)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/common/elastic.py", line 162, in wrapper
[0]<stderr>: state.sync()
[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py", line 62, in sync
[0]<stderr>: handler.sync()[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py", line 101, in sync
[0]<stderr>: broadcast_parameters(self.value.state_dict(), root_rank=0)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/functions.py", line 58, in broadcast_parameters
[0]<stderr>: synchronize(handle)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py", line 964, in synchronize
[0]<stderr>: raise HorovodInternalError(e)
[0]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
```
I think that was caused by this [code segment](https://github.com/horovod/horovod/blob/139416965ab9aa5850baf96ec54ce35c58b05119/horovod/common/elastic.py#L161), where `state.sync()` runs outside the inner `try`/`except`.
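To make the failure mode concrete, here is a minimal, self-contained sketch (plain Python, not Horovod code; the class and function names are invented for illustration). Because `state.sync()` runs before the inner `try` block, an error raised while broadcasting state to a new worker escapes the retry loop instead of triggering `restore()`:

```python
class HorovodInternalError(Exception):
    """Stand-in for horovod.common.exceptions.HorovodInternalError."""


class FlakyState:
    """Toy state whose first sync fails, like a peer dying mid-broadcast."""

    def __init__(self):
        self.sync_calls = 0

    def sync(self):
        self.sync_calls += 1
        if self.sync_calls == 1:
            raise HorovodInternalError("connection closed by peer during broadcast")

    def restore(self):
        print("restore() called - would roll back to the last committed state")


def run_current(state, func):
    # Condensed version of the current run_fn loop: sync() sits outside the try.
    skip_sync = False
    while True:
        if not skip_sync:
            state.sync()  # a failure here is NOT caught below
        try:
            return func(state)
        except HorovodInternalError:
            state.restore()
            skip_sync = False


if __name__ == "__main__":
    try:
        run_current(FlakyState(), lambda s: "training ok")
    except HorovodInternalError as e:
        # This mirrors the traceback above: the job dies instead of recovering.
        print("job aborted:", e)
```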
It works well for me when I change the code as follows:
```
def run_fn(func, reset):
....
try:
while True:
try:
# Here we also catch exceptions for state.sync().
if not skip_sync:
state.sync()
return func(state, *args, **kwargs)
except HorovodInternalError:
state.restore()
skip_sync = False
except HostsUpdatedInterrupt as e:
skip_sync = e.skip_sync
reset()
state.on_reset()
finally:
notification_manager.remove_listener(state)
return wrapper
```
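For comparison, here is the same change applied to the toy stubs (`FlakyState`, `HorovodInternalError`) from the sketch above. This is only an illustration; the real recovery path additionally rebuilds communication via `reset()` and `state.on_reset()` before retrying.

```python
def run_proposed(state, func):
    # Same loop, but a failure inside sync() is now handled like any other
    # HorovodInternalError: restore and retry instead of crashing the job.
    skip_sync = False
    while True:
        try:
            if not skip_sync:
                state.sync()
            return func(state)
        except HorovodInternalError:
            state.restore()
            skip_sync = False


# First sync raises, restore() runs, the loop retries, the second sync succeeds,
# and "training ok" is printed instead of the job aborting.
print(run_proposed(FlakyState(), lambda s: "training ok"))
```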
**Steps to reproduce.**
1. In order to reproduce the problem easily, we add a `time.sleep(30)` call in `horovod/examples/elastic/pytorch/pytorch_synthetic_benchmark_elastic.py` as follows:
```
...
state.register_reset_callbacks([on_state_reset])
# Here we sleep 30s so that the old workers stay in state.sync() when a new
# worker is added via the host discovery script.
time.sleep(30)
run_benchmark(state)
...
```
2. Run elastic horovod:
```
horovodrun -np 1 --host-discovery-script ./discovery_hosts.sh --network-interface eth1 --min-np 1 --log-level DEBUG --verbose python3 pytorch_synthetic_benchmark_elastic.py --num-iters=1000
```
3. After some iterations have passed, we add a new worker to the host discovery script to trigger a `HostsUpdatedInterrupt`. The old workers then call `state.sync()` and block there for about 30s, because the new worker sleeps 30s before entering `hvd.elastic.run`.
4. While the old workers are blocked in `state.sync()`, we kill one of them to trigger a `HorovodInternalError`. At this point the elastic Horovod job fails. The stderr output is as follows:
```
[0]<stderr>:[2021-06-21 21:35:05.743047: E /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:640] Horovod background loop uncaught exception: [/pytorch/third_party/gloo/gloo/transport/tcp/pair.cc:575] Connection closed by peer [11.198.63.123]:50349
[0]<stdout>:[2021-06-21 21:35:05.773132: D /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:652] [0]: Shutting down background thread
[0]<stderr>:Traceback (most recent call last):
[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py", line 960, in synchronize
[0]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)[0]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
[0]<stderr>:
[0]<stderr>:During handling of the above exception, another exception occurred:[0]<stderr>:
[0]<stderr>:Traceback (most recent call last):[0]<stderr>: File "pytorch_synthetic_benchmark_elastic.py", line 140, in <module>
[0]<stderr>: run_benchmark(state)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/common/elastic.py", line 162, in wrapper
[0]<stderr>: state.sync()
[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py", line 62, in sync
[0]<stderr>: handler.sync()[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py", line 101, in sync
[0]<stderr>: broadcast_parameters(self.value.state_dict(), root_rank=0)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/functions.py", line 58, in broadcast_parameters
[0]<stderr>: synchronize(handle)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py", line 964, in synchronize
[0]<stderr>: raise HorovodInternalError(e)
[0]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `horovod/common/elastic.py`
Content:
```
1 # Copyright 2020 Uber Technologies, Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15
16 import functools
17 import queue
18
19 from horovod.common.exceptions import HorovodInternalError, HostsUpdatedInterrupt
20 from horovod.runner.elastic.worker import HostUpdateResult, WorkerNotificationManager
21
22
23 notification_manager = WorkerNotificationManager()
24
25
26 class State(object):
27 """State representation used for tracking in memory state across workers.
28
29 Args:
30 bcast_object: Function used to broadcast a variable from rank 0 to the other workers.
31 get_rank: Function that returns the current rank of this worker.
32 """
33 def __init__(self, bcast_object, get_rank):
34 self._bcast_object = bcast_object
35 self._rank = get_rank
36 self._host_messages = queue.Queue()
37 self._last_updated_timestamp = 0
38 self._reset_callbacks = []
39
40 def register_reset_callbacks(self, callbacks):
41 """Register callbacks that will be invoked following a reset event (worker added or removed).
42
43 For example, a common use of a reset callback would be to update the learning rate scale with the
44 new number of workers.
45
46 Args:
47 callbacks: list of functions to execute.
48 """
49 self._reset_callbacks.extend(callbacks)
50
51 def on_reset(self):
52 self._host_messages = queue.Queue()
53 self.reset()
54 for callback in self._reset_callbacks:
55 callback()
56
57 def on_hosts_updated(self, timestamp, update_res):
58 self._host_messages.put((timestamp, update_res))
59
60 def commit(self):
61 """Commits all modifications to state tracked by this object to host memory.
62
63 This call will also check for any changes to known hosts, and raise a `HostsUpdatedInterrupt`
64 if any were detected.
65
66 Because commits are a heavy operation involving data copy (potentially from GPU to host), it is
67 recommended to consider committing less frequently than once per batch. This allows users to tradeoff
68 between per-batch execution time and lost training steps in the event of a worker failure.
69 """
70 self.save()
71 self.check_host_updates()
72
73 def check_host_updates(self):
74 """Checks that a notification has been sent indicating that hosts can be added or will be removed.
75
76 Raises a `HostsUpdatedInterrupt` if such a notification has been received.
77 """
78 # Iterate through the update messages sent from the server. If the update timestamp
79 # is greater than the last update timestamp, then trigger a HostsUpdatedException.
80 last_updated_timestamp = prev_timestamp = self._last_updated_timestamp
81 all_update = HostUpdateResult.no_update
82 while not self._host_messages.empty():
83 timestamp, update = self._host_messages.get()
84 if timestamp > last_updated_timestamp:
85 last_updated_timestamp = timestamp
86 all_update |= update
87
88 # In order to ensure all workers raise the exception at the same time, we need to sync
89 # the updated state across all the workers.
90 # TODO(travis): this should be a max allreduce to account for changes in rank 0
91 prev_timestamp, self._last_updated_timestamp, all_update = \
92 self._bcast_object((prev_timestamp, last_updated_timestamp, all_update))
93
94 # At this point, updated state is globally consistent across all ranks.
95 if self._last_updated_timestamp > prev_timestamp:
96 raise HostsUpdatedInterrupt(all_update == HostUpdateResult.removed)
97
98
99 def save(self):
100 """Saves state to host memory."""
101 raise NotImplementedError()
102
103 def restore(self):
104 """Restores the last committed state, undoing any uncommitted modifications."""
105 raise NotImplementedError()
106
107 def sync(self):
108 """Synchronize state across workers."""
109 raise NotImplementedError()
110
111 def reset(self):
112 """Reset objects and variables following a reset event (before synchronization)."""
113 pass
114
115
116 class ObjectState(State):
117 """State for simple Python objects.
118
119 Every object is specified as a keyword argument, and will be assigned as an attribute.
120
121 Args:
122 bcast_object: Horovod broadcast object function used to sync state dictionary.
123 get_rank: Horovod rank function used to identify is this process is the coordinator.
124 kwargs: Properties to sync, will be exposed as attributes of the object.
125 """
126 def __init__(self, bcast_object, get_rank, **kwargs):
127 self._bcast_object = bcast_object
128 self._saved_state = kwargs
129 self._set_attrs()
130 super(ObjectState, self).__init__(bcast_object=bcast_object, get_rank=get_rank)
131
132 def save(self):
133 new_state = {}
134 for attr in self._saved_state.keys():
135 new_state[attr] = getattr(self, attr)
136 self._saved_state = new_state
137
138 def restore(self):
139 self._set_attrs()
140
141 def sync(self):
142 if self._saved_state:
143 self._saved_state = self._bcast_object(self._saved_state)
144 self._set_attrs()
145
146 def _set_attrs(self):
147 for attr, value in self._saved_state.items():
148 setattr(self, attr, value)
149
150
151 def run_fn(func, reset):
152 @functools.wraps(func)
153 def wrapper(state, *args, **kwargs):
154 notification_manager.init()
155 notification_manager.register_listener(state)
156 skip_sync = False
157
158 try:
159 while True:
160 if not skip_sync:
161 state.sync()
162
163 try:
164 return func(state, *args, **kwargs)
165 except HorovodInternalError:
166 state.restore()
167 skip_sync = False
168 except HostsUpdatedInterrupt as e:
169 skip_sync = e.skip_sync
170
171 reset()
172 state.on_reset()
173 finally:
174 notification_manager.remove_listener(state)
175 return wrapper
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/horovod/common/elastic.py b/horovod/common/elastic.py
--- a/horovod/common/elastic.py
+++ b/horovod/common/elastic.py
@@ -157,10 +157,10 @@
try:
while True:
- if not skip_sync:
- state.sync()
-
try:
+ if not skip_sync:
+ state.sync()
+
return func(state, *args, **kwargs)
except HorovodInternalError:
state.restore()
| {"golden_diff": "diff --git a/horovod/common/elastic.py b/horovod/common/elastic.py\n--- a/horovod/common/elastic.py\n+++ b/horovod/common/elastic.py\n@@ -157,10 +157,10 @@\n \n try:\n while True:\n- if not skip_sync:\n- state.sync()\n-\n try:\n+ if not skip_sync:\n+ state.sync()\n+\n return func(state, *args, **kwargs)\n except HorovodInternalError:\n state.restore()\n", "issue": "\u3010Elastic Horovod\u3011Should we catch exceptions for state.sync()\uff1f\n**Environment:**\r\n1. Framework: (TensorFlow, Keras, PyTorch, MXNet): Pytorch\r\n2. Framework version: 1.6.0\r\n3. Horovod version: 0.21.3\r\n4. MPI version: 4.0.3\r\n5. CUDA version: 10.2\r\n6. NCCL version: 2.7.6\r\n7. Python version: 3.6\r\n\r\n**Checklist:**\r\n1. Did you search issues to find if somebody asked this question before? Yes.\r\n2. If your question is about hang, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/running.rst)?\r\n3. If your question is about docker, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/docker.rst)?\r\n4. Did you check if you question is answered in the [troubleshooting guide] (https://github.com/horovod/horovod/blob/master/docs/troubleshooting.rst)? Yes\r\n\r\n**Bug report:**\r\nWhen a new worker was added in host discovery script, old workers will sync their state to new one. But if any worker failed during state synchronization, unfortunately, the elastic horovod task will fail and it seems not play a role for elastic:\r\n```\r\n[0]<stderr>:[2021-06-21 21:35:05.743047: E /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:640] Horovod background loop uncaught exception: [/pytorch/third_party/gloo/gloo/transport/tcp/pair.cc:575] Connection closed by peer [11.198.63.123]:50349\r\n[0]<stdout>:[2021-06-21 21:35:05.773132: D /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:652] [0]: Shutting down background thread\r\n[0]<stderr>:Traceback (most recent call last):\r\n[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py\", line 960, in synchronize\r\n[0]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)[0]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. 
If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.\r\n[0]<stderr>:\r\n[0]<stderr>:During handling of the above exception, another exception occurred:[0]<stderr>: \r\n[0]<stderr>:Traceback (most recent call last):[0]<stderr>: File \"pytorch_synthetic_benchmark_elastic.py\", line 140, in <module>\r\n[0]<stderr>: run_benchmark(state)[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/common/elastic.py\", line 162, in wrapper\r\n[0]<stderr>: state.sync()\r\n[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py\", line 62, in sync\r\n[0]<stderr>: handler.sync()[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py\", line 101, in sync\r\n[0]<stderr>: broadcast_parameters(self.value.state_dict(), root_rank=0)[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/torch/functions.py\", line 58, in broadcast_parameters\r\n[0]<stderr>: synchronize(handle)[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py\", line 964, in synchronize\r\n[0]<stderr>: raise HorovodInternalError(e)\r\n[0]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.\r\n``` \r\n\r\nI think that was caused by this [code segment:](https://github.com/horovod/horovod/blob/139416965ab9aa5850baf96ec54ce35c58b05119/horovod/common/elastic.py#L161)\r\nIt works well for me when I fix code as follows\r\n```\r\ndef run_fn(func, reset):\r\n ....\r\n try:\r\n while True:\r\n try:\r\n # Here we also catch exceptions for state.sync().\r\n if not skip_sync:\r\n state.sync()\r\n return func(state, *args, **kwargs)\r\n except HorovodInternalError:\r\n state.restore()\r\n skip_sync = False\r\n except HostsUpdatedInterrupt as e:\r\n skip_sync = e.skip_sync\r\n\r\n reset()\r\n state.on_reset()\r\n finally:\r\n notification_manager.remove_listener(state)\r\n return wrapper\r\n```\r\n\r\n\r\n**Steps to reproduce.**\r\n1. In order to easily reproduce the problem, we add one line in `horovod/examples/elastic/pytorch/pytorch_synthetic_benchmark_elastic.py` as follows:\r\n```\r\n...\r\nstate.register_reset_callbacks([on_state_reset])\r\n# Here we sleep 30s to keep old workers stay in state.sync() when a new worker\r\n# was add in host-discovery-script.\r\ntime.sleep(30)\r\nrun_benchmark(state)\r\n...\r\n```\r\n2. Run elastic horovod:\r\n```\r\nhorovodrun -np 1 --host-discovery-script ./discovery_hosts.sh --network-interface eth1 --min-np 1 --log-level DEBUG --verbose python3 pytorch_synthetic_benchmark_elastic.py --num-iters=1000\r\n```\r\n3. After some iteration passed, we add a new worker in host-discovery-script to raise `HostsUpdatedInterrupt`. The old workers will call `state.sync()` and hang in `state.sync()` for 30s as new worker will sleep 30s before `hvd.elastic.run`\r\n4. When old worker was hang in `state.sync`, we kill one old worker to raise `HorovodInternalError` . At this time the elastic horovod will fail. 
The content of stderr as follows:\r\n```\r\n[0]<stderr>:[2021-06-21 21:35:05.743047: E /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:640] Horovod background loop uncaught exception: [/pytorch/third_party/gloo/gloo/transport/tcp/pair.cc:575] Connection closed by peer [11.198.63.123]:50349\r\n[0]<stdout>:[2021-06-21 21:35:05.773132: D /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:652] [0]: Shutting down background thread\r\n[0]<stderr>:Traceback (most recent call last):\r\n[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py\", line 960, in synchronize\r\n[0]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)[0]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.\r\n[0]<stderr>:\r\n[0]<stderr>:During handling of the above exception, another exception occurred:[0]<stderr>: \r\n[0]<stderr>:Traceback (most recent call last):[0]<stderr>: File \"pytorch_synthetic_benchmark_elastic.py\", line 140, in <module>\r\n[0]<stderr>: run_benchmark(state)[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/common/elastic.py\", line 162, in wrapper\r\n[0]<stderr>: state.sync()\r\n[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py\", line 62, in sync\r\n[0]<stderr>: handler.sync()[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py\", line 101, in sync\r\n[0]<stderr>: broadcast_parameters(self.value.state_dict(), root_rank=0)[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/torch/functions.py\", line 58, in broadcast_parameters\r\n[0]<stderr>: synchronize(handle)[0]<stderr>: File \"/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py\", line 964, in synchronize\r\n[0]<stderr>: raise HorovodInternalError(e)\r\n[0]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.\r\n``` \r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 Uber Technologies, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport functools\nimport queue\n\nfrom horovod.common.exceptions import HorovodInternalError, HostsUpdatedInterrupt\nfrom horovod.runner.elastic.worker import HostUpdateResult, WorkerNotificationManager\n\n\nnotification_manager = WorkerNotificationManager()\n\n\nclass State(object):\n \"\"\"State representation used for tracking in memory state across workers.\n\n Args:\n bcast_object: Function used to broadcast a variable from rank 0 to the other workers.\n get_rank: Function that returns the current rank of this worker.\n \"\"\"\n def __init__(self, bcast_object, get_rank):\n self._bcast_object = bcast_object\n self._rank = get_rank\n self._host_messages = queue.Queue()\n self._last_updated_timestamp = 0\n self._reset_callbacks = []\n\n def register_reset_callbacks(self, callbacks):\n \"\"\"Register callbacks that will be invoked following a reset event (worker added or removed).\n\n For example, a common use of a reset callback would be to update the learning rate scale with the\n new number of workers.\n\n Args:\n callbacks: list of functions to execute.\n \"\"\"\n self._reset_callbacks.extend(callbacks)\n\n def on_reset(self):\n self._host_messages = queue.Queue()\n self.reset()\n for callback in self._reset_callbacks:\n callback()\n\n def on_hosts_updated(self, timestamp, update_res):\n self._host_messages.put((timestamp, update_res))\n\n def commit(self):\n \"\"\"Commits all modifications to state tracked by this object to host memory.\n\n This call will also check for any changes to known hosts, and raise a `HostsUpdatedInterrupt`\n if any were detected.\n\n Because commits are a heavy operation involving data copy (potentially from GPU to host), it is\n recommended to consider committing less frequently than once per batch. This allows users to tradeoff\n between per-batch execution time and lost training steps in the event of a worker failure.\n \"\"\"\n self.save()\n self.check_host_updates()\n\n def check_host_updates(self):\n \"\"\"Checks that a notification has been sent indicating that hosts can be added or will be removed.\n\n Raises a `HostsUpdatedInterrupt` if such a notification has been received.\n \"\"\"\n # Iterate through the update messages sent from the server. 
If the update timestamp\n # is greater than the last update timestamp, then trigger a HostsUpdatedException.\n last_updated_timestamp = prev_timestamp = self._last_updated_timestamp\n all_update = HostUpdateResult.no_update\n while not self._host_messages.empty():\n timestamp, update = self._host_messages.get()\n if timestamp > last_updated_timestamp:\n last_updated_timestamp = timestamp\n all_update |= update\n\n # In order to ensure all workers raise the exception at the same time, we need to sync\n # the updated state across all the workers.\n # TODO(travis): this should be a max allreduce to account for changes in rank 0\n prev_timestamp, self._last_updated_timestamp, all_update = \\\n self._bcast_object((prev_timestamp, last_updated_timestamp, all_update))\n\n # At this point, updated state is globally consistent across all ranks.\n if self._last_updated_timestamp > prev_timestamp:\n raise HostsUpdatedInterrupt(all_update == HostUpdateResult.removed)\n\n\n def save(self):\n \"\"\"Saves state to host memory.\"\"\"\n raise NotImplementedError()\n\n def restore(self):\n \"\"\"Restores the last committed state, undoing any uncommitted modifications.\"\"\"\n raise NotImplementedError()\n\n def sync(self):\n \"\"\"Synchronize state across workers.\"\"\"\n raise NotImplementedError()\n\n def reset(self):\n \"\"\"Reset objects and variables following a reset event (before synchronization).\"\"\"\n pass\n\n\nclass ObjectState(State):\n \"\"\"State for simple Python objects.\n\n Every object is specified as a keyword argument, and will be assigned as an attribute.\n\n Args:\n bcast_object: Horovod broadcast object function used to sync state dictionary.\n get_rank: Horovod rank function used to identify is this process is the coordinator.\n kwargs: Properties to sync, will be exposed as attributes of the object.\n \"\"\"\n def __init__(self, bcast_object, get_rank, **kwargs):\n self._bcast_object = bcast_object\n self._saved_state = kwargs\n self._set_attrs()\n super(ObjectState, self).__init__(bcast_object=bcast_object, get_rank=get_rank)\n\n def save(self):\n new_state = {}\n for attr in self._saved_state.keys():\n new_state[attr] = getattr(self, attr)\n self._saved_state = new_state\n\n def restore(self):\n self._set_attrs()\n\n def sync(self):\n if self._saved_state:\n self._saved_state = self._bcast_object(self._saved_state)\n self._set_attrs()\n\n def _set_attrs(self):\n for attr, value in self._saved_state.items():\n setattr(self, attr, value)\n\n\ndef run_fn(func, reset):\n @functools.wraps(func)\n def wrapper(state, *args, **kwargs):\n notification_manager.init()\n notification_manager.register_listener(state)\n skip_sync = False\n\n try:\n while True:\n if not skip_sync:\n state.sync()\n\n try:\n return func(state, *args, **kwargs)\n except HorovodInternalError:\n state.restore()\n skip_sync = False\n except HostsUpdatedInterrupt as e:\n skip_sync = e.skip_sync\n\n reset()\n state.on_reset()\n finally:\n notification_manager.remove_listener(state)\n return wrapper\n", "path": "horovod/common/elastic.py"}], "after_files": [{"content": "# Copyright 2020 Uber Technologies, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport functools\nimport queue\n\nfrom horovod.common.exceptions import HorovodInternalError, HostsUpdatedInterrupt\nfrom horovod.runner.elastic.worker import HostUpdateResult, WorkerNotificationManager\n\n\nnotification_manager = WorkerNotificationManager()\n\n\nclass State(object):\n \"\"\"State representation used for tracking in memory state across workers.\n\n Args:\n bcast_object: Function used to broadcast a variable from rank 0 to the other workers.\n get_rank: Function that returns the current rank of this worker.\n \"\"\"\n def __init__(self, bcast_object, get_rank):\n self._bcast_object = bcast_object\n self._rank = get_rank\n self._host_messages = queue.Queue()\n self._last_updated_timestamp = 0\n self._reset_callbacks = []\n\n def register_reset_callbacks(self, callbacks):\n \"\"\"Register callbacks that will be invoked following a reset event (worker added or removed).\n\n For example, a common use of a reset callback would be to update the learning rate scale with the\n new number of workers.\n\n Args:\n callbacks: list of functions to execute.\n \"\"\"\n self._reset_callbacks.extend(callbacks)\n\n def on_reset(self):\n self._host_messages = queue.Queue()\n self.reset()\n for callback in self._reset_callbacks:\n callback()\n\n def on_hosts_updated(self, timestamp, update_res):\n self._host_messages.put((timestamp, update_res))\n\n def commit(self):\n \"\"\"Commits all modifications to state tracked by this object to host memory.\n\n This call will also check for any changes to known hosts, and raise a `HostsUpdatedInterrupt`\n if any were detected.\n\n Because commits are a heavy operation involving data copy (potentially from GPU to host), it is\n recommended to consider committing less frequently than once per batch. This allows users to tradeoff\n between per-batch execution time and lost training steps in the event of a worker failure.\n \"\"\"\n self.save()\n self.check_host_updates()\n\n def check_host_updates(self):\n \"\"\"Checks that a notification has been sent indicating that hosts can be added or will be removed.\n\n Raises a `HostsUpdatedInterrupt` if such a notification has been received.\n \"\"\"\n # Iterate through the update messages sent from the server. 
If the update timestamp\n # is greater than the last update timestamp, then trigger a HostsUpdatedException.\n last_updated_timestamp = prev_timestamp = self._last_updated_timestamp\n all_update = HostUpdateResult.no_update\n while not self._host_messages.empty():\n timestamp, update = self._host_messages.get()\n if timestamp > last_updated_timestamp:\n last_updated_timestamp = timestamp\n all_update |= update\n\n # In order to ensure all workers raise the exception at the same time, we need to sync\n # the updated state across all the workers.\n # TODO(travis): this should be a max allreduce to account for changes in rank 0\n prev_timestamp, self._last_updated_timestamp, all_update = \\\n self._bcast_object((prev_timestamp, last_updated_timestamp, all_update))\n\n # At this point, updated state is globally consistent across all ranks.\n if self._last_updated_timestamp > prev_timestamp:\n raise HostsUpdatedInterrupt(all_update == HostUpdateResult.removed)\n\n\n def save(self):\n \"\"\"Saves state to host memory.\"\"\"\n raise NotImplementedError()\n\n def restore(self):\n \"\"\"Restores the last committed state, undoing any uncommitted modifications.\"\"\"\n raise NotImplementedError()\n\n def sync(self):\n \"\"\"Synchronize state across workers.\"\"\"\n raise NotImplementedError()\n\n def reset(self):\n \"\"\"Reset objects and variables following a reset event (before synchronization).\"\"\"\n pass\n\n\nclass ObjectState(State):\n \"\"\"State for simple Python objects.\n\n Every object is specified as a keyword argument, and will be assigned as an attribute.\n\n Args:\n bcast_object: Horovod broadcast object function used to sync state dictionary.\n get_rank: Horovod rank function used to identify is this process is the coordinator.\n kwargs: Properties to sync, will be exposed as attributes of the object.\n \"\"\"\n def __init__(self, bcast_object, get_rank, **kwargs):\n self._bcast_object = bcast_object\n self._saved_state = kwargs\n self._set_attrs()\n super(ObjectState, self).__init__(bcast_object=bcast_object, get_rank=get_rank)\n\n def save(self):\n new_state = {}\n for attr in self._saved_state.keys():\n new_state[attr] = getattr(self, attr)\n self._saved_state = new_state\n\n def restore(self):\n self._set_attrs()\n\n def sync(self):\n if self._saved_state:\n self._saved_state = self._bcast_object(self._saved_state)\n self._set_attrs()\n\n def _set_attrs(self):\n for attr, value in self._saved_state.items():\n setattr(self, attr, value)\n\n\ndef run_fn(func, reset):\n @functools.wraps(func)\n def wrapper(state, *args, **kwargs):\n notification_manager.init()\n notification_manager.register_listener(state)\n skip_sync = False\n\n try:\n while True:\n try:\n if not skip_sync:\n state.sync()\n\n return func(state, *args, **kwargs)\n except HorovodInternalError:\n state.restore()\n skip_sync = False\n except HostsUpdatedInterrupt as e:\n skip_sync = e.skip_sync\n\n reset()\n state.on_reset()\n finally:\n notification_manager.remove_listener(state)\n return wrapper\n", "path": "horovod/common/elastic.py"}]} |
gh_patches_debug_1468 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-672 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support ElasticSearch 7.14
The Python package `elasticsearch-py` added the `terms_enum` client method in Elasticsearch 7.14. It is not yet instrumented by scout_apm, which breaks the tests.
--- END ISSUE ---
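To make the gap concrete, a call along the following lines goes through the uninstrumented path. This is a hypothetical sketch: the index name, field, and exact keyword arguments are assumptions based on 7.x client conventions, not details taken from the issue.

```python
# Hypothetical sketch -- index/field values are made up; a reachable cluster is assumed.
from elasticsearch import Elasticsearch

from scout_apm.instruments.elasticsearch import ensure_installed

ensure_installed()  # wraps the client methods listed in CLIENT_METHODS (plus Transport.perform_request)

es = Elasticsearch()
# terms_enum is new in elasticsearch-py 7.14 and missing from CLIENT_METHODS, so the
# client-level wrapper is never applied and no "Elasticsearch/<Index>/TermsEnum" span is created.
es.terms_enum(index="my-index", body={"field": "message", "string": "fo"})
```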
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/instruments/elasticsearch.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5 from collections import namedtuple
6
7 import wrapt
8
9 from scout_apm.compat import get_pos_args, unwrap_decorators
10 from scout_apm.core.tracked_request import TrackedRequest
11
12 try:
13 from elasticsearch import Elasticsearch, Transport
14 except ImportError: # pragma: no cover
15 Elasticsearch = None
16 Transport = None
17
18 logger = logging.getLogger(__name__)
19
20
21 def ensure_installed():
22 logger.debug("Instrumenting elasticsearch.")
23
24 if Elasticsearch is None:
25 logger.debug(
26 "Couldn't import elasticsearch.Elasticsearch - probably not installed."
27 )
28 else:
29 ensure_client_instrumented()
30 ensure_transport_instrumented()
31
32
33 ClientMethod = namedtuple("ClientMethod", ["name", "takes_index_argument"])
34
35 CLIENT_METHODS = [
36 ClientMethod("bulk", True),
37 ClientMethod("clear_scroll", False),
38 ClientMethod("close", False),
39 ClientMethod("close_point_in_time", False),
40 ClientMethod("count", True),
41 ClientMethod("create", True),
42 ClientMethod("delete", True),
43 ClientMethod("delete_by_query", True),
44 ClientMethod("delete_by_query_rethrottle", False),
45 ClientMethod("delete_script", False),
46 ClientMethod("exists", True),
47 ClientMethod("exists_source", True),
48 ClientMethod("explain", True),
49 ClientMethod("field_caps", True),
50 ClientMethod("get", True),
51 ClientMethod("get_script", False),
52 ClientMethod("get_script_context", False),
53 ClientMethod("get_script_languages", False),
54 ClientMethod("get_source", True),
55 ClientMethod("index", True),
56 ClientMethod("info", False),
57 ClientMethod("mget", True),
58 ClientMethod("msearch", True),
59 ClientMethod("msearch_template", True),
60 ClientMethod("mtermvectors", True),
61 ClientMethod("open_point_in_time", True),
62 ClientMethod("ping", False),
63 ClientMethod("put_script", False),
64 ClientMethod("rank_eval", True),
65 ClientMethod("reindex", False),
66 ClientMethod("reindex_rethrottle", False),
67 ClientMethod("render_search_template", False),
68 ClientMethod("scripts_painless_execute", False),
69 ClientMethod("scroll", False),
70 ClientMethod("search", True),
71 ClientMethod("search_shards", True),
72 ClientMethod("search_template", True),
73 ClientMethod("termvectors", True),
74 ClientMethod("update", True),
75 ClientMethod("update_by_query", True),
76 ClientMethod("update_by_query_rethrottle", False),
77 ]
78
79
80 have_patched_client = False
81
82
83 def ensure_client_instrumented():
84 global have_patched_client
85
86 if not have_patched_client:
87 for name, takes_index_argument in CLIENT_METHODS:
88 try:
89 method = getattr(Elasticsearch, name)
90 if takes_index_argument:
91 wrapped = wrap_client_index_method(method)
92 else:
93 wrapped = wrap_client_method(method)
94 setattr(Elasticsearch, name, wrapped)
95 except Exception as exc:
96 logger.warning(
97 "Failed to instrument elasticsearch.Elasticsearch.%s: %r",
98 name,
99 exc,
100 exc_info=exc,
101 )
102
103 have_patched_client = True
104
105
106 @wrapt.decorator
107 def wrap_client_index_method(wrapped, instance, args, kwargs):
108 # elasticsearch-py 7.5.1 changed the order of arguments for client methods,
109 # so to be safe we need to inspect the wrapped method's positional
110 # arguments to see if we should pull it from there
111 if "index" in kwargs:
112 index = kwargs["index"]
113 else:
114 unwrapped = unwrap_decorators(wrapped)
115 pos_args = get_pos_args(unwrapped)
116 try:
117 index_index = pos_args.index("index")
118 except ValueError: # pragma: no cover
119 # This guards against the method not accepting an 'index' argument
120 # but they all do - for now
121 index = ""
122 else:
123 try:
124 index = args[index_index - 1] # subtract 'self'
125 except IndexError:
126 index = ""
127
128 if isinstance(index, (list, tuple)):
129 index = ",".join(index)
130 if index == "":
131 index = "Unknown"
132 index = index.title()
133
134 camel_name = "".join(c.title() for c in wrapped.__name__.split("_"))
135 operation = "Elasticsearch/{}/{}".format(index, camel_name)
136 tracked_request = TrackedRequest.instance()
137 with tracked_request.span(operation=operation, ignore_children=True):
138 return wrapped(*args, **kwargs)
139
140
141 @wrapt.decorator
142 def wrap_client_method(wrapped, instance, args, kwargs):
143 camel_name = "".join(c.title() for c in wrapped.__name__.split("_"))
144 operation = "Elasticsearch/{}".format(camel_name)
145 tracked_request = TrackedRequest.instance()
146 with tracked_request.span(operation=operation, ignore_children=True):
147 return wrapped(*args, **kwargs)
148
149
150 have_patched_transport = False
151
152
153 def ensure_transport_instrumented():
154 global have_patched_transport
155
156 if not have_patched_transport:
157 try:
158 Transport.perform_request = wrapped_perform_request(
159 Transport.perform_request
160 )
161 except Exception as exc:
162 logger.warning(
163 "Failed to instrument elasticsearch.Transport.perform_request: %r",
164 exc,
165 exc_info=exc,
166 )
167
168 have_patched_transport = True
169
170
171 def _sanitize_name(name):
172 try:
173 op = name.split("/")[-1]
174 op = op[1:] # chop leading '_' from op
175 known_names = (
176 "bench",
177 "bulk",
178 "count",
179 "exists",
180 "explain",
181 "field_stats",
182 "health",
183 "mget",
184 "mlt",
185 "mpercolate",
186 "msearch",
187 "mtermvectors",
188 "percolate",
189 "query",
190 "scroll",
191 "search_shards",
192 "source",
193 "suggest",
194 "template",
195 "termvectors",
196 "update",
197 "search",
198 )
199 if op in known_names:
200 return op.title()
201 return "Unknown"
202 except Exception:
203 return "Unknown"
204
205
206 @wrapt.decorator
207 def wrapped_perform_request(wrapped, instance, args, kwargs):
208 try:
209 op = _sanitize_name(args[1])
210 except IndexError:
211 op = "Unknown"
212
213 tracked_request = TrackedRequest.instance()
214 with tracked_request.span(
215 operation="Elasticsearch/{}".format(op),
216 ignore_children=True,
217 ):
218 return wrapped(*args, **kwargs)
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/instruments/elasticsearch.py b/src/scout_apm/instruments/elasticsearch.py
--- a/src/scout_apm/instruments/elasticsearch.py
+++ b/src/scout_apm/instruments/elasticsearch.py
@@ -71,6 +71,7 @@
ClientMethod("search_shards", True),
ClientMethod("search_template", True),
ClientMethod("termvectors", True),
+ ClientMethod("terms_enum", True),
ClientMethod("update", True),
ClientMethod("update_by_query", True),
ClientMethod("update_by_query_rethrottle", False),
| {"golden_diff": "diff --git a/src/scout_apm/instruments/elasticsearch.py b/src/scout_apm/instruments/elasticsearch.py\n--- a/src/scout_apm/instruments/elasticsearch.py\n+++ b/src/scout_apm/instruments/elasticsearch.py\n@@ -71,6 +71,7 @@\n ClientMethod(\"search_shards\", True),\n ClientMethod(\"search_template\", True),\n ClientMethod(\"termvectors\", True),\n+ ClientMethod(\"terms_enum\", True),\n ClientMethod(\"update\", True),\n ClientMethod(\"update_by_query\", True),\n ClientMethod(\"update_by_query_rethrottle\", False),\n", "issue": "Support ElasticSearch 7.14\nThe python package `elasticsearch-py` introduced the `terms_enum` parameter from ElasticSearch 7.14. This is currently not being instrumented and breaking tests.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom collections import namedtuple\n\nimport wrapt\n\nfrom scout_apm.compat import get_pos_args, unwrap_decorators\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from elasticsearch import Elasticsearch, Transport\nexcept ImportError: # pragma: no cover\n Elasticsearch = None\n Transport = None\n\nlogger = logging.getLogger(__name__)\n\n\ndef ensure_installed():\n logger.debug(\"Instrumenting elasticsearch.\")\n\n if Elasticsearch is None:\n logger.debug(\n \"Couldn't import elasticsearch.Elasticsearch - probably not installed.\"\n )\n else:\n ensure_client_instrumented()\n ensure_transport_instrumented()\n\n\nClientMethod = namedtuple(\"ClientMethod\", [\"name\", \"takes_index_argument\"])\n\nCLIENT_METHODS = [\n ClientMethod(\"bulk\", True),\n ClientMethod(\"clear_scroll\", False),\n ClientMethod(\"close\", False),\n ClientMethod(\"close_point_in_time\", False),\n ClientMethod(\"count\", True),\n ClientMethod(\"create\", True),\n ClientMethod(\"delete\", True),\n ClientMethod(\"delete_by_query\", True),\n ClientMethod(\"delete_by_query_rethrottle\", False),\n ClientMethod(\"delete_script\", False),\n ClientMethod(\"exists\", True),\n ClientMethod(\"exists_source\", True),\n ClientMethod(\"explain\", True),\n ClientMethod(\"field_caps\", True),\n ClientMethod(\"get\", True),\n ClientMethod(\"get_script\", False),\n ClientMethod(\"get_script_context\", False),\n ClientMethod(\"get_script_languages\", False),\n ClientMethod(\"get_source\", True),\n ClientMethod(\"index\", True),\n ClientMethod(\"info\", False),\n ClientMethod(\"mget\", True),\n ClientMethod(\"msearch\", True),\n ClientMethod(\"msearch_template\", True),\n ClientMethod(\"mtermvectors\", True),\n ClientMethod(\"open_point_in_time\", True),\n ClientMethod(\"ping\", False),\n ClientMethod(\"put_script\", False),\n ClientMethod(\"rank_eval\", True),\n ClientMethod(\"reindex\", False),\n ClientMethod(\"reindex_rethrottle\", False),\n ClientMethod(\"render_search_template\", False),\n ClientMethod(\"scripts_painless_execute\", False),\n ClientMethod(\"scroll\", False),\n ClientMethod(\"search\", True),\n ClientMethod(\"search_shards\", True),\n ClientMethod(\"search_template\", True),\n ClientMethod(\"termvectors\", True),\n ClientMethod(\"update\", True),\n ClientMethod(\"update_by_query\", True),\n ClientMethod(\"update_by_query_rethrottle\", False),\n]\n\n\nhave_patched_client = False\n\n\ndef ensure_client_instrumented():\n global have_patched_client\n\n if not have_patched_client:\n for name, takes_index_argument in CLIENT_METHODS:\n try:\n method = getattr(Elasticsearch, name)\n if takes_index_argument:\n wrapped = 
wrap_client_index_method(method)\n else:\n wrapped = wrap_client_method(method)\n setattr(Elasticsearch, name, wrapped)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument elasticsearch.Elasticsearch.%s: %r\",\n name,\n exc,\n exc_info=exc,\n )\n\n have_patched_client = True\n\n\[email protected]\ndef wrap_client_index_method(wrapped, instance, args, kwargs):\n # elasticsearch-py 7.5.1 changed the order of arguments for client methods,\n # so to be safe we need to inspect the wrapped method's positional\n # arguments to see if we should pull it from there\n if \"index\" in kwargs:\n index = kwargs[\"index\"]\n else:\n unwrapped = unwrap_decorators(wrapped)\n pos_args = get_pos_args(unwrapped)\n try:\n index_index = pos_args.index(\"index\")\n except ValueError: # pragma: no cover\n # This guards against the method not accepting an 'index' argument\n # but they all do - for now\n index = \"\"\n else:\n try:\n index = args[index_index - 1] # subtract 'self'\n except IndexError:\n index = \"\"\n\n if isinstance(index, (list, tuple)):\n index = \",\".join(index)\n if index == \"\":\n index = \"Unknown\"\n index = index.title()\n\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}/{}\".format(index, camel_name)\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=operation, ignore_children=True):\n return wrapped(*args, **kwargs)\n\n\[email protected]\ndef wrap_client_method(wrapped, instance, args, kwargs):\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}\".format(camel_name)\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=operation, ignore_children=True):\n return wrapped(*args, **kwargs)\n\n\nhave_patched_transport = False\n\n\ndef ensure_transport_instrumented():\n global have_patched_transport\n\n if not have_patched_transport:\n try:\n Transport.perform_request = wrapped_perform_request(\n Transport.perform_request\n )\n except Exception as exc:\n logger.warning(\n \"Failed to instrument elasticsearch.Transport.perform_request: %r\",\n exc,\n exc_info=exc,\n )\n\n have_patched_transport = True\n\n\ndef _sanitize_name(name):\n try:\n op = name.split(\"/\")[-1]\n op = op[1:] # chop leading '_' from op\n known_names = (\n \"bench\",\n \"bulk\",\n \"count\",\n \"exists\",\n \"explain\",\n \"field_stats\",\n \"health\",\n \"mget\",\n \"mlt\",\n \"mpercolate\",\n \"msearch\",\n \"mtermvectors\",\n \"percolate\",\n \"query\",\n \"scroll\",\n \"search_shards\",\n \"source\",\n \"suggest\",\n \"template\",\n \"termvectors\",\n \"update\",\n \"search\",\n )\n if op in known_names:\n return op.title()\n return \"Unknown\"\n except Exception:\n return \"Unknown\"\n\n\[email protected]\ndef wrapped_perform_request(wrapped, instance, args, kwargs):\n try:\n op = _sanitize_name(args[1])\n except IndexError:\n op = \"Unknown\"\n\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(\n operation=\"Elasticsearch/{}\".format(op),\n ignore_children=True,\n ):\n return wrapped(*args, **kwargs)\n", "path": "src/scout_apm/instruments/elasticsearch.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom collections import namedtuple\n\nimport wrapt\n\nfrom scout_apm.compat import get_pos_args, unwrap_decorators\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from 
elasticsearch import Elasticsearch, Transport\nexcept ImportError: # pragma: no cover\n Elasticsearch = None\n Transport = None\n\nlogger = logging.getLogger(__name__)\n\n\ndef ensure_installed():\n logger.debug(\"Instrumenting elasticsearch.\")\n\n if Elasticsearch is None:\n logger.debug(\n \"Couldn't import elasticsearch.Elasticsearch - probably not installed.\"\n )\n else:\n ensure_client_instrumented()\n ensure_transport_instrumented()\n\n\nClientMethod = namedtuple(\"ClientMethod\", [\"name\", \"takes_index_argument\"])\n\nCLIENT_METHODS = [\n ClientMethod(\"bulk\", True),\n ClientMethod(\"clear_scroll\", False),\n ClientMethod(\"close\", False),\n ClientMethod(\"close_point_in_time\", False),\n ClientMethod(\"count\", True),\n ClientMethod(\"create\", True),\n ClientMethod(\"delete\", True),\n ClientMethod(\"delete_by_query\", True),\n ClientMethod(\"delete_by_query_rethrottle\", False),\n ClientMethod(\"delete_script\", False),\n ClientMethod(\"exists\", True),\n ClientMethod(\"exists_source\", True),\n ClientMethod(\"explain\", True),\n ClientMethod(\"field_caps\", True),\n ClientMethod(\"get\", True),\n ClientMethod(\"get_script\", False),\n ClientMethod(\"get_script_context\", False),\n ClientMethod(\"get_script_languages\", False),\n ClientMethod(\"get_source\", True),\n ClientMethod(\"index\", True),\n ClientMethod(\"info\", False),\n ClientMethod(\"mget\", True),\n ClientMethod(\"msearch\", True),\n ClientMethod(\"msearch_template\", True),\n ClientMethod(\"mtermvectors\", True),\n ClientMethod(\"open_point_in_time\", True),\n ClientMethod(\"ping\", False),\n ClientMethod(\"put_script\", False),\n ClientMethod(\"rank_eval\", True),\n ClientMethod(\"reindex\", False),\n ClientMethod(\"reindex_rethrottle\", False),\n ClientMethod(\"render_search_template\", False),\n ClientMethod(\"scripts_painless_execute\", False),\n ClientMethod(\"scroll\", False),\n ClientMethod(\"search\", True),\n ClientMethod(\"search_shards\", True),\n ClientMethod(\"search_template\", True),\n ClientMethod(\"termvectors\", True),\n ClientMethod(\"terms_enum\", True),\n ClientMethod(\"update\", True),\n ClientMethod(\"update_by_query\", True),\n ClientMethod(\"update_by_query_rethrottle\", False),\n]\n\n\nhave_patched_client = False\n\n\ndef ensure_client_instrumented():\n global have_patched_client\n\n if not have_patched_client:\n for name, takes_index_argument in CLIENT_METHODS:\n try:\n method = getattr(Elasticsearch, name)\n if takes_index_argument:\n wrapped = wrap_client_index_method(method)\n else:\n wrapped = wrap_client_method(method)\n setattr(Elasticsearch, name, wrapped)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument elasticsearch.Elasticsearch.%s: %r\",\n name,\n exc,\n exc_info=exc,\n )\n\n have_patched_client = True\n\n\[email protected]\ndef wrap_client_index_method(wrapped, instance, args, kwargs):\n # elasticsearch-py 7.5.1 changed the order of arguments for client methods,\n # so to be safe we need to inspect the wrapped method's positional\n # arguments to see if we should pull it from there\n if \"index\" in kwargs:\n index = kwargs[\"index\"]\n else:\n unwrapped = unwrap_decorators(wrapped)\n pos_args = get_pos_args(unwrapped)\n try:\n index_index = pos_args.index(\"index\")\n except ValueError: # pragma: no cover\n # This guards against the method not accepting an 'index' argument\n # but they all do - for now\n index = \"\"\n else:\n try:\n index = args[index_index - 1] # subtract 'self'\n except IndexError:\n index = \"\"\n\n if isinstance(index, (list, 
tuple)):\n index = \",\".join(index)\n if index == \"\":\n index = \"Unknown\"\n index = index.title()\n\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}/{}\".format(index, camel_name)\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=operation, ignore_children=True):\n return wrapped(*args, **kwargs)\n\n\[email protected]\ndef wrap_client_method(wrapped, instance, args, kwargs):\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}\".format(camel_name)\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=operation, ignore_children=True):\n return wrapped(*args, **kwargs)\n\n\nhave_patched_transport = False\n\n\ndef ensure_transport_instrumented():\n global have_patched_transport\n\n if not have_patched_transport:\n try:\n Transport.perform_request = wrapped_perform_request(\n Transport.perform_request\n )\n except Exception as exc:\n logger.warning(\n \"Failed to instrument elasticsearch.Transport.perform_request: %r\",\n exc,\n exc_info=exc,\n )\n\n have_patched_transport = True\n\n\ndef _sanitize_name(name):\n try:\n op = name.split(\"/\")[-1]\n op = op[1:] # chop leading '_' from op\n known_names = (\n \"bench\",\n \"bulk\",\n \"count\",\n \"exists\",\n \"explain\",\n \"field_stats\",\n \"health\",\n \"mget\",\n \"mlt\",\n \"mpercolate\",\n \"msearch\",\n \"mtermvectors\",\n \"percolate\",\n \"query\",\n \"scroll\",\n \"search_shards\",\n \"source\",\n \"suggest\",\n \"template\",\n \"termvectors\",\n \"update\",\n \"search\",\n )\n if op in known_names:\n return op.title()\n return \"Unknown\"\n except Exception:\n return \"Unknown\"\n\n\[email protected]\ndef wrapped_perform_request(wrapped, instance, args, kwargs):\n try:\n op = _sanitize_name(args[1])\n except IndexError:\n op = \"Unknown\"\n\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(\n operation=\"Elasticsearch/{}\".format(op),\n ignore_children=True,\n ):\n return wrapped(*args, **kwargs)\n", "path": "src/scout_apm/instruments/elasticsearch.py"}]} |
gh_patches_debug_1469 | rasdani/github-patches | git_diff | arviz-devs__arviz-1043 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error with PyMC3 model that contains Potential
**Describe the bug**
For PyMC3 model that contains Potential, io_pymc3 is attempting to call `eval()` without graph dependence.
**To Reproduce**
```python
with pm.Model() as m:
x = pm.Normal('x', 0., 1.)
pm.Potential('z', pm.Normal.dist(x, 1.).logp(np.random.randn(10)))
trace = pm.sample()
```
returns:
```python
---------------------------------------------------------------------------
MissingInputError Traceback (most recent call last)
<ipython-input-45-c2e72dd27111> in <module>
2 x = pm.Normal('x', 0., 1.)
3 pm.Potential('z', pm.Normal.dist(x, 1.).logp(np.random.randn(10)))
----> 4 trace = pm.sample()
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/pymc3-3.8-py3.8.egg/pymc3/sampling.py in sample(draws, step, init, n_init, start, trace, chain_idx, chains, cores, tune, progressbar, model, random_seed, discard_tuned_samples, compute_convergence_checks, callback, **kwargs)
539 warnings.warn("The number of samples is too small to check convergence reliably.")
540 else:
--> 541 trace.report._run_convergence_checks(trace, model)
542
543 trace.report._log_summary()
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/pymc3-3.8-py3.8.egg/pymc3/backends/report.py in _run_convergence_checks(self, trace, model)
96 varnames.append(rv_name)
97
---> 98 self._ess = ess = ess(trace, var_names=varnames)
99 self._rhat = rhat = rhat(trace, var_names=varnames)
100
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/pymc3-3.8-py3.8.egg/pymc3/stats/__init__.py in wrapped(*args, **kwargs)
36 )
37 kwargs[new] = kwargs.pop(old)
---> 38 return func(*args, **kwargs)
39
40 return wrapped
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/stats/diagnostics.py in ess(data, var_names, method, relative, prob)
187 raise TypeError(msg)
188
--> 189 dataset = convert_to_dataset(data, group="posterior")
190 var_names = _var_names(var_names, dataset)
191
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/converters.py in convert_to_dataset(obj, group, coords, dims)
166 xarray.Dataset
167 """
--> 168 inference_data = convert_to_inference_data(obj, group=group, coords=coords, dims=dims)
169 dataset = getattr(inference_data, group, None)
170 if dataset is None:
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/converters.py in convert_to_inference_data(obj, group, coords, dims, **kwargs)
87 return from_pystan(**kwargs)
88 elif obj.__class__.__name__ == "MultiTrace": # ugly, but doesn't make PyMC3 a requirement
---> 89 return from_pymc3(trace=kwargs.pop(group), **kwargs)
90 elif obj.__class__.__name__ == "EnsembleSampler": # ugly, but doesn't make emcee a requirement
91 return from_emcee(sampler=kwargs.pop(group), **kwargs)
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/io_pymc3.py in from_pymc3(trace, prior, posterior_predictive, coords, dims, model)
350 ):
351 """Convert pymc3 data into an InferenceData object."""
--> 352 return PyMC3Converter(
353 trace=trace,
354 prior=prior,
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/io_pymc3.py in to_inference_data(self)
342 id_dict["predictions_constant_data"] = self.constant_data_to_xarray()
343 else:
--> 344 id_dict["constant_data"] = self.constant_data_to_xarray()
345 return InferenceData(**id_dict)
346
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/base.py in wrapped(cls, *args, **kwargs)
34 if all([getattr(cls, prop_i) is None for prop_i in prop]):
35 return None
---> 36 return func(cls, *args, **kwargs)
37
38 return wrapped
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/base.py in wrapped(cls, *args, **kwargs)
34 if all([getattr(cls, prop_i) is None for prop_i in prop]):
35 return None
---> 36 return func(cls, *args, **kwargs)
37
38 return wrapped
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/io_pymc3.py in constant_data_to_xarray(self)
309 # this might be a Deterministic, and must be evaluated
310 elif hasattr(self.model[name], "eval"):
--> 311 vals = self.model[name].eval()
312 vals = np.atleast_1d(vals)
313 val_dims = dims.get(name)
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/graph.py in eval(self, inputs_to_values)
520 inputs = tuple(sorted(inputs_to_values.keys(), key=id))
521 if inputs not in self._fn_cache:
--> 522 self._fn_cache[inputs] = theano.function(inputs, self)
523 args = [inputs_to_values[param] for param in inputs]
524
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function.py in function(inputs, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input)
304 # note: pfunc will also call orig_function -- orig_function is
305 # a choke point that all compilation must pass through
--> 306 fn = pfunc(params=inputs,
307 outputs=outputs,
308 mode=mode,
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/pfunc.py in pfunc(params, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input, output_keys)
481 inputs.append(si)
482
--> 483 return orig_function(inputs, cloned_outputs, mode,
484 accept_inplace=accept_inplace, name=name,
485 profile=profile, on_unused_input=on_unused_input,
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function_module.py in orig_function(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input, output_keys)
1830 try:
1831 Maker = getattr(mode, 'function_maker', FunctionMaker)
-> 1832 m = Maker(inputs,
1833 outputs,
1834 mode,
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function_module.py in __init__(self, inputs, outputs, mode, accept_inplace, function_builder, profile, on_unused_input, fgraph, output_keys, name)
1484 # make the fgraph (copies the graph, creates NEW INPUT AND
1485 # OUTPUT VARIABLES)
-> 1486 fgraph, additional_outputs = std_fgraph(inputs, outputs,
1487 accept_inplace)
1488 fgraph.profile = profile
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function_module.py in std_fgraph(input_specs, output_specs, accept_inplace)
178 orig_outputs = [spec.variable for spec in output_specs] + updates
179
--> 180 fgraph = gof.fg.FunctionGraph(orig_inputs, orig_outputs,
181 update_mapping=update_mapping)
182
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/fg.py in __init__(self, inputs, outputs, features, clone, update_mapping)
173
174 for output in outputs:
--> 175 self.__import_r__(output, reason="init")
176 for i, output in enumerate(outputs):
177 output.clients.append(('output', i))
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/fg.py in __import_r__(self, variable, reason)
344 # Imports the owners of the variables
345 if variable.owner and variable.owner not in self.apply_nodes:
--> 346 self.__import__(variable.owner, reason=reason)
347 elif (variable.owner is None and
348 not isinstance(variable, graph.Constant) and
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/fg.py in __import__(self, apply_node, check, reason)
389 "for more information on this error."
390 % (node.inputs.index(r), str(node)))
--> 391 raise MissingInputError(error_msg, variable=r)
392
393 for node in new_nodes:
MissingInputError: Input 0 of the graph (indices start from 0), used to compute InplaceDimShuffle{x}(x), was not provided and not given a value. Use the Theano flag exception_verbosity='high', for more information on this error.
```
--- END ISSUE ---
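To make the failure mode concrete, here is a minimal sketch of why the `Potential` reaches `constant_data_to_xarray` at all. It assumes the PyMC3 3.8 / ArviZ 0.6.1 versions from the traceback, where `pm.Potential` registers the variable in `model.named_vars`:

```python
import numpy as np
import pymc3 as pm

with pm.Model() as m:
    x = pm.Normal('x', 0., 1.)
    z = pm.Potential('z', pm.Normal.dist(x, 1.).logp(np.random.randn(10)))

var = m['z']
# is_data() in PyMC3Converter.constant_data_to_xarray only excludes these groups:
assert var not in m.deterministics
assert var not in m.observed_RVs
assert var not in m.free_RVs
# ...but not m.potentials, so 'z' is treated as constant data and eval()'d:
assert var in m.potentials
var.eval()  # raises theano MissingInputError -- 'z' still depends on the free RV x
```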
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/data/io_pymc3.py`
Content:
```
1 """PyMC3-specific conversion code."""
2 import logging
3 from typing import Dict, List, Any, Optional, TYPE_CHECKING
4 from types import ModuleType
5
6 import numpy as np
7 import xarray as xr
8 from .. import utils
9 from .inference_data import InferenceData, concat
10 from .base import requires, dict_to_dataset, generate_dims_coords, make_attrs
11
12 if TYPE_CHECKING:
13 import pymc3 as pm
14 from pymc3 import MultiTrace, Model # pylint: disable=invalid-name
15 import theano
16 from typing import Set # pylint: disable=ungrouped-imports
17 else:
18 MultiTrace = Any # pylint: disable=invalid-name
19 Model = Any # pylint: disable=invalid-name
20
21 ___all__ = [""]
22
23 _log = logging.getLogger(__name__)
24
25 Coords = Dict[str, List[Any]]
26 Dims = Dict[str, List[str]]
27 # random variable object ...
28 Var = Any # pylint: disable=invalid-name
29
30
31 def _monkey_patch_pymc3(pm: ModuleType) -> None: # pylint: disable=invalid-name
32 assert pm.__name__ == "pymc3"
33
34 def fixed_eq(self, other):
35 """Use object identity for MultiObservedRV equality."""
36 return self is other
37
38 if tuple([int(x) for x in pm.__version__.split(".")]) < (3, 9): # type: ignore
39 pm.model.MultiObservedRV.__eq__ = fixed_eq # type: ignore
40
41
42 class PyMC3Converter: # pylint: disable=too-many-instance-attributes
43 """Encapsulate PyMC3 specific logic."""
44
45 model = None # type: Optional[pm.Model]
46 nchains = None # type: int
47 ndraws = None # type: int
48 posterior_predictive = None # Type: Optional[Dict[str, np.ndarray]]
49 predictions = None # Type: Optional[Dict[str, np.ndarray]]
50 prior = None # Type: Optional[Dict[str, np.ndarray]]
51
52 def __init__(
53 self,
54 *,
55 trace=None,
56 prior=None,
57 posterior_predictive=None,
58 predictions=None,
59 coords: Optional[Coords] = None,
60 dims: Optional[Dims] = None,
61 model=None
62 ):
63 import pymc3
64 import theano
65
66 _monkey_patch_pymc3(pymc3)
67
68 self.pymc3 = pymc3
69 self.theano = theano
70
71 self.trace = trace
72
73 # this permits us to get the model from command-line argument or from with model:
74 try:
75 self.model = self.pymc3.modelcontext(model or self.model)
76 except TypeError:
77 self.model = None
78
79 # This next line is brittle and may not work forever, but is a secret
80 # way to access the model from the trace.
81 if trace is not None:
82 if self.model is None:
83 self.model = self.trace._straces[0].model # pylint: disable=protected-access
84 self.nchains = trace.nchains if hasattr(trace, "nchains") else 1
85 self.ndraws = len(trace)
86 else:
87 self.nchains = self.ndraws = 0
88
89 self.prior = prior
90 self.posterior_predictive = posterior_predictive
91 self.predictions = predictions
92
93 def arbitrary_element(dct: Dict[Any, np.ndarray]) -> np.ndarray:
94 return next(iter(dct.values()))
95
96 if trace is None:
97 # if you have a posterior_predictive built with keep_dims,
98 # you'll lose here, but there's nothing I can do about that.
99 self.nchains = 1
100 get_from = None
101 if predictions is not None:
102 get_from = predictions
103 elif posterior_predictive is not None:
104 get_from = posterior_predictive
105 elif prior is not None:
106 get_from = prior
107 if get_from is None:
108 # pylint: disable=line-too-long
109 raise ValueError(
110 """When constructing InferenceData must have at least
111 one of trace, prior, posterior_predictive or predictions."""
112 )
113
114 aelem = arbitrary_element(get_from)
115 self.ndraws = aelem.shape[0]
116
117 self.coords = coords
118 self.dims = dims
119 self.observations = self.find_observations()
120
121 def find_observations(self) -> Optional[Dict[str, Var]]:
122 """If there are observations available, return them as a dictionary."""
123 has_observations = False
124 if self.trace is not None:
125 assert self.model is not None, "Cannot identify observations without PymC3 model"
126 if any((hasattr(obs, "observations") for obs in self.model.observed_RVs)):
127 has_observations = True
128 if has_observations:
129 assert self.model is not None
130 return {obs.name: obs.observations for obs in self.model.observed_RVs}
131 return None
132
133 def log_likelihood_vals_point(self, point, var, log_like_fun):
134 """Compute log likelihood for each observed point."""
135 log_like_val = utils.one_de(log_like_fun(point))
136 if var.missing_values:
137 log_like_val = np.where(var.observations.mask, np.nan, log_like_val)
138 return log_like_val
139
140 @requires("trace")
141 @requires("model")
142 def _extract_log_likelihood(self):
143 """Compute log likelihood of each observation."""
144 # If we have predictions, then we have a thinned trace which does not
145 # support extracting a log likelihood.
146 cached = [(var, var.logp_elemwise) for var in self.model.observed_RVs]
147 log_likelihood_dict = {}
148 for var, log_like_fun in cached:
149 chain_likelihoods = []
150 for chain in self.trace.chains:
151 log_like_chain = [
152 self.log_likelihood_vals_point(point, var, log_like_fun)
153 for point in self.trace.points([chain])
154 ]
155 chain_likelihoods.append(np.stack(log_like_chain))
156 log_likelihood_dict[var.name] = np.stack(chain_likelihoods)
157 return log_likelihood_dict
158
159 @requires("trace")
160 def posterior_to_xarray(self):
161 """Convert the posterior to an xarray dataset."""
162 var_names = self.pymc3.util.get_default_varnames( # pylint: disable=no-member
163 self.trace.varnames, include_transformed=False
164 )
165 data = {}
166 for var_name in var_names:
167 data[var_name] = np.array(self.trace.get_values(var_name, combine=False, squeeze=False))
168 return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)
169
170 @requires("trace")
171 def sample_stats_to_xarray(self):
172 """Extract sample_stats from PyMC3 trace."""
173 data = {}
174 rename_key = {"model_logp": "lp"}
175 data = {}
176 for stat in self.trace.stat_names:
177 name = rename_key.get(stat, stat)
178 data[name] = np.array(self.trace.get_sampler_stats(stat, combine=False))
179
180 return dict_to_dataset(data, library=self.pymc3, dims=None, coords=self.coords)
181
182 @requires("trace")
183 @requires("model")
184 def log_likelihood_to_xarray(self):
185 """Extract log likelihood and log_p data from PyMC3 trace."""
186 if self.predictions:
187 return None
188 data = self._extract_log_likelihood()
189 return dict_to_dataset(data, library=self.pymc3, dims=self.dims, coords=self.coords)
190
191 def translate_posterior_predictive_dict_to_xarray(self, dct) -> xr.Dataset:
192 """Take Dict of variables to numpy ndarrays (samples) and translate into dataset."""
193 data = {}
194 for k, ary in dct.items():
195 shape = ary.shape
196 if shape[0] == self.nchains and shape[1] == self.ndraws:
197 data[k] = ary
198 elif shape[0] == self.nchains * self.ndraws:
199 data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))
200 else:
201 data[k] = utils.expand_dims(ary)
202 # pylint: disable=line-too-long
203 _log.warning(
204 "posterior predictive variable %s's shape not compatible with number of chains and draws. "
205 "This can mean that some draws or even whole chains are not represented.",
206 k,
207 )
208 return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)
209
210 @requires(["posterior_predictive"])
211 def posterior_predictive_to_xarray(self):
212 """Convert posterior_predictive samples to xarray."""
213 return self.translate_posterior_predictive_dict_to_xarray(self.posterior_predictive)
214
215 @requires(["predictions"])
216 def predictions_to_xarray(self):
217 """Convert predictions (out of sample predictions) to xarray."""
218 return self.translate_posterior_predictive_dict_to_xarray(self.predictions)
219
220 def priors_to_xarray(self):
221 """Convert prior samples (and if possible prior predictive too) to xarray."""
222 if self.prior is None:
223 return {"prior": None, "prior_predictive": None}
224 if self.trace is not None:
225 prior_vars = self.pymc3.util.get_default_varnames( # pylint: disable=no-member
226 self.trace.varnames, include_transformed=False
227 )
228 prior_predictive_vars = [key for key in self.prior.keys() if key not in prior_vars]
229 else:
230 prior_vars = list(self.prior.keys())
231 prior_predictive_vars = None
232
233 priors_dict = {}
234 for group, var_names in zip(
235 ("prior", "prior_predictive"), (prior_vars, prior_predictive_vars)
236 ):
237 priors_dict[group] = (
238 None
239 if var_names is None
240 else dict_to_dataset(
241 {k: utils.expand_dims(self.prior[k]) for k in var_names},
242 library=self.pymc3,
243 coords=self.coords,
244 dims=self.dims,
245 )
246 )
247 return priors_dict
248
249 @requires("observations")
250 @requires("model")
251 def observed_data_to_xarray(self):
252 """Convert observed data to xarray."""
253 if self.dims is None:
254 dims = {}
255 else:
256 dims = self.dims
257 observed_data = {}
258 for name, vals in self.observations.items():
259 if hasattr(vals, "get_value"):
260 vals = vals.get_value()
261 vals = utils.one_de(vals)
262 val_dims = dims.get(name)
263 val_dims, coords = generate_dims_coords(
264 vals.shape, name, dims=val_dims, coords=self.coords
265 )
266 # filter coords based on the dims
267 coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}
268 observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)
269 return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.pymc3))
270
271 @requires(["trace", "predictions"])
272 @requires("model")
273 def constant_data_to_xarray(self):
274 """Convert constant data to xarray."""
275 # For constant data, we are concerned only with deterministics and data.
276 # The constant data vars must be either pm.Data (TensorSharedVariable) or pm.Deterministic
277 constant_data_vars = {} # type: Dict[str, Var]
278 for var in self.model.deterministics:
279 ancestors = self.theano.tensor.gof.graph.ancestors(var.owner.inputs)
280 # no dependency on a random variable
281 if not any((isinstance(a, self.pymc3.model.PyMC3Variable) for a in ancestors)):
282 constant_data_vars[var.name] = var
283
284 def is_data(name, var) -> bool:
285 assert self.model is not None
286 return (
287 var not in self.model.deterministics
288 and var not in self.model.observed_RVs
289 and var not in self.model.free_RVs
290 and (self.observations is None or name not in self.observations)
291 )
292
293 # I don't know how to find pm.Data, except that they are named variables that aren't
294 # observed or free RVs, nor are they deterministics, and then we eliminate observations.
295 for name, var in self.model.named_vars.items():
296 if is_data(name, var):
297 constant_data_vars[name] = var
298
299 if not constant_data_vars:
300 return None
301 if self.dims is None:
302 dims = {}
303 else:
304 dims = self.dims
305 constant_data = {}
306 for name, vals in constant_data_vars.items():
307 if hasattr(vals, "get_value"):
308 vals = vals.get_value()
309 # this might be a Deterministic, and must be evaluated
310 elif hasattr(self.model[name], "eval"):
311 vals = self.model[name].eval()
312 vals = np.atleast_1d(vals)
313 val_dims = dims.get(name)
314 val_dims, coords = generate_dims_coords(
315 vals.shape, name, dims=val_dims, coords=self.coords
316 )
317 # filter coords based on the dims
318 coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}
319 try:
320 constant_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)
321 except ValueError as e: # pylint: disable=invalid-name
322 raise ValueError("Error translating constant_data variable %s: %s" % (name, e))
323 return xr.Dataset(data_vars=constant_data, attrs=make_attrs(library=self.pymc3))
324
325 def to_inference_data(self):
326 """Convert all available data to an InferenceData object.
327
328 Note that if groups can not be created (e.g., there is no `trace`, so
329 the `posterior` and `sample_stats` can not be extracted), then the InferenceData
330 will not have those groups.
331 """
332 id_dict = {
333 "posterior": self.posterior_to_xarray(),
334 "sample_stats": self.sample_stats_to_xarray(),
335 "log_likelihood": self.log_likelihood_to_xarray(),
336 "posterior_predictive": self.posterior_predictive_to_xarray(),
337 "predictions": self.predictions_to_xarray(),
338 **self.priors_to_xarray(),
339 "observed_data": self.observed_data_to_xarray(),
340 }
341 if self.predictions:
342 id_dict["predictions_constant_data"] = self.constant_data_to_xarray()
343 else:
344 id_dict["constant_data"] = self.constant_data_to_xarray()
345 return InferenceData(**id_dict)
346
347
348 def from_pymc3(
349 trace=None, *, prior=None, posterior_predictive=None, coords=None, dims=None, model=None
350 ):
351 """Convert pymc3 data into an InferenceData object."""
352 return PyMC3Converter(
353 trace=trace,
354 prior=prior,
355 posterior_predictive=posterior_predictive,
356 coords=coords,
357 dims=dims,
358 model=model,
359 ).to_inference_data()
360
361
362 ### Later I could have this return ``None`` if the ``idata_orig`` argument is supplied. But
363 ### perhaps we should have an inplace argument?
364 def from_pymc3_predictions(
365 predictions,
366 posterior_trace: Optional[MultiTrace] = None,
367 model: Optional[Model] = None,
368 coords=None,
369 dims=None,
370 idata_orig: Optional[InferenceData] = None,
371 inplace: bool = False,
372 ) -> InferenceData:
373 """Translate out-of-sample predictions into ``InferenceData``.
374
375 Parameters
376 ----------
377 predictions: Dict[str, np.ndarray]
378 The predictions are the return value of ``pymc3.sample_posterior_predictive``,
379 a dictionary of strings (variable names) to numpy ndarrays (draws).
380 posterior_trace: pm.MultiTrace
381 This should be a trace that has been thinned appropriately for
382 ``pymc3.sample_posterior_predictive``. Specifically, any variable whose shape is
383 a deterministic function of the shape of any predictor (explanatory, independent, etc.)
384 variables must be *removed* from this trace.
385 model: pymc3.Model
386 This argument is *not* optional, unlike in conventional uses of ``from_pymc3``.
387 The reason is that the posterior_trace argument is likely to supply an incorrect
388 value of model.
389 coords: Dict[str, array-like[Any]]
390 Coordinates for the variables. Map from coordinate names to coordinate values.
391 dims: Dict[str, array-like[str]]
392 Map from variable name to ordered set of coordinate names.
393 idata_orig: InferenceData, optional
394 If supplied, then modify this inference data in place, adding ``predictions`` and
395 (if available) ``predictions_constant_data`` groups. If this is not supplied, make a
396 fresh InferenceData
397 inplace: boolean, optional
398 If idata_orig is supplied and inplace is True, merge the predictions into idata_orig,
399 rather than returning a fresh InferenceData object.
400
401 Returns
402 -------
403 InferenceData:
404 May be modified ``idata_orig``.
405 """
406 if inplace and not idata_orig:
407 raise ValueError(
408 (
409 "Do not pass True for inplace unless passing"
410 "an existing InferenceData as idata_orig"
411 )
412 )
413 new_idata = PyMC3Converter(
414 trace=posterior_trace, predictions=predictions, model=model, coords=coords, dims=dims
415 ).to_inference_data()
416 if idata_orig is None:
417 return new_idata
418 elif inplace:
419 concat([idata_orig, new_idata], dim=None, inplace=True)
420 return idata_orig
421 else:
422 # if we are not returning in place, then merge the old groups into the new inference
423 # data and return that.
424 concat([new_idata, idata_orig], dim=None, copy=True, inplace=True)
425 return new_idata
426
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/data/io_pymc3.py b/arviz/data/io_pymc3.py
--- a/arviz/data/io_pymc3.py
+++ b/arviz/data/io_pymc3.py
@@ -287,6 +287,7 @@
var not in self.model.deterministics
and var not in self.model.observed_RVs
and var not in self.model.free_RVs
+ and var not in self.model.potentials
and (self.observations is None or name not in self.observations)
)
| {"golden_diff": "diff --git a/arviz/data/io_pymc3.py b/arviz/data/io_pymc3.py\n--- a/arviz/data/io_pymc3.py\n+++ b/arviz/data/io_pymc3.py\n@@ -287,6 +287,7 @@\n var not in self.model.deterministics\n and var not in self.model.observed_RVs\n and var not in self.model.free_RVs\n+ and var not in self.model.potentials\n and (self.observations is None or name not in self.observations)\n )\n", "issue": "Error with PyMC3 model that contains Potential\n**Describe the bug**\r\nFor PyMC3 model that contains Potential, io_pymc3 is attempting to call `eval()` without graph dependence.\r\n\r\n**To Reproduce**\r\n```python\r\nwith pm.Model() as m:\r\n x = pm.Normal('x', 0., 1.)\r\n pm.Potential('z', pm.Normal.dist(x, 1.).logp(np.random.randn(10)))\r\n trace = pm.sample()\r\n```\r\nreturns:\r\n```python\r\n---------------------------------------------------------------------------\r\nMissingInputError Traceback (most recent call last)\r\n<ipython-input-45-c2e72dd27111> in <module>\r\n 2 x = pm.Normal('x', 0., 1.)\r\n 3 pm.Potential('z', pm.Normal.dist(x, 1.).logp(np.random.randn(10)))\r\n----> 4 trace = pm.sample()\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/pymc3-3.8-py3.8.egg/pymc3/sampling.py in sample(draws, step, init, n_init, start, trace, chain_idx, chains, cores, tune, progressbar, model, random_seed, discard_tuned_samples, compute_convergence_checks, callback, **kwargs)\r\n 539 warnings.warn(\"The number of samples is too small to check convergence reliably.\")\r\n 540 else:\r\n--> 541 trace.report._run_convergence_checks(trace, model)\r\n 542 \r\n 543 trace.report._log_summary()\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/pymc3-3.8-py3.8.egg/pymc3/backends/report.py in _run_convergence_checks(self, trace, model)\r\n 96 varnames.append(rv_name)\r\n 97 \r\n---> 98 self._ess = ess = ess(trace, var_names=varnames)\r\n 99 self._rhat = rhat = rhat(trace, var_names=varnames)\r\n 100 \r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/pymc3-3.8-py3.8.egg/pymc3/stats/__init__.py in wrapped(*args, **kwargs)\r\n 36 )\r\n 37 kwargs[new] = kwargs.pop(old)\r\n---> 38 return func(*args, **kwargs)\r\n 39 \r\n 40 return wrapped\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/stats/diagnostics.py in ess(data, var_names, method, relative, prob)\r\n 187 raise TypeError(msg)\r\n 188 \r\n--> 189 dataset = convert_to_dataset(data, group=\"posterior\")\r\n 190 var_names = _var_names(var_names, dataset)\r\n 191 \r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/converters.py in convert_to_dataset(obj, group, coords, dims)\r\n 166 xarray.Dataset\r\n 167 \"\"\"\r\n--> 168 inference_data = convert_to_inference_data(obj, group=group, coords=coords, dims=dims)\r\n 169 dataset = getattr(inference_data, group, None)\r\n 170 if dataset is None:\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/converters.py in convert_to_inference_data(obj, group, coords, dims, **kwargs)\r\n 87 return from_pystan(**kwargs)\r\n 88 elif obj.__class__.__name__ == \"MultiTrace\": # ugly, but doesn't make PyMC3 a requirement\r\n---> 89 return from_pymc3(trace=kwargs.pop(group), **kwargs)\r\n 90 elif obj.__class__.__name__ == \"EnsembleSampler\": # ugly, but doesn't make emcee a requirement\r\n 91 return from_emcee(sampler=kwargs.pop(group), **kwargs)\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/io_pymc3.py in from_pymc3(trace, prior, 
posterior_predictive, coords, dims, model)\r\n 350 ):\r\n 351 \"\"\"Convert pymc3 data into an InferenceData object.\"\"\"\r\n--> 352 return PyMC3Converter(\r\n 353 trace=trace,\r\n 354 prior=prior,\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/io_pymc3.py in to_inference_data(self)\r\n 342 id_dict[\"predictions_constant_data\"] = self.constant_data_to_xarray()\r\n 343 else:\r\n--> 344 id_dict[\"constant_data\"] = self.constant_data_to_xarray()\r\n 345 return InferenceData(**id_dict)\r\n 346 \r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/base.py in wrapped(cls, *args, **kwargs)\r\n 34 if all([getattr(cls, prop_i) is None for prop_i in prop]):\r\n 35 return None\r\n---> 36 return func(cls, *args, **kwargs)\r\n 37 \r\n 38 return wrapped\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/base.py in wrapped(cls, *args, **kwargs)\r\n 34 if all([getattr(cls, prop_i) is None for prop_i in prop]):\r\n 35 return None\r\n---> 36 return func(cls, *args, **kwargs)\r\n 37 \r\n 38 return wrapped\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/io_pymc3.py in constant_data_to_xarray(self)\r\n 309 # this might be a Deterministic, and must be evaluated\r\n 310 elif hasattr(self.model[name], \"eval\"):\r\n--> 311 vals = self.model[name].eval()\r\n 312 vals = np.atleast_1d(vals)\r\n 313 val_dims = dims.get(name)\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/graph.py in eval(self, inputs_to_values)\r\n 520 inputs = tuple(sorted(inputs_to_values.keys(), key=id))\r\n 521 if inputs not in self._fn_cache:\r\n--> 522 self._fn_cache[inputs] = theano.function(inputs, self)\r\n 523 args = [inputs_to_values[param] for param in inputs]\r\n 524 \r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function.py in function(inputs, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input)\r\n 304 # note: pfunc will also call orig_function -- orig_function is\r\n 305 # a choke point that all compilation must pass through\r\n--> 306 fn = pfunc(params=inputs,\r\n 307 outputs=outputs,\r\n 308 mode=mode,\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/pfunc.py in pfunc(params, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input, output_keys)\r\n 481 inputs.append(si)\r\n 482 \r\n--> 483 return orig_function(inputs, cloned_outputs, mode,\r\n 484 accept_inplace=accept_inplace, name=name,\r\n 485 profile=profile, on_unused_input=on_unused_input,\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function_module.py in orig_function(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input, output_keys)\r\n 1830 try:\r\n 1831 Maker = getattr(mode, 'function_maker', FunctionMaker)\r\n-> 1832 m = Maker(inputs,\r\n 1833 outputs,\r\n 1834 mode,\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function_module.py in __init__(self, inputs, outputs, mode, accept_inplace, function_builder, profile, on_unused_input, fgraph, output_keys, name)\r\n 1484 # make the fgraph (copies the graph, creates NEW INPUT AND\r\n 1485 # OUTPUT VARIABLES)\r\n-> 1486 fgraph, additional_outputs 
= std_fgraph(inputs, outputs,\r\n 1487 accept_inplace)\r\n 1488 fgraph.profile = profile\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function_module.py in std_fgraph(input_specs, output_specs, accept_inplace)\r\n 178 orig_outputs = [spec.variable for spec in output_specs] + updates\r\n 179 \r\n--> 180 fgraph = gof.fg.FunctionGraph(orig_inputs, orig_outputs,\r\n 181 update_mapping=update_mapping)\r\n 182 \r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/fg.py in __init__(self, inputs, outputs, features, clone, update_mapping)\r\n 173 \r\n 174 for output in outputs:\r\n--> 175 self.__import_r__(output, reason=\"init\")\r\n 176 for i, output in enumerate(outputs):\r\n 177 output.clients.append(('output', i))\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/fg.py in __import_r__(self, variable, reason)\r\n 344 # Imports the owners of the variables\r\n 345 if variable.owner and variable.owner not in self.apply_nodes:\r\n--> 346 self.__import__(variable.owner, reason=reason)\r\n 347 elif (variable.owner is None and\r\n 348 not isinstance(variable, graph.Constant) and\r\n\r\n~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/fg.py in __import__(self, apply_node, check, reason)\r\n 389 \"for more information on this error.\"\r\n 390 % (node.inputs.index(r), str(node)))\r\n--> 391 raise MissingInputError(error_msg, variable=r)\r\n 392 \r\n 393 for node in new_nodes:\r\n\r\nMissingInputError: Input 0 of the graph (indices start from 0), used to compute InplaceDimShuffle{x}(x), was not provided and not given a value. Use the Theano flag exception_verbosity='high', for more information on this error.\r\n```\r\n\n", "before_files": [{"content": "\"\"\"PyMC3-specific conversion code.\"\"\"\nimport logging\nfrom typing import Dict, List, Any, Optional, TYPE_CHECKING\nfrom types import ModuleType\n\nimport numpy as np\nimport xarray as xr\nfrom .. 
import utils\nfrom .inference_data import InferenceData, concat\nfrom .base import requires, dict_to_dataset, generate_dims_coords, make_attrs\n\nif TYPE_CHECKING:\n import pymc3 as pm\n from pymc3 import MultiTrace, Model # pylint: disable=invalid-name\n import theano\n from typing import Set # pylint: disable=ungrouped-imports\nelse:\n MultiTrace = Any # pylint: disable=invalid-name\n Model = Any # pylint: disable=invalid-name\n\n___all__ = [\"\"]\n\n_log = logging.getLogger(__name__)\n\nCoords = Dict[str, List[Any]]\nDims = Dict[str, List[str]]\n# random variable object ...\nVar = Any # pylint: disable=invalid-name\n\n\ndef _monkey_patch_pymc3(pm: ModuleType) -> None: # pylint: disable=invalid-name\n assert pm.__name__ == \"pymc3\"\n\n def fixed_eq(self, other):\n \"\"\"Use object identity for MultiObservedRV equality.\"\"\"\n return self is other\n\n if tuple([int(x) for x in pm.__version__.split(\".\")]) < (3, 9): # type: ignore\n pm.model.MultiObservedRV.__eq__ = fixed_eq # type: ignore\n\n\nclass PyMC3Converter: # pylint: disable=too-many-instance-attributes\n \"\"\"Encapsulate PyMC3 specific logic.\"\"\"\n\n model = None # type: Optional[pm.Model]\n nchains = None # type: int\n ndraws = None # type: int\n posterior_predictive = None # Type: Optional[Dict[str, np.ndarray]]\n predictions = None # Type: Optional[Dict[str, np.ndarray]]\n prior = None # Type: Optional[Dict[str, np.ndarray]]\n\n def __init__(\n self,\n *,\n trace=None,\n prior=None,\n posterior_predictive=None,\n predictions=None,\n coords: Optional[Coords] = None,\n dims: Optional[Dims] = None,\n model=None\n ):\n import pymc3\n import theano\n\n _monkey_patch_pymc3(pymc3)\n\n self.pymc3 = pymc3\n self.theano = theano\n\n self.trace = trace\n\n # this permits us to get the model from command-line argument or from with model:\n try:\n self.model = self.pymc3.modelcontext(model or self.model)\n except TypeError:\n self.model = None\n\n # This next line is brittle and may not work forever, but is a secret\n # way to access the model from the trace.\n if trace is not None:\n if self.model is None:\n self.model = self.trace._straces[0].model # pylint: disable=protected-access\n self.nchains = trace.nchains if hasattr(trace, \"nchains\") else 1\n self.ndraws = len(trace)\n else:\n self.nchains = self.ndraws = 0\n\n self.prior = prior\n self.posterior_predictive = posterior_predictive\n self.predictions = predictions\n\n def arbitrary_element(dct: Dict[Any, np.ndarray]) -> np.ndarray:\n return next(iter(dct.values()))\n\n if trace is None:\n # if you have a posterior_predictive built with keep_dims,\n # you'll lose here, but there's nothing I can do about that.\n self.nchains = 1\n get_from = None\n if predictions is not None:\n get_from = predictions\n elif posterior_predictive is not None:\n get_from = posterior_predictive\n elif prior is not None:\n get_from = prior\n if get_from is None:\n # pylint: disable=line-too-long\n raise ValueError(\n \"\"\"When constructing InferenceData must have at least\n one of trace, prior, posterior_predictive or predictions.\"\"\"\n )\n\n aelem = arbitrary_element(get_from)\n self.ndraws = aelem.shape[0]\n\n self.coords = coords\n self.dims = dims\n self.observations = self.find_observations()\n\n def find_observations(self) -> Optional[Dict[str, Var]]:\n \"\"\"If there are observations available, return them as a dictionary.\"\"\"\n has_observations = False\n if self.trace is not None:\n assert self.model is not None, \"Cannot identify observations without PymC3 model\"\n if 
any((hasattr(obs, \"observations\") for obs in self.model.observed_RVs)):\n has_observations = True\n if has_observations:\n assert self.model is not None\n return {obs.name: obs.observations for obs in self.model.observed_RVs}\n return None\n\n def log_likelihood_vals_point(self, point, var, log_like_fun):\n \"\"\"Compute log likelihood for each observed point.\"\"\"\n log_like_val = utils.one_de(log_like_fun(point))\n if var.missing_values:\n log_like_val = np.where(var.observations.mask, np.nan, log_like_val)\n return log_like_val\n\n @requires(\"trace\")\n @requires(\"model\")\n def _extract_log_likelihood(self):\n \"\"\"Compute log likelihood of each observation.\"\"\"\n # If we have predictions, then we have a thinned trace which does not\n # support extracting a log likelihood.\n cached = [(var, var.logp_elemwise) for var in self.model.observed_RVs]\n log_likelihood_dict = {}\n for var, log_like_fun in cached:\n chain_likelihoods = []\n for chain in self.trace.chains:\n log_like_chain = [\n self.log_likelihood_vals_point(point, var, log_like_fun)\n for point in self.trace.points([chain])\n ]\n chain_likelihoods.append(np.stack(log_like_chain))\n log_likelihood_dict[var.name] = np.stack(chain_likelihoods)\n return log_likelihood_dict\n\n @requires(\"trace\")\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n var_names = self.pymc3.util.get_default_varnames( # pylint: disable=no-member\n self.trace.varnames, include_transformed=False\n )\n data = {}\n for var_name in var_names:\n data[var_name] = np.array(self.trace.get_values(var_name, combine=False, squeeze=False))\n return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)\n\n @requires(\"trace\")\n def sample_stats_to_xarray(self):\n \"\"\"Extract sample_stats from PyMC3 trace.\"\"\"\n data = {}\n rename_key = {\"model_logp\": \"lp\"}\n data = {}\n for stat in self.trace.stat_names:\n name = rename_key.get(stat, stat)\n data[name] = np.array(self.trace.get_sampler_stats(stat, combine=False))\n\n return dict_to_dataset(data, library=self.pymc3, dims=None, coords=self.coords)\n\n @requires(\"trace\")\n @requires(\"model\")\n def log_likelihood_to_xarray(self):\n \"\"\"Extract log likelihood and log_p data from PyMC3 trace.\"\"\"\n if self.predictions:\n return None\n data = self._extract_log_likelihood()\n return dict_to_dataset(data, library=self.pymc3, dims=self.dims, coords=self.coords)\n\n def translate_posterior_predictive_dict_to_xarray(self, dct) -> xr.Dataset:\n \"\"\"Take Dict of variables to numpy ndarrays (samples) and translate into dataset.\"\"\"\n data = {}\n for k, ary in dct.items():\n shape = ary.shape\n if shape[0] == self.nchains and shape[1] == self.ndraws:\n data[k] = ary\n elif shape[0] == self.nchains * self.ndraws:\n data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))\n else:\n data[k] = utils.expand_dims(ary)\n # pylint: disable=line-too-long\n _log.warning(\n \"posterior predictive variable %s's shape not compatible with number of chains and draws. 
\"\n \"This can mean that some draws or even whole chains are not represented.\",\n k,\n )\n return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)\n\n @requires([\"posterior_predictive\"])\n def posterior_predictive_to_xarray(self):\n \"\"\"Convert posterior_predictive samples to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.posterior_predictive)\n\n @requires([\"predictions\"])\n def predictions_to_xarray(self):\n \"\"\"Convert predictions (out of sample predictions) to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.predictions)\n\n def priors_to_xarray(self):\n \"\"\"Convert prior samples (and if possible prior predictive too) to xarray.\"\"\"\n if self.prior is None:\n return {\"prior\": None, \"prior_predictive\": None}\n if self.trace is not None:\n prior_vars = self.pymc3.util.get_default_varnames( # pylint: disable=no-member\n self.trace.varnames, include_transformed=False\n )\n prior_predictive_vars = [key for key in self.prior.keys() if key not in prior_vars]\n else:\n prior_vars = list(self.prior.keys())\n prior_predictive_vars = None\n\n priors_dict = {}\n for group, var_names in zip(\n (\"prior\", \"prior_predictive\"), (prior_vars, prior_predictive_vars)\n ):\n priors_dict[group] = (\n None\n if var_names is None\n else dict_to_dataset(\n {k: utils.expand_dims(self.prior[k]) for k in var_names},\n library=self.pymc3,\n coords=self.coords,\n dims=self.dims,\n )\n )\n return priors_dict\n\n @requires(\"observations\")\n @requires(\"model\")\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n if self.dims is None:\n dims = {}\n else:\n dims = self.dims\n observed_data = {}\n for name, vals in self.observations.items():\n if hasattr(vals, \"get_value\"):\n vals = vals.get_value()\n vals = utils.one_de(vals)\n val_dims = dims.get(name)\n val_dims, coords = generate_dims_coords(\n vals.shape, name, dims=val_dims, coords=self.coords\n )\n # filter coords based on the dims\n coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\n observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\n return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.pymc3))\n\n @requires([\"trace\", \"predictions\"])\n @requires(\"model\")\n def constant_data_to_xarray(self):\n \"\"\"Convert constant data to xarray.\"\"\"\n # For constant data, we are concerned only with deterministics and data.\n # The constant data vars must be either pm.Data (TensorSharedVariable) or pm.Deterministic\n constant_data_vars = {} # type: Dict[str, Var]\n for var in self.model.deterministics:\n ancestors = self.theano.tensor.gof.graph.ancestors(var.owner.inputs)\n # no dependency on a random variable\n if not any((isinstance(a, self.pymc3.model.PyMC3Variable) for a in ancestors)):\n constant_data_vars[var.name] = var\n\n def is_data(name, var) -> bool:\n assert self.model is not None\n return (\n var not in self.model.deterministics\n and var not in self.model.observed_RVs\n and var not in self.model.free_RVs\n and (self.observations is None or name not in self.observations)\n )\n\n # I don't know how to find pm.Data, except that they are named variables that aren't\n # observed or free RVs, nor are they deterministics, and then we eliminate observations.\n for name, var in self.model.named_vars.items():\n if is_data(name, var):\n constant_data_vars[name] = var\n\n if not constant_data_vars:\n return None\n if self.dims is None:\n dims = {}\n 
else:\n dims = self.dims\n constant_data = {}\n for name, vals in constant_data_vars.items():\n if hasattr(vals, \"get_value\"):\n vals = vals.get_value()\n # this might be a Deterministic, and must be evaluated\n elif hasattr(self.model[name], \"eval\"):\n vals = self.model[name].eval()\n vals = np.atleast_1d(vals)\n val_dims = dims.get(name)\n val_dims, coords = generate_dims_coords(\n vals.shape, name, dims=val_dims, coords=self.coords\n )\n # filter coords based on the dims\n coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\n try:\n constant_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\n except ValueError as e: # pylint: disable=invalid-name\n raise ValueError(\"Error translating constant_data variable %s: %s\" % (name, e))\n return xr.Dataset(data_vars=constant_data, attrs=make_attrs(library=self.pymc3))\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\n\n Note that if groups can not be created (e.g., there is no `trace`, so\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\n will not have those groups.\n \"\"\"\n id_dict = {\n \"posterior\": self.posterior_to_xarray(),\n \"sample_stats\": self.sample_stats_to_xarray(),\n \"log_likelihood\": self.log_likelihood_to_xarray(),\n \"posterior_predictive\": self.posterior_predictive_to_xarray(),\n \"predictions\": self.predictions_to_xarray(),\n **self.priors_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n }\n if self.predictions:\n id_dict[\"predictions_constant_data\"] = self.constant_data_to_xarray()\n else:\n id_dict[\"constant_data\"] = self.constant_data_to_xarray()\n return InferenceData(**id_dict)\n\n\ndef from_pymc3(\n trace=None, *, prior=None, posterior_predictive=None, coords=None, dims=None, model=None\n):\n \"\"\"Convert pymc3 data into an InferenceData object.\"\"\"\n return PyMC3Converter(\n trace=trace,\n prior=prior,\n posterior_predictive=posterior_predictive,\n coords=coords,\n dims=dims,\n model=model,\n ).to_inference_data()\n\n\n### Later I could have this return ``None`` if the ``idata_orig`` argument is supplied. But\n### perhaps we should have an inplace argument?\ndef from_pymc3_predictions(\n predictions,\n posterior_trace: Optional[MultiTrace] = None,\n model: Optional[Model] = None,\n coords=None,\n dims=None,\n idata_orig: Optional[InferenceData] = None,\n inplace: bool = False,\n) -> InferenceData:\n \"\"\"Translate out-of-sample predictions into ``InferenceData``.\n\n Parameters\n ----------\n predictions: Dict[str, np.ndarray]\n The predictions are the return value of ``pymc3.sample_posterior_predictive``,\n a dictionary of strings (variable names) to numpy ndarrays (draws).\n posterior_trace: pm.MultiTrace\n This should be a trace that has been thinned appropriately for\n ``pymc3.sample_posterior_predictive``. Specifically, any variable whose shape is\n a deterministic function of the shape of any predictor (explanatory, independent, etc.)\n variables must be *removed* from this trace.\n model: pymc3.Model\n This argument is *not* optional, unlike in conventional uses of ``from_pymc3``.\n The reason is that the posterior_trace argument is likely to supply an incorrect\n value of model.\n coords: Dict[str, array-like[Any]]\n Coordinates for the variables. 
Map from coordinate names to coordinate values.\n dims: Dict[str, array-like[str]]\n Map from variable name to ordered set of coordinate names.\n idata_orig: InferenceData, optional\n If supplied, then modify this inference data in place, adding ``predictions`` and\n (if available) ``predictions_constant_data`` groups. If this is not supplied, make a\n fresh InferenceData\n inplace: boolean, optional\n If idata_orig is supplied and inplace is True, merge the predictions into idata_orig,\n rather than returning a fresh InferenceData object.\n\n Returns\n -------\n InferenceData:\n May be modified ``idata_orig``.\n \"\"\"\n if inplace and not idata_orig:\n raise ValueError(\n (\n \"Do not pass True for inplace unless passing\"\n \"an existing InferenceData as idata_orig\"\n )\n )\n new_idata = PyMC3Converter(\n trace=posterior_trace, predictions=predictions, model=model, coords=coords, dims=dims\n ).to_inference_data()\n if idata_orig is None:\n return new_idata\n elif inplace:\n concat([idata_orig, new_idata], dim=None, inplace=True)\n return idata_orig\n else:\n # if we are not returning in place, then merge the old groups into the new inference\n # data and return that.\n concat([new_idata, idata_orig], dim=None, copy=True, inplace=True)\n return new_idata\n", "path": "arviz/data/io_pymc3.py"}], "after_files": [{"content": "\"\"\"PyMC3-specific conversion code.\"\"\"\nimport logging\nfrom typing import Dict, List, Any, Optional, TYPE_CHECKING\nfrom types import ModuleType\n\nimport numpy as np\nimport xarray as xr\nfrom .. import utils\nfrom .inference_data import InferenceData, concat\nfrom .base import requires, dict_to_dataset, generate_dims_coords, make_attrs\n\nif TYPE_CHECKING:\n import pymc3 as pm\n from pymc3 import MultiTrace, Model # pylint: disable=invalid-name\n import theano\n from typing import Set # pylint: disable=ungrouped-imports\nelse:\n MultiTrace = Any # pylint: disable=invalid-name\n Model = Any # pylint: disable=invalid-name\n\n___all__ = [\"\"]\n\n_log = logging.getLogger(__name__)\n\nCoords = Dict[str, List[Any]]\nDims = Dict[str, List[str]]\n# random variable object ...\nVar = Any # pylint: disable=invalid-name\n\n\ndef _monkey_patch_pymc3(pm: ModuleType) -> None: # pylint: disable=invalid-name\n assert pm.__name__ == \"pymc3\"\n\n def fixed_eq(self, other):\n \"\"\"Use object identity for MultiObservedRV equality.\"\"\"\n return self is other\n\n if tuple([int(x) for x in pm.__version__.split(\".\")]) < (3, 9): # type: ignore\n pm.model.MultiObservedRV.__eq__ = fixed_eq # type: ignore\n\n\nclass PyMC3Converter: # pylint: disable=too-many-instance-attributes\n \"\"\"Encapsulate PyMC3 specific logic.\"\"\"\n\n model = None # type: Optional[pm.Model]\n nchains = None # type: int\n ndraws = None # type: int\n posterior_predictive = None # Type: Optional[Dict[str, np.ndarray]]\n predictions = None # Type: Optional[Dict[str, np.ndarray]]\n prior = None # Type: Optional[Dict[str, np.ndarray]]\n\n def __init__(\n self,\n *,\n trace=None,\n prior=None,\n posterior_predictive=None,\n predictions=None,\n coords: Optional[Coords] = None,\n dims: Optional[Dims] = None,\n model=None\n ):\n import pymc3\n import theano\n\n _monkey_patch_pymc3(pymc3)\n\n self.pymc3 = pymc3\n self.theano = theano\n\n self.trace = trace\n\n # this permits us to get the model from command-line argument or from with model:\n try:\n self.model = self.pymc3.modelcontext(model or self.model)\n except TypeError:\n self.model = None\n\n # This next line is brittle and may not work forever, but is a 
secret\n # way to access the model from the trace.\n if trace is not None:\n if self.model is None:\n self.model = self.trace._straces[0].model # pylint: disable=protected-access\n self.nchains = trace.nchains if hasattr(trace, \"nchains\") else 1\n self.ndraws = len(trace)\n else:\n self.nchains = self.ndraws = 0\n\n self.prior = prior\n self.posterior_predictive = posterior_predictive\n self.predictions = predictions\n\n def arbitrary_element(dct: Dict[Any, np.ndarray]) -> np.ndarray:\n return next(iter(dct.values()))\n\n if trace is None:\n # if you have a posterior_predictive built with keep_dims,\n # you'll lose here, but there's nothing I can do about that.\n self.nchains = 1\n get_from = None\n if predictions is not None:\n get_from = predictions\n elif posterior_predictive is not None:\n get_from = posterior_predictive\n elif prior is not None:\n get_from = prior\n if get_from is None:\n # pylint: disable=line-too-long\n raise ValueError(\n \"\"\"When constructing InferenceData must have at least\n one of trace, prior, posterior_predictive or predictions.\"\"\"\n )\n\n aelem = arbitrary_element(get_from)\n self.ndraws = aelem.shape[0]\n\n self.coords = coords\n self.dims = dims\n self.observations = self.find_observations()\n\n def find_observations(self) -> Optional[Dict[str, Var]]:\n \"\"\"If there are observations available, return them as a dictionary.\"\"\"\n has_observations = False\n if self.trace is not None:\n assert self.model is not None, \"Cannot identify observations without PymC3 model\"\n if any((hasattr(obs, \"observations\") for obs in self.model.observed_RVs)):\n has_observations = True\n if has_observations:\n assert self.model is not None\n return {obs.name: obs.observations for obs in self.model.observed_RVs}\n return None\n\n def log_likelihood_vals_point(self, point, var, log_like_fun):\n \"\"\"Compute log likelihood for each observed point.\"\"\"\n log_like_val = utils.one_de(log_like_fun(point))\n if var.missing_values:\n log_like_val = np.where(var.observations.mask, np.nan, log_like_val)\n return log_like_val\n\n @requires(\"trace\")\n @requires(\"model\")\n def _extract_log_likelihood(self):\n \"\"\"Compute log likelihood of each observation.\"\"\"\n # If we have predictions, then we have a thinned trace which does not\n # support extracting a log likelihood.\n cached = [(var, var.logp_elemwise) for var in self.model.observed_RVs]\n log_likelihood_dict = {}\n for var, log_like_fun in cached:\n chain_likelihoods = []\n for chain in self.trace.chains:\n log_like_chain = [\n self.log_likelihood_vals_point(point, var, log_like_fun)\n for point in self.trace.points([chain])\n ]\n chain_likelihoods.append(np.stack(log_like_chain))\n log_likelihood_dict[var.name] = np.stack(chain_likelihoods)\n return log_likelihood_dict\n\n @requires(\"trace\")\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n var_names = self.pymc3.util.get_default_varnames( # pylint: disable=no-member\n self.trace.varnames, include_transformed=False\n )\n data = {}\n for var_name in var_names:\n data[var_name] = np.array(self.trace.get_values(var_name, combine=False, squeeze=False))\n return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)\n\n @requires(\"trace\")\n def sample_stats_to_xarray(self):\n \"\"\"Extract sample_stats from PyMC3 trace.\"\"\"\n data = {}\n rename_key = {\"model_logp\": \"lp\"}\n data = {}\n for stat in self.trace.stat_names:\n name = rename_key.get(stat, stat)\n data[name] = 
np.array(self.trace.get_sampler_stats(stat, combine=False))\n\n return dict_to_dataset(data, library=self.pymc3, dims=None, coords=self.coords)\n\n @requires(\"trace\")\n @requires(\"model\")\n def log_likelihood_to_xarray(self):\n \"\"\"Extract log likelihood and log_p data from PyMC3 trace.\"\"\"\n if self.predictions:\n return None\n data = self._extract_log_likelihood()\n return dict_to_dataset(data, library=self.pymc3, dims=self.dims, coords=self.coords)\n\n def translate_posterior_predictive_dict_to_xarray(self, dct) -> xr.Dataset:\n \"\"\"Take Dict of variables to numpy ndarrays (samples) and translate into dataset.\"\"\"\n data = {}\n for k, ary in dct.items():\n shape = ary.shape\n if shape[0] == self.nchains and shape[1] == self.ndraws:\n data[k] = ary\n elif shape[0] == self.nchains * self.ndraws:\n data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))\n else:\n data[k] = utils.expand_dims(ary)\n # pylint: disable=line-too-long\n _log.warning(\n \"posterior predictive variable %s's shape not compatible with number of chains and draws. \"\n \"This can mean that some draws or even whole chains are not represented.\",\n k,\n )\n return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)\n\n @requires([\"posterior_predictive\"])\n def posterior_predictive_to_xarray(self):\n \"\"\"Convert posterior_predictive samples to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.posterior_predictive)\n\n @requires([\"predictions\"])\n def predictions_to_xarray(self):\n \"\"\"Convert predictions (out of sample predictions) to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.predictions)\n\n def priors_to_xarray(self):\n \"\"\"Convert prior samples (and if possible prior predictive too) to xarray.\"\"\"\n if self.prior is None:\n return {\"prior\": None, \"prior_predictive\": None}\n if self.trace is not None:\n prior_vars = self.pymc3.util.get_default_varnames( # pylint: disable=no-member\n self.trace.varnames, include_transformed=False\n )\n prior_predictive_vars = [key for key in self.prior.keys() if key not in prior_vars]\n else:\n prior_vars = list(self.prior.keys())\n prior_predictive_vars = None\n\n priors_dict = {}\n for group, var_names in zip(\n (\"prior\", \"prior_predictive\"), (prior_vars, prior_predictive_vars)\n ):\n priors_dict[group] = (\n None\n if var_names is None\n else dict_to_dataset(\n {k: utils.expand_dims(self.prior[k]) for k in var_names},\n library=self.pymc3,\n coords=self.coords,\n dims=self.dims,\n )\n )\n return priors_dict\n\n @requires(\"observations\")\n @requires(\"model\")\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n if self.dims is None:\n dims = {}\n else:\n dims = self.dims\n observed_data = {}\n for name, vals in self.observations.items():\n if hasattr(vals, \"get_value\"):\n vals = vals.get_value()\n vals = utils.one_de(vals)\n val_dims = dims.get(name)\n val_dims, coords = generate_dims_coords(\n vals.shape, name, dims=val_dims, coords=self.coords\n )\n # filter coords based on the dims\n coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\n observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\n return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.pymc3))\n\n @requires([\"trace\", \"predictions\"])\n @requires(\"model\")\n def constant_data_to_xarray(self):\n \"\"\"Convert constant data to xarray.\"\"\"\n # For constant data, we are concerned only with 
deterministics and data.\n # The constant data vars must be either pm.Data (TensorSharedVariable) or pm.Deterministic\n constant_data_vars = {} # type: Dict[str, Var]\n for var in self.model.deterministics:\n ancestors = self.theano.tensor.gof.graph.ancestors(var.owner.inputs)\n # no dependency on a random variable\n if not any((isinstance(a, self.pymc3.model.PyMC3Variable) for a in ancestors)):\n constant_data_vars[var.name] = var\n\n def is_data(name, var) -> bool:\n assert self.model is not None\n return (\n var not in self.model.deterministics\n and var not in self.model.observed_RVs\n and var not in self.model.free_RVs\n and var not in self.model.potentials\n and (self.observations is None or name not in self.observations)\n )\n\n # I don't know how to find pm.Data, except that they are named variables that aren't\n # observed or free RVs, nor are they deterministics, and then we eliminate observations.\n for name, var in self.model.named_vars.items():\n if is_data(name, var):\n constant_data_vars[name] = var\n\n if not constant_data_vars:\n return None\n if self.dims is None:\n dims = {}\n else:\n dims = self.dims\n constant_data = {}\n for name, vals in constant_data_vars.items():\n if hasattr(vals, \"get_value\"):\n vals = vals.get_value()\n # this might be a Deterministic, and must be evaluated\n elif hasattr(self.model[name], \"eval\"):\n vals = self.model[name].eval()\n vals = np.atleast_1d(vals)\n val_dims = dims.get(name)\n val_dims, coords = generate_dims_coords(\n vals.shape, name, dims=val_dims, coords=self.coords\n )\n # filter coords based on the dims\n coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\n try:\n constant_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\n except ValueError as e: # pylint: disable=invalid-name\n raise ValueError(\"Error translating constant_data variable %s: %s\" % (name, e))\n return xr.Dataset(data_vars=constant_data, attrs=make_attrs(library=self.pymc3))\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\n\n Note that if groups can not be created (e.g., there is no `trace`, so\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\n will not have those groups.\n \"\"\"\n id_dict = {\n \"posterior\": self.posterior_to_xarray(),\n \"sample_stats\": self.sample_stats_to_xarray(),\n \"log_likelihood\": self.log_likelihood_to_xarray(),\n \"posterior_predictive\": self.posterior_predictive_to_xarray(),\n \"predictions\": self.predictions_to_xarray(),\n **self.priors_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n }\n if self.predictions:\n id_dict[\"predictions_constant_data\"] = self.constant_data_to_xarray()\n else:\n id_dict[\"constant_data\"] = self.constant_data_to_xarray()\n return InferenceData(**id_dict)\n\n\ndef from_pymc3(\n trace=None, *, prior=None, posterior_predictive=None, coords=None, dims=None, model=None\n):\n \"\"\"Convert pymc3 data into an InferenceData object.\"\"\"\n return PyMC3Converter(\n trace=trace,\n prior=prior,\n posterior_predictive=posterior_predictive,\n coords=coords,\n dims=dims,\n model=model,\n ).to_inference_data()\n\n\n### Later I could have this return ``None`` if the ``idata_orig`` argument is supplied. 
But\n### perhaps we should have an inplace argument?\ndef from_pymc3_predictions(\n predictions,\n posterior_trace: Optional[MultiTrace] = None,\n model: Optional[Model] = None,\n coords=None,\n dims=None,\n idata_orig: Optional[InferenceData] = None,\n inplace: bool = False,\n) -> InferenceData:\n \"\"\"Translate out-of-sample predictions into ``InferenceData``.\n\n Parameters\n ----------\n predictions: Dict[str, np.ndarray]\n The predictions are the return value of ``pymc3.sample_posterior_predictive``,\n a dictionary of strings (variable names) to numpy ndarrays (draws).\n posterior_trace: pm.MultiTrace\n This should be a trace that has been thinned appropriately for\n ``pymc3.sample_posterior_predictive``. Specifically, any variable whose shape is\n a deterministic function of the shape of any predictor (explanatory, independent, etc.)\n variables must be *removed* from this trace.\n model: pymc3.Model\n This argument is *not* optional, unlike in conventional uses of ``from_pymc3``.\n The reason is that the posterior_trace argument is likely to supply an incorrect\n value of model.\n coords: Dict[str, array-like[Any]]\n Coordinates for the variables. Map from coordinate names to coordinate values.\n dims: Dict[str, array-like[str]]\n Map from variable name to ordered set of coordinate names.\n idata_orig: InferenceData, optional\n If supplied, then modify this inference data in place, adding ``predictions`` and\n (if available) ``predictions_constant_data`` groups. If this is not supplied, make a\n fresh InferenceData\n inplace: boolean, optional\n If idata_orig is supplied and inplace is True, merge the predictions into idata_orig,\n rather than returning a fresh InferenceData object.\n\n Returns\n -------\n InferenceData:\n May be modified ``idata_orig``.\n \"\"\"\n if inplace and not idata_orig:\n raise ValueError(\n (\n \"Do not pass True for inplace unless passing\"\n \"an existing InferenceData as idata_orig\"\n )\n )\n new_idata = PyMC3Converter(\n trace=posterior_trace, predictions=predictions, model=model, coords=coords, dims=dims\n ).to_inference_data()\n if idata_orig is None:\n return new_idata\n elif inplace:\n concat([idata_orig, new_idata], dim=None, inplace=True)\n return idata_orig\n else:\n # if we are not returning in place, then merge the old groups into the new inference\n # data and return that.\n concat([new_idata, idata_orig], dim=None, copy=True, inplace=True)\n return new_idata\n", "path": "arviz/data/io_pymc3.py"}]} |
gh_patches_debug_1470 | rasdani/github-patches | git_diff | pretix__pretix-2537 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot change month in the widget
In the current version it is not possible to change the month in the widget. When you click the month navigation, the widget reloads but nothing changes. The cause seems to be that the call https://XXXXX/widget/product_list?lang=es&year=2022&month=03 always returns the same content regardless of the value passed in the `month` parameter.
--- END ISSUE ---
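For illustration only, a minimal sketch (not the project's actual fix) of how a Django view could read the `year` and `month` query parameters the widget sends; the helper name and the fallback to the current month are assumptions:

```python
# Hypothetical sketch: parse the widget's ?year=YYYY&month=MM parameters,
# falling back to the current month when they are missing or malformed.
from django.utils.timezone import now


def resolve_year_month(request):
    try:
        year = int(request.GET.get("year", ""))
        month = int(request.GET.get("month", ""))
        if not 1 <= month <= 12:
            raise ValueError
    except ValueError:
        today = now().date()
        year, month = today.year, today.month
    return year, month
```

Note that `_set_month_year` in the file below only inspects a `date` query parameter, which may be related to the reported behaviour.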
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretix/presale/views/organizer.py`
Content:
```
1 #
2 # This file is part of pretix (Community Edition).
3 #
4 # Copyright (C) 2014-2020 Raphael Michel and contributors
5 # Copyright (C) 2020-2021 rami.io GmbH and contributors
6 #
7 # This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
8 # Public License as published by the Free Software Foundation in version 3 of the License.
9 #
10 # ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
11 # applicable granting you additional permissions and placing additional restrictions on your usage of this software.
12 # Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
13 # this file, see <https://pretix.eu/about/en/license>.
14 #
15 # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
16 # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
17 # details.
18 #
19 # You should have received a copy of the GNU Affero General Public License along with this program. If not, see
20 # <https://www.gnu.org/licenses/>.
21 #
22
23 # This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
24 # the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
25 #
26 # This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
27 # full history of changes and contributors is available at <https://github.com/pretix/pretix>.
28 #
29 # This file contains Apache-licensed contributions copyrighted by: Jan Felix Wiebe, Mohit Jindal
30 #
31 # Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
32 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
33 # License for the specific language governing permissions and limitations under the License.
34 import calendar
35 import hashlib
36 import math
37 from collections import defaultdict
38 from datetime import date, datetime, time, timedelta
39 from functools import reduce
40 from urllib.parse import quote, urlencode
41
42 import dateutil
43 import isoweek
44 import pytz
45 from django.conf import settings
46 from django.core.cache import caches
47 from django.db.models import Exists, Max, Min, OuterRef, Prefetch, Q
48 from django.db.models.functions import Coalesce, Greatest
49 from django.http import Http404, HttpResponse
50 from django.shortcuts import redirect
51 from django.utils.decorators import method_decorator
52 from django.utils.formats import date_format, get_format
53 from django.utils.timezone import get_current_timezone, now
54 from django.views import View
55 from django.views.decorators.cache import cache_page
56 from django.views.generic import ListView, TemplateView
57 from pytz import UTC
58
59 from pretix.base.i18n import language
60 from pretix.base.models import (
61 Event, EventMetaValue, Organizer, Quota, SubEvent, SubEventMetaValue,
62 )
63 from pretix.base.services.quotas import QuotaAvailability
64 from pretix.helpers.compat import date_fromisocalendar
65 from pretix.helpers.daterange import daterange
66 from pretix.helpers.formats.en.formats import (
67 SHORT_MONTH_DAY_FORMAT, WEEK_FORMAT,
68 )
69 from pretix.multidomain.urlreverse import eventreverse
70 from pretix.presale.ical import get_public_ical
71 from pretix.presale.views import OrganizerViewMixin
72
73
74 def filter_qs_by_attr(qs, request):
75 """
76 We'll allow to filter the event list using attributes defined in the event meta data
77 models in the format ?attr[meta_name]=meta_value
78 """
79 attrs = {}
80 for i, item in enumerate(request.GET.items()):
81 k, v = item
82 if k.startswith("attr[") and k.endswith("]"):
83 attrs[k[5:-1]] = v
84
85 skey = 'filter_qs_by_attr_{}_{}'.format(request.organizer.pk, request.event.pk if hasattr(request, 'event') else '')
86 if request.GET.get('attr_persist'):
87 request.session[skey] = attrs
88 elif skey in request.session:
89 attrs = request.session[skey]
90
91 props = {
92 p.name: p for p in request.organizer.meta_properties.filter(
93 name__in=attrs.keys()
94 )
95 }
96
97 for i, item in enumerate(attrs.items()):
98 attr, v = item
99 emv_with_value = EventMetaValue.objects.filter(
100 event=OuterRef('event' if qs.model == SubEvent else 'pk'),
101 property__name=attr,
102 value=v
103 )
104 emv_with_any_value = EventMetaValue.objects.filter(
105 event=OuterRef('event' if qs.model == SubEvent else 'pk'),
106 property__name=attr,
107 )
108 if qs.model == SubEvent:
109 semv_with_value = SubEventMetaValue.objects.filter(
110 subevent=OuterRef('pk'),
111 property__name=attr,
112 value=v
113 )
114 semv_with_any_value = SubEventMetaValue.objects.filter(
115 subevent=OuterRef('pk'),
116 property__name=attr,
117 )
118
119 prop = props.get(attr)
120 if not prop:
121 continue
122 annotations = {'attr_{}'.format(i): Exists(emv_with_value)}
123 if qs.model == SubEvent:
124 annotations['attr_{}_sub'.format(i)] = Exists(semv_with_value)
125 annotations['attr_{}_sub_any'.format(i)] = Exists(semv_with_any_value)
126 filters = Q(**{'attr_{}_sub'.format(i): True})
127 filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}'.format(i): True}))
128 if prop.default == v:
129 annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)
130 filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}_any'.format(i): False}))
131 else:
132 filters = Q(**{'attr_{}'.format(i): True})
133 if prop.default == v:
134 annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)
135 filters |= Q(**{'attr_{}_any'.format(i): False})
136
137 qs = qs.annotate(**annotations).filter(filters)
138 return qs
139
140
141 class EventListMixin:
142
143 def _get_event_queryset(self):
144 query = Q(is_public=True) & Q(live=True)
145 qs = self.request.organizer.events.using(settings.DATABASE_REPLICA).filter(query)
146 qs = qs.filter(sales_channels__contains=self.request.sales_channel.identifier)
147 qs = qs.annotate(
148 min_from=Min('subevents__date_from'),
149 min_to=Min('subevents__date_to'),
150 max_from=Max('subevents__date_from'),
151 max_to=Max('subevents__date_to'),
152 max_fromto=Greatest(Max('subevents__date_to'), Max('subevents__date_from')),
153 )
154 if "old" in self.request.GET:
155 qs = qs.filter(
156 Q(Q(has_subevents=False) & Q(
157 Q(date_to__lt=now()) | Q(Q(date_to__isnull=True) & Q(date_from__lt=now()))
158 )) | Q(Q(has_subevents=True) & Q(
159 Q(min_to__lt=now()) | Q(min_from__lt=now()))
160 )
161 ).annotate(
162 order_to=Coalesce('max_fromto', 'max_to', 'max_from', 'date_to', 'date_from'),
163 ).order_by('-order_to')
164 else:
165 qs = qs.filter(
166 Q(Q(has_subevents=False) & Q(
167 Q(date_to__gte=now()) | Q(Q(date_to__isnull=True) & Q(date_from__gte=now()))
168 )) | Q(Q(has_subevents=True) & Q(
169 Q(max_to__gte=now()) | Q(max_from__gte=now()))
170 )
171 ).annotate(
172 order_from=Coalesce('min_from', 'date_from'),
173 ).order_by('order_from')
174 qs = Event.annotated(filter_qs_by_attr(qs, self.request))
175 return qs
176
177 def _set_month_to_next_subevent(self):
178 tz = pytz.timezone(self.request.event.settings.timezone)
179 next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(
180 Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
181 active=True,
182 is_public=True,
183 ).select_related('event').order_by('date_from').first()
184
185 if next_sev:
186 datetime_from = next_sev.date_from
187 self.year = datetime_from.astimezone(tz).year
188 self.month = datetime_from.astimezone(tz).month
189 else:
190 self.year = now().year
191 self.month = now().month
192
193 def _set_month_to_next_event(self):
194 next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(
195 Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
196 organizer=self.request.organizer,
197 live=True,
198 is_public=True,
199 has_subevents=False
200 ), self.request).order_by('date_from').first()
201 next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(
202 Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
203 event__organizer=self.request.organizer,
204 event__is_public=True,
205 event__live=True,
206 active=True,
207 is_public=True,
208 ), self.request).select_related('event').order_by('date_from').first()
209
210 datetime_from = None
211 if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):
212 datetime_from = next_sev.date_from
213 next_ev = next_sev.event
214 elif next_ev:
215 datetime_from = next_ev.date_from
216
217 if datetime_from:
218 tz = pytz.timezone(next_ev.settings.timezone)
219 self.year = datetime_from.astimezone(tz).year
220 self.month = datetime_from.astimezone(tz).month
221 else:
222 self.year = now().year
223 self.month = now().month
224
225 def _set_month_year(self):
226 if 'date' in self.request.GET:
227 try:
228 date = dateutil.parser.parse(self.request.GET.get('date')).date()
229 except ValueError:
230 date = now().date()
231 self.year = date.year
232 self.month = date.month
233 else:
234 if hasattr(self.request, 'event'):
235 self._set_month_to_next_subevent()
236 else:
237 self._set_month_to_next_event()
238
239 def _set_week_to_next_subevent(self):
240 tz = pytz.timezone(self.request.event.settings.timezone)
241 next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(
242 Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
243 active=True,
244 is_public=True,
245 ).select_related('event').order_by('date_from').first()
246
247 if next_sev:
248 datetime_from = next_sev.date_from
249 self.year = datetime_from.astimezone(tz).isocalendar()[0]
250 self.week = datetime_from.astimezone(tz).isocalendar()[1]
251 else:
252 self.year = now().isocalendar()[0]
253 self.week = now().isocalendar()[1]
254
255 def _set_week_to_next_event(self):
256 next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(
257 Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
258 organizer=self.request.organizer,
259 live=True,
260 is_public=True,
261 has_subevents=False
262 ), self.request).order_by('date_from').first()
263 next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(
264 Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
265 event__organizer=self.request.organizer,
266 event__is_public=True,
267 event__live=True,
268 active=True,
269 is_public=True,
270 ), self.request).select_related('event').order_by('date_from').first()
271
272 datetime_from = None
273 if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):
274 datetime_from = next_sev.date_from
275 next_ev = next_sev.event
276 elif next_ev:
277 datetime_from = next_ev.date_from
278
279 if datetime_from:
280 tz = pytz.timezone(next_ev.settings.timezone)
281 self.year = datetime_from.astimezone(tz).isocalendar()[0]
282 self.week = datetime_from.astimezone(tz).isocalendar()[1]
283 else:
284 self.year = now().isocalendar()[0]
285 self.week = now().isocalendar()[1]
286
287 def _set_week_year(self):
288 if 'date' in self.request.GET:
289 try:
290 iso = dateutil.parser.isoparse(self.request.GET.get('date')).isocalendar()
291 except ValueError:
292 iso = now().isocalendar()
293 self.year = iso[0]
294 self.week = iso[1]
295 else:
296 if hasattr(self.request, 'event'):
297 self._set_week_to_next_subevent()
298 else:
299 self._set_week_to_next_event()
300
301
302 class OrganizerIndex(OrganizerViewMixin, EventListMixin, ListView):
303 model = Event
304 context_object_name = 'events'
305 template_name = 'pretixpresale/organizers/index.html'
306 paginate_by = 30
307
308 def dispatch(self, request, *args, **kwargs):
309 # In stock pretix, nothing on this page is session-dependent except for the language and the customer login part,
310 # so we can cache pretty aggressively if the user is anonymous. Note that we deliberately implement the caching
311 # on the view layer, *after* all middlewares have been ran, so we have access to the computed locale, as well
312 # as the login status etc.
313 cache_allowed = (
314 settings.CACHE_LARGE_VALUES_ALLOWED and
315 not getattr(request, 'customer', None) and
316 not request.user.is_authenticated
317 )
318
319 if not cache_allowed:
320 return super().dispatch(request, *args, **kwargs)
321
322 cache_key_parts = [
323 request.method,
324 request.host,
325 str(request.organizer.pk),
326 request.get_full_path(),
327 request.LANGUAGE_CODE,
328 self.request.sales_channel.identifier,
329 ]
330 for c, v in request.COOKIES.items():
331 # If the cookie is not one we know, it might be set by a plugin and we need to include it in the
332 # cache key to be safe. A known example includes plugins that e.g. store cookie banner state.
333 if c not in (settings.SESSION_COOKIE_NAME, settings.LANGUAGE_COOKIE_NAME, settings.CSRF_COOKIE_NAME) and not c.startswith('__'):
334 cache_key_parts.append(f'{c}={v}')
335 for c, v in request.session.items():
336 # If the session key is not one we know, it might be set by a plugin and we need to include it in the
337 # cache key to be safe. A known example would be the pretix-campaigns plugin setting the campaign ID.
338 if (
339 not c.startswith('_auth') and
340 not c.startswith('pretix_auth_') and
341 not c.startswith('customer_auth_') and
342 not c.startswith('current_cart_') and
343 not c.startswith('cart_') and
344 not c.startswith('payment_') and
345 c not in ('carts', 'payment', 'pinned_user_agent')
346 ):
347 cache_key_parts.append(f'{c}={repr(v)}')
348
349 cache_key = f'pretix.presale.views.organizer.OrganizerIndex:{hashlib.md5(":".join(cache_key_parts).encode()).hexdigest()}'
350 cache_timeout = 15
351 cache = caches[settings.CACHE_LARGE_VALUES_ALIAS]
352
353 response = cache.get(cache_key)
354 if response is not None:
355 return response
356
357 response = super().dispatch(request, *kwargs, **kwargs)
358 if response.status_code >= 400:
359 return response
360
361 if hasattr(response, 'render') and callable(response.render):
362 def _store_to_cache(r):
363 cache.set(cache_key, r, cache_timeout)
364
365 response.add_post_render_callback(_store_to_cache)
366 else:
367 cache.set(cache_key, response, cache_timeout)
368 return response
369
370 def get(self, request, *args, **kwargs):
371 style = request.GET.get("style", request.organizer.settings.event_list_type)
372 if style == "calendar":
373 cv = CalendarView()
374 cv.request = request
375 return cv.get(request, *args, **kwargs)
376 elif style == "day":
377 cv = DayCalendarView()
378 cv.request = request
379 return cv.get(request, *args, **kwargs)
380 elif style == "week":
381 cv = WeekCalendarView()
382 cv.request = request
383 return cv.get(request, *args, **kwargs)
384 else:
385 return super().get(request, *args, **kwargs)
386
387 def get_queryset(self):
388 return self._get_event_queryset()
389
390 def get_context_data(self, **kwargs):
391 ctx = super().get_context_data(**kwargs)
392 for event in ctx['events']:
393 event.tzname = pytz.timezone(event.cache.get_or_set('timezone', lambda: event.settings.timezone))
394 if event.has_subevents:
395 event.daterange = daterange(
396 event.min_from.astimezone(event.tzname),
397 (event.max_fromto or event.max_to or event.max_from).astimezone(event.tzname)
398 )
399 return ctx
400
401
402 def has_before_after(eventqs, subeventqs, before, after):
403 eqs = eventqs.filter(is_public=True, live=True, has_subevents=False)
404 sqs = subeventqs.filter(active=True, is_public=True)
405 return (
406 eqs.filter(Q(date_from__lte=before)).exists() or sqs.filter(Q(date_from__lte=before)).exists(),
407 eqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists() or sqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists()
408 )
409
410
411 def add_events_for_days(request, baseqs, before, after, ebd, timezones):
412 qs = baseqs.filter(is_public=True, live=True, has_subevents=False).filter(
413 Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |
414 Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |
415 Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))
416 ).order_by(
417 'date_from'
418 ).prefetch_related(
419 '_settings_objects',
420 Prefetch(
421 'organizer',
422 queryset=Organizer.objects.prefetch_related('_settings_objects')
423 )
424 )
425 if hasattr(request, 'organizer'):
426 qs = filter_qs_by_attr(qs, request)
427 for event in qs:
428 timezones.add(event.settings.timezones)
429 tz = pytz.timezone(event.settings.timezone)
430 datetime_from = event.date_from.astimezone(tz)
431 date_from = datetime_from.date()
432 if event.settings.show_date_to and event.date_to:
433 datetime_to = event.date_to.astimezone(tz)
434 date_to = event.date_to.astimezone(tz).date()
435 d = max(date_from, before.date())
436 while d <= date_to and d <= after.date():
437 first = d == date_from
438 ebd[d].append({
439 'event': event,
440 'continued': not first,
441 'time': datetime_from.time().replace(tzinfo=None) if first and event.settings.show_times else None,
442 'time_end': (
443 datetime_to.time().replace(tzinfo=None)
444 if (date_to == date_from or (
445 date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()
446 )) and event.settings.show_times
447 else None
448 ),
449 'time_end_today': (
450 datetime_to.time().replace(tzinfo=None)
451 if date_to == d and event.settings.show_times
452 else None
453 ),
454 'url': eventreverse(event, 'presale:event.index'),
455 'timezone': event.settings.timezone,
456 })
457 d += timedelta(days=1)
458
459 else:
460 ebd[date_from].append({
461 'event': event,
462 'continued': False,
463 'time': datetime_from.time().replace(tzinfo=None) if event.settings.show_times else None,
464 'url': eventreverse(event, 'presale:event.index'),
465 'timezone': event.settings.timezone,
466 })
467
468
469 def add_subevents_for_days(qs, before, after, ebd, timezones, event=None, cart_namespace=None, voucher=None):
470 qs = qs.filter(active=True, is_public=True).filter(
471 Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |
472 Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |
473 Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))
474 ).order_by(
475 'date_from'
476 )
477
478 quotas_to_compute = []
479 for se in qs:
480 if se.presale_is_running:
481 quotas_to_compute += se.active_quotas
482
483 qcache = {}
484 if quotas_to_compute:
485 qa = QuotaAvailability()
486 qa.queue(*quotas_to_compute)
487 qa.compute(allow_cache=True)
488 qcache.update(qa.results)
489
490 for se in qs:
491 if qcache:
492 se._quota_cache = qcache
493 kwargs = {'subevent': se.pk}
494 if cart_namespace:
495 kwargs['cart_namespace'] = cart_namespace
496
497 s = event.settings if event else se.event.settings
498
499 if s.event_list_available_only:
500 hide = se.presale_has_ended or (
501 (not voucher or not voucher.allow_ignore_quota) and
502 se.best_availability_state is not None and
503 se.best_availability_state < Quota.AVAILABILITY_RESERVED
504 )
505 if hide:
506 continue
507
508 timezones.add(s.timezones)
509 tz = pytz.timezone(s.timezone)
510 datetime_from = se.date_from.astimezone(tz)
511 date_from = datetime_from.date()
512 if s.show_date_to and se.date_to:
513 datetime_to = se.date_to.astimezone(tz)
514 date_to = se.date_to.astimezone(tz).date()
515 d = max(date_from, before.date())
516 while d <= date_to and d <= after.date():
517 first = d == date_from
518 ebd[d].append({
519 'continued': not first,
520 'timezone': s.timezone,
521 'time': datetime_from.time().replace(tzinfo=None) if first and s.show_times else None,
522 'time_end': (
523 datetime_to.time().replace(tzinfo=None)
524 if (date_to == date_from or (
525 date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()
526 )) and s.show_times
527 else None
528 ),
529 'time_end_today': (
530 datetime_to.time().replace(tzinfo=None)
531 if date_to == d and s.show_times
532 else None
533 ),
534 'event': se,
535 'url': (
536 eventreverse(se.event, 'presale:event.redeem',
537 kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'
538 if voucher
539 else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)
540 )
541 })
542 d += timedelta(days=1)
543
544 else:
545 ebd[date_from].append({
546 'event': se,
547 'continued': False,
548 'time': datetime_from.time().replace(tzinfo=None) if s.show_times else None,
549 'url': (
550 eventreverse(se.event, 'presale:event.redeem',
551 kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'
552 if voucher
553 else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)
554 ),
555 'timezone': s.timezone,
556 })
557
558
559 def sort_ev(e):
560 return e['time'] or time(0, 0, 0), str(e['event'].name)
561
562
563 def days_for_template(ebd, week):
564 day_format = get_format('WEEK_DAY_FORMAT')
565 if day_format == 'WEEK_DAY_FORMAT':
566 day_format = 'SHORT_DATE_FORMAT'
567 return [
568 {
569 'day_formatted': date_format(day, day_format),
570 'date': day,
571 'today': day == now().astimezone(get_current_timezone()).date(),
572 'events': sorted(ebd.get(day), key=sort_ev) if day in ebd else []
573 }
574 for day in week.days()
575 ]
576
577
578 def weeks_for_template(ebd, year, month):
579 calendar.setfirstweekday(0) # TODO: Configurable
580 return [
581 [
582 {
583 'day': day,
584 'date': date(year, month, day),
585 'events': (
586 sorted(ebd.get(date(year, month, day)), key=sort_ev)
587 if date(year, month, day) in ebd else None
588 )
589 }
590 if day > 0
591 else None
592 for day in week
593 ]
594 for week in calendar.monthcalendar(year, month)
595 ]
596
597
598 class CalendarView(OrganizerViewMixin, EventListMixin, TemplateView):
599 template_name = 'pretixpresale/organizers/calendar.html'
600
601 def get(self, request, *args, **kwargs):
602 # redirect old month-year-URLs to new date-URLs
603 keys = ("month", "year")
604 if all(k in request.GET for k in keys):
605 get_params = {k: v for k, v in request.GET.items() if k not in keys}
606 get_params["date"] = "%s-%s" % (request.GET.get("year"), request.GET.get("month"))
607 return redirect(self.request.path + "?" + urlencode(get_params))
608
609 self._set_month_year()
610 return super().get(request, *args, **kwargs)
611
612 def get_context_data(self, **kwargs):
613 ctx = super().get_context_data()
614
615 try:
616 _, ndays = calendar.monthrange(self.year, self.month)
617 except calendar.IllegalMonthError:
618 raise Http404()
619 before = datetime(self.year, self.month, 1, 0, 0, 0, tzinfo=UTC) - timedelta(days=1)
620 after = datetime(self.year, self.month, ndays, 0, 0, 0, tzinfo=UTC) + timedelta(days=1)
621
622 ctx['date'] = date(self.year, self.month, 1)
623 ctx['before'] = before
624 ctx['after'] = after
625 ebd = self._events_by_day(before, after)
626
627 ctx['has_before'], ctx['has_after'] = has_before_after(
628 self.request.organizer.events.filter(
629 sales_channels__contains=self.request.sales_channel.identifier
630 ),
631 SubEvent.objects.filter(
632 event__organizer=self.request.organizer,
633 event__is_public=True,
634 event__live=True,
635 event__sales_channels__contains=self.request.sales_channel.identifier
636 ),
637 before,
638 after,
639 )
640
641 ctx['multiple_timezones'] = self._multiple_timezones
642 ctx['weeks'] = weeks_for_template(ebd, self.year, self.month)
643 ctx['months'] = [date(self.year, i + 1, 1) for i in range(12)]
644 ctx['years'] = range(now().year - 2, now().year + 3)
645
646 return ctx
647
648 def _events_by_day(self, before, after):
649 ebd = defaultdict(list)
650 timezones = set()
651 add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(
652 settings.DATABASE_REPLICA
653 ).filter(
654 sales_channels__contains=self.request.sales_channel.identifier
655 ), before, after, ebd, timezones)
656 add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(
657 event__organizer=self.request.organizer,
658 event__is_public=True,
659 event__live=True,
660 event__sales_channels__contains=self.request.sales_channel.identifier
661 ).prefetch_related(
662 Prefetch(
663 'event',
664 queryset=Event.objects.prefetch_related(
665 '_settings_objects',
666 Prefetch(
667 'organizer',
668 queryset=Organizer.objects.prefetch_related('_settings_objects')
669 )
670 )
671 )
672 )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)
673 self._multiple_timezones = len(timezones) > 1
674 return ebd
675
676
677 class WeekCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):
678 template_name = 'pretixpresale/organizers/calendar_week.html'
679
680 def get(self, request, *args, **kwargs):
681 # redirect old week-year-URLs to new date-URLs
682 keys = ("week", "year")
683 if all(k in request.GET for k in keys):
684 get_params = {k: v for k, v in request.GET.items() if k not in keys}
685 get_params["date"] = "%s-W%s" % (request.GET.get("year"), request.GET.get("week"))
686 return redirect(self.request.path + "?" + urlencode(get_params))
687
688 self._set_week_year()
689 return super().get(request, *args, **kwargs)
690
691 def get_context_data(self, **kwargs):
692 ctx = super().get_context_data()
693
694 week = isoweek.Week(self.year, self.week)
695 before = datetime(
696 week.monday().year, week.monday().month, week.monday().day, 0, 0, 0, tzinfo=UTC
697 ) - timedelta(days=1)
698 after = datetime(
699 week.sunday().year, week.sunday().month, week.sunday().day, 0, 0, 0, tzinfo=UTC
700 ) + timedelta(days=1)
701
702 ctx['date'] = week.monday()
703 ctx['before'] = before
704 ctx['after'] = after
705
706 ebd = self._events_by_day(before, after)
707
708 ctx['has_before'], ctx['has_after'] = has_before_after(
709 self.request.organizer.events.filter(
710 sales_channels__contains=self.request.sales_channel.identifier
711 ),
712 SubEvent.objects.filter(
713 event__organizer=self.request.organizer,
714 event__is_public=True,
715 event__live=True,
716 event__sales_channels__contains=self.request.sales_channel.identifier
717 ),
718 before,
719 after,
720 )
721
722 ctx['days'] = days_for_template(ebd, week)
723 years = (self.year - 1, self.year, self.year + 1)
724 weeks = []
725 for year in years:
726 weeks += [
727 (date_fromisocalendar(year, i + 1, 1), date_fromisocalendar(year, i + 1, 7))
728 for i in range(53 if date(year, 12, 31).isocalendar()[1] == 53 else 52)
729 ]
730 ctx['weeks'] = [[w for w in weeks if w[0].year == year] for year in years]
731 ctx['week_format'] = get_format('WEEK_FORMAT')
732 if ctx['week_format'] == 'WEEK_FORMAT':
733 ctx['week_format'] = WEEK_FORMAT
734 ctx['short_month_day_format'] = get_format('SHORT_MONTH_DAY_FORMAT')
735 if ctx['short_month_day_format'] == 'SHORT_MONTH_DAY_FORMAT':
736 ctx['short_month_day_format'] = SHORT_MONTH_DAY_FORMAT
737 ctx['multiple_timezones'] = self._multiple_timezones
738
739 return ctx
740
741 def _events_by_day(self, before, after):
742 ebd = defaultdict(list)
743 timezones = set()
744 add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(
745 settings.DATABASE_REPLICA
746 ).filter(
747 sales_channels__contains=self.request.sales_channel.identifier
748 ), before, after, ebd, timezones)
749 add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(
750 event__organizer=self.request.organizer,
751 event__is_public=True,
752 event__live=True,
753 event__sales_channels__contains=self.request.sales_channel.identifier
754 ).prefetch_related(
755 Prefetch(
756 'event',
757 queryset=Event.objects.prefetch_related(
758 '_settings_objects',
759 Prefetch(
760 'organizer',
761 queryset=Organizer.objects.prefetch_related('_settings_objects')
762 )
763 )
764 )
765 )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)
766 self._multiple_timezones = len(timezones) > 1
767 return ebd
768
769
770 class DayCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):
771 template_name = 'pretixpresale/organizers/calendar_day.html'
772
773 def _set_date_to_next_event(self):
774 next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(
775 Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
776 organizer=self.request.organizer,
777 live=True,
778 is_public=True,
779 date_from__gte=now(),
780 ), self.request).order_by('date_from').first()
781 next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(
782 Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
783 event__organizer=self.request.organizer,
784 event__is_public=True,
785 event__live=True,
786 active=True,
787 is_public=True,
788 ), self.request).select_related('event').order_by('date_from').first()
789
790 datetime_from = None
791 if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):
792 datetime_from = next_sev.date_from
793 next_ev = next_sev.event
794 elif next_ev:
795 datetime_from = next_ev.date_from
796
797 if datetime_from:
798 self.tz = pytz.timezone(next_ev.settings.timezone)
799 self.date = datetime_from.astimezone(self.tz).date()
800 else:
801 self.tz = self.request.organizer.timezone
802 self.date = now().astimezone(self.tz).date()
803
804 def _set_date(self):
805 if 'date' in self.request.GET:
806 self.tz = self.request.organizer.timezone
807 try:
808 self.date = dateutil.parser.parse(self.request.GET.get('date')).date()
809 except ValueError:
810 self.date = now().astimezone(self.tz).date()
811 else:
812 self._set_date_to_next_event()
813
814 def get(self, request, *args, **kwargs):
815 self._set_date()
816 return super().get(request, *args, **kwargs)
817
818 def get_context_data(self, **kwargs):
819 ctx = super().get_context_data()
820
821 before = datetime(
822 self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC
823 ) - timedelta(days=1)
824 after = datetime(
825 self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC
826 ) + timedelta(days=1)
827
828 ctx['date'] = self.date
829 ctx['cal_tz'] = self.tz
830 ctx['before'] = before
831 ctx['after'] = after
832
833 ctx['has_before'], ctx['has_after'] = has_before_after(
834 self.request.organizer.events.filter(
835 sales_channels__contains=self.request.sales_channel.identifier
836 ),
837 SubEvent.objects.filter(
838 event__organizer=self.request.organizer,
839 event__is_public=True,
840 event__live=True,
841 event__sales_channels__contains=self.request.sales_channel.identifier
842 ),
843 before,
844 after,
845 )
846
847 ebd = self._events_by_day(before, after)
848 if not ebd[self.date]:
849 return ctx
850
851 events = ebd[self.date]
852 shortest_duration = self._get_shortest_duration(events).total_seconds() // 60
853 # pick the next biggest tick_duration based on shortest_duration, max. 180 minutes
854 tick_duration = next((d for d in [5, 10, 15, 30, 60, 120, 180] if d >= shortest_duration), 180)
855
856 raster_size = min(self._get_raster_size(events), tick_duration)
857 events, start, end = self._rasterize_events(events, tick_duration=tick_duration, raster_size=raster_size)
858 calendar_duration = self._get_time_duration(start, end)
859 ctx["calendar_duration"] = self._format_duration(calendar_duration)
860 ctx['time_ticks'] = self._get_time_ticks(start, end, tick_duration)
861 ctx['start'] = datetime.combine(self.date, start)
862 ctx['raster_size'] = raster_size
863 # ctx['end'] = end
        # size of each grid column is based on the shortest event duration and raster_size
        # raster_size is based on start/end times, so we may end up with a small raster but long-running events
        # raster_size will always be smaller than or equal to tick_duration
867 ctx['raster_to_shortest_ratio'] = round((8 * raster_size) / shortest_duration)
868
869 ctx['events'] = events
870
871 events_by_series = self._grid_for_template(events)
872 ctx['collections'] = events_by_series
873 ctx['no_headlines'] = not any([series for series, events in events_by_series])
874 ctx['multiple_timezones'] = self._multiple_timezones
875 return ctx
876
877 def _get_raster_size(self, events):
878 # get best raster-size for min. # of columns in grid
879 # due to grid-col-calculations in CSS raster_size cannot be bigger than 60 (minutes)
880
881 # all start- and end-times (minute-part) except full hour
882 times = [
883 e["time"].minute for e in events if e["time"] and e["time"].minute
884 ] + [
885 e["time_end_today"].minute for e in events if "time_end_today" in e and e["time_end_today"] and e["time_end_today"].minute
886 ]
887 if not times:
888 # no time other than full hour, so raster can be 1 hour/60 minutes
889 return 60
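        # e.g. minute parts {15, 45} -> gcd 15 -> raster 15; minute parts {20, 50} -> gcd 10 -> raster 10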
890 gcd = reduce(math.gcd, set(times))
891 return next((d for d in [5, 10, 15, 30, 60] if d >= gcd), 60)
892
893 def _get_time_duration(self, start, end):
894 midnight = time(0, 0)
895 return datetime.combine(
896 self.date if end != midnight else self.date + timedelta(days=1),
897 end
898 ) - datetime.combine(
899 self.date,
900 start
901 )
902
903 def _format_duration(self, duration):
904 return ":".join([
905 "%02d" % i for i in (
906 (duration.days * 24) + (duration.seconds // 3600),
907 (duration.seconds // 60) % 60
908 )
909 ])
910
911 def _floor_time(self, t, raster_size=5):
912 # raster_size based on minutes, might be factored into a helper class with a timedelta as raster
913 minutes = t.hour * 60 + t.minute
914 if minutes % raster_size:
915 minutes = (minutes // raster_size) * raster_size
916 return t.replace(hour=minutes // 60, minute=minutes % 60)
917 return t
918
919 def _ceil_time(self, t, raster_size=5):
920 # raster_size based on minutes, might be factored into a helper class with a timedelta as raster
921 minutes = t.hour * 60 + t.minute
922 if not minutes % raster_size:
923 return t
924 minutes = math.ceil(minutes / raster_size) * raster_size
925 minute = minutes % 60
926 hour = minutes // 60
927 if hour > 23:
928 hour = hour % 24
929 return t.replace(minute=minute, hour=hour)
930
931 def _rasterize_events(self, events, tick_duration, raster_size=5):
932 rastered_events = []
933 start, end = self._get_time_range(events)
934 start = self._floor_time(start, raster_size=tick_duration)
935 end = self._ceil_time(end, raster_size=tick_duration)
936
937 midnight = time(0, 0)
938 for e in events:
939 t = e["time"] or time(0, 0)
940 e["offset_shift_start"] = 0
941 if e["continued"]:
942 e["time_rastered"] = midnight
943 elif t.minute % raster_size:
944 e["time_rastered"] = t.replace(minute=(t.minute // raster_size) * raster_size)
945 e["offset_shift_start"] = t.minute % raster_size
946 else:
947 e["time_rastered"] = t
948
949 e["offset_shift_end"] = 0
950 if "time_end_today" in e and e["time_end_today"]:
951 if e["time_end_today"].minute % raster_size:
952 minute = math.ceil(e["time_end_today"].minute / raster_size) * raster_size
953 hour = e["time_end_today"].hour
954 if minute > 59:
955 minute = minute % 60
956 hour = (hour + 1) % 24
957 e["time_end_today_rastered"] = e["time_end_today"].replace(minute=minute, hour=hour)
958 e["offset_shift_end"] = raster_size - e["time_end_today"].minute % raster_size
959 else:
960 e["time_end_today_rastered"] = e["time_end_today"]
961 else:
962 e["time_end_today"] = e["time_end_today_rastered"] = time(0, 0)
963
964 e["duration_rastered"] = self._format_duration(datetime.combine(
965 self.date if e["time_end_today_rastered"] != midnight else self.date + timedelta(days=1),
966 e["time_end_today_rastered"]
967 ) - datetime.combine(
968 self.date,
969 e['time_rastered']
970 ))
971
972 e["offset_rastered"] = datetime.combine(self.date, time(0, 0)) + self._get_time_duration(start, e["time_rastered"])
973
974 rastered_events.append(e)
975
976 return rastered_events, start, end
977
978 def _get_shortest_duration(self, events):
979 midnight = time(0, 0)
980 durations = [
981 datetime.combine(
982 self.date if e.get('time_end_today') and e['time_end_today'] != midnight else self.date + timedelta(days=1),
983 e['time_end_today'] if e.get('time_end_today') else time(0, 0)
984 )
985 -
986 datetime.combine(
987 self.date,
988 time(0, 0) if e['continued'] else (e['time'] or time(0, 0))
989 )
990 for e in events
991 ]
992 return min([d for d in durations])
993
994 def _get_time_range(self, events):
995 if any(e['continued'] for e in events) or any(e['time'] is None for e in events):
996 starting_at = time(0, 0)
997 else:
998 starting_at = min(e['time'] for e in events)
999
1000 if any(e.get('time_end_today') is None for e in events):
1001 ending_at = time(0, 0)
1002 else:
1003 ending_at = max(e['time_end_today'] for e in events)
1004
1005 return starting_at, ending_at
1006
1007 def _get_time_ticks(self, start, end, tick_duration):
1008 ticks = []
1009 tick_duration = timedelta(minutes=tick_duration)
1010
1011 # convert time to datetime for timedelta calc
1012 start = datetime.combine(self.date, start)
1013 end = datetime.combine(self.date, end)
1014 if end <= start:
1015 end = end + timedelta(days=1)
1016
1017 tick_start = start
1018 offset = datetime.utcfromtimestamp(0)
1019 duration = datetime.utcfromtimestamp(tick_duration.total_seconds())
1020 while tick_start < end:
1021 tick = {
1022 "start": tick_start,
1023 "duration": duration,
1024 "offset": offset,
1025 }
1026 ticks.append(tick)
1027 tick_start += tick_duration
1028 offset += tick_duration
1029
1030 return ticks
1031
1032 def _grid_for_template(self, events):
1033 midnight = time(0, 0)
1034 rows_by_collection = defaultdict(list)
1035
        # We sort the events into "collections": all subevents from the same
        # event series together and all non-series events into a "None"
        # collection. Then we check whether an already-placed event in the
        # collection overlaps, in which case we need to split the collection
        # into multiple rows.
1041 for counter, e in enumerate(events):
1042 collection = e['event'].event if isinstance(e['event'], SubEvent) else None
1043
1044 placed_in_row = False
1045 for row in rows_by_collection[collection]:
1046 if any(
1047 (e['time_rastered'] < o['time_end_today_rastered'] or o['time_end_today_rastered'] == midnight) and
1048 (o['time_rastered'] < e['time_end_today_rastered'] or e['time_end_today_rastered'] == midnight)
1049 for o in row
1050 ):
1051 continue
1052 row.append(e)
1053 placed_in_row = True
1054 break
1055
1056 if not placed_in_row:
1057 rows_by_collection[collection].append([e])
1058
        # flatten the rows into one stream of events, each annotated with its row
        # index ("concurrency"), for better keyboard tab order in the HTML
1061 for collection in rows_by_collection:
1062 for i, row in enumerate(rows_by_collection[collection]):
1063 concurrency = i + 1
1064 for e in row:
1065 e["concurrency"] = concurrency
1066 rows_by_collection[collection] = {
1067 "concurrency": len(rows_by_collection[collection]),
1068 "events": sorted([e for row in rows_by_collection[collection] for e in row], key=lambda d: d['time'] or time(0, 0)),
1069 }
1070
1071 def sort_key(c):
1072 collection, row = c
1073 if collection is None:
1074 return ''
1075 else:
1076 return str(collection.name)
1077 return sorted(rows_by_collection.items(), key=sort_key)
1078
1079 def _events_by_day(self, before, after):
1080 ebd = defaultdict(list)
1081 timezones = set()
1082 add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(
1083 settings.DATABASE_REPLICA
1084 ).filter(
1085 sales_channels__contains=self.request.sales_channel.identifier
1086 ), before, after, ebd, timezones)
1087 add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(
1088 event__organizer=self.request.organizer,
1089 event__is_public=True,
1090 event__live=True,
1091 event__sales_channels__contains=self.request.sales_channel.identifier
1092 ).prefetch_related(
1093 Prefetch(
1094 'event',
1095 queryset=Event.objects.prefetch_related(
1096 '_settings_objects',
1097 Prefetch(
1098 'organizer',
1099 queryset=Organizer.objects.prefetch_related('_settings_objects')
1100 )
1101 )
1102 )
1103 )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)
1104 self._multiple_timezones = len(timezones) > 1
1105 return ebd
1106
1107
1108 @method_decorator(cache_page(300), name='dispatch')
1109 class OrganizerIcalDownload(OrganizerViewMixin, View):
1110 def get(self, request, *args, **kwargs):
1111 cutoff = now() - timedelta(days=31)
1112 events = list(
1113 filter_qs_by_attr(
1114 self.request.organizer.events.filter(
1115 Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),
1116 is_public=True,
1117 live=True,
1118 has_subevents=False,
1119 sales_channels__contains=self.request.sales_channel.identifier,
1120 ),
1121 request
1122 ).order_by(
1123 'date_from'
1124 ).prefetch_related(
1125 '_settings_objects',
1126 Prefetch(
1127 'organizer',
1128 queryset=Organizer.objects.prefetch_related('_settings_objects')
1129 )
1130 )
1131 )
1132 events += list(
1133 filter_qs_by_attr(
1134 SubEvent.objects.filter(
1135 Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),
1136 event__organizer=self.request.organizer,
1137 event__is_public=True,
1138 event__live=True,
1139 is_public=True,
1140 active=True,
1141 event__sales_channels__contains=self.request.sales_channel.identifier
1142 ),
1143 request
1144 ).prefetch_related(
1145 Prefetch(
1146 'event',
1147 queryset=Event.objects.prefetch_related(
1148 '_settings_objects',
1149 Prefetch(
1150 'organizer',
1151 queryset=Organizer.objects.prefetch_related('_settings_objects')
1152 )
1153 )
1154 )
1155 ).order_by(
1156 'date_from'
1157 )
1158 )
1159
1160 if 'locale' in request.GET and request.GET.get('locale') in dict(settings.LANGUAGES):
1161 with language(request.GET.get('locale'), self.request.organizer.settings.region):
1162 cal = get_public_ical(events)
1163 else:
1164 cal = get_public_ical(events)
1165
1166 resp = HttpResponse(cal.serialize(), content_type='text/calendar')
1167 resp['Content-Disposition'] = 'attachment; filename="{}.ics"'.format(
1168 request.organizer.slug
1169 )
1170 if request.organizer.settings.meta_noindex:
1171 resp['X-Robots-Tag'] = 'noindex'
1172 return resp
1173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pretix/presale/views/organizer.py b/src/pretix/presale/views/organizer.py
--- a/src/pretix/presale/views/organizer.py
+++ b/src/pretix/presale/views/organizer.py
@@ -225,7 +225,7 @@
def _set_month_year(self):
if 'date' in self.request.GET:
try:
- date = dateutil.parser.parse(self.request.GET.get('date')).date()
+ date = dateutil.parser.isoparse(self.request.GET.get('date')).date()
except ValueError:
date = now().date()
self.year = date.year
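
A minimal sketch of the behavioural difference the patch relies on, assuming the view receives a year-month string such as `date=2022-03` (the exact string the widget sends is not shown in this excerpt). `dateutil.parser.isoparse` treats `YYYY-MM` as a reduced-precision ISO 8601 date and always resolves to the first of that month. The generic `dateutil.parser.parse` instead borrows the missing day from its `default` (the current date when none is given), so its result shifts with the day the request is made and can even raise `ValueError` when that day does not exist in the requested month, dropping `_set_month_year` into its `except` fallback to `now()`.

```python
# Illustrative sketch only: the "2022-03" input and the default= dates stand in for
# "whatever day the request happens to be made on"; they are assumptions, not values
# taken from pretix. Requires python-dateutil >= 2.7 for isoparse.
from datetime import datetime

import dateutil.parser

# ISO 8601 reduced precision: deterministic, always the 1st of the requested month.
print(dateutil.parser.isoparse("2022-03").date())                               # 2022-03-01

# Generic parser: the missing day is borrowed from `default` (today if omitted),
# so the parsed date drifts with the current day of month.
print(dateutil.parser.parse("2022-03", default=datetime(2022, 1, 15)).date())   # 2022-03-15

# Near month-end the borrowed day may not exist in the requested month, which
# raises ValueError and pushes the view into its `except` fallback to now().
try:
    dateutil.parser.parse("2022-02", default=datetime(2022, 1, 30))
except ValueError as exc:
    print("ValueError:", exc)
```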
| {"golden_diff": "diff --git a/src/pretix/presale/views/organizer.py b/src/pretix/presale/views/organizer.py\n--- a/src/pretix/presale/views/organizer.py\n+++ b/src/pretix/presale/views/organizer.py\n@@ -225,7 +225,7 @@\n def _set_month_year(self):\n if 'date' in self.request.GET:\n try:\n- date = dateutil.parser.parse(self.request.GET.get('date')).date()\n+ date = dateutil.parser.isoparse(self.request.GET.get('date')).date()\n except ValueError:\n date = now().date()\n self.year = date.year\n", "issue": "Cannot change month in the widget\nIn the current version it is not possible to change the month in the widget. When you hit it, it reloads but does nothing. The cause seems to be because the call https://XXXXX/widget/product_list?lang=es&year=2022&month=03 always returns the same regardless of the value you put in the month parameter.\n", "before_files": [{"content": "#\n# This file is part of pretix (Community Edition).\n#\n# Copyright (C) 2014-2020 Raphael Michel and contributors\n# Copyright (C) 2020-2021 rami.io GmbH and contributors\n#\n# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General\n# Public License as published by the Free Software Foundation in version 3 of the License.\n#\n# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are\n# applicable granting you additional permissions and placing additional restrictions on your usage of this software.\n# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive\n# this file, see <https://pretix.eu/about/en/license>.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Affero General Public License along with this program. If not, see\n# <https://www.gnu.org/licenses/>.\n#\n\n# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of\n# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.\n#\n# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A\n# full history of changes and contributors is available at <https://github.com/pretix/pretix>.\n#\n# This file contains Apache-licensed contributions copyrighted by: Jan Felix Wiebe, Mohit Jindal\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations under the License.\nimport calendar\nimport hashlib\nimport math\nfrom collections import defaultdict\nfrom datetime import date, datetime, time, timedelta\nfrom functools import reduce\nfrom urllib.parse import quote, urlencode\n\nimport dateutil\nimport isoweek\nimport pytz\nfrom django.conf import settings\nfrom django.core.cache import caches\nfrom django.db.models import Exists, Max, Min, OuterRef, Prefetch, Q\nfrom django.db.models.functions import Coalesce, Greatest\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.formats import date_format, get_format\nfrom django.utils.timezone import get_current_timezone, now\nfrom django.views import View\nfrom django.views.decorators.cache import cache_page\nfrom django.views.generic import ListView, TemplateView\nfrom pytz import UTC\n\nfrom pretix.base.i18n import language\nfrom pretix.base.models import (\n Event, EventMetaValue, Organizer, Quota, SubEvent, SubEventMetaValue,\n)\nfrom pretix.base.services.quotas import QuotaAvailability\nfrom pretix.helpers.compat import date_fromisocalendar\nfrom pretix.helpers.daterange import daterange\nfrom pretix.helpers.formats.en.formats import (\n SHORT_MONTH_DAY_FORMAT, WEEK_FORMAT,\n)\nfrom pretix.multidomain.urlreverse import eventreverse\nfrom pretix.presale.ical import get_public_ical\nfrom pretix.presale.views import OrganizerViewMixin\n\n\ndef filter_qs_by_attr(qs, request):\n \"\"\"\n We'll allow to filter the event list using attributes defined in the event meta data\n models in the format ?attr[meta_name]=meta_value\n \"\"\"\n attrs = {}\n for i, item in enumerate(request.GET.items()):\n k, v = item\n if k.startswith(\"attr[\") and k.endswith(\"]\"):\n attrs[k[5:-1]] = v\n\n skey = 'filter_qs_by_attr_{}_{}'.format(request.organizer.pk, request.event.pk if hasattr(request, 'event') else '')\n if request.GET.get('attr_persist'):\n request.session[skey] = attrs\n elif skey in request.session:\n attrs = request.session[skey]\n\n props = {\n p.name: p for p in request.organizer.meta_properties.filter(\n name__in=attrs.keys()\n )\n }\n\n for i, item in enumerate(attrs.items()):\n attr, v = item\n emv_with_value = EventMetaValue.objects.filter(\n event=OuterRef('event' if qs.model == SubEvent else 'pk'),\n property__name=attr,\n value=v\n )\n emv_with_any_value = EventMetaValue.objects.filter(\n event=OuterRef('event' if qs.model == SubEvent else 'pk'),\n property__name=attr,\n )\n if qs.model == SubEvent:\n semv_with_value = SubEventMetaValue.objects.filter(\n subevent=OuterRef('pk'),\n property__name=attr,\n value=v\n )\n semv_with_any_value = SubEventMetaValue.objects.filter(\n subevent=OuterRef('pk'),\n property__name=attr,\n )\n\n prop = props.get(attr)\n if not prop:\n continue\n annotations = {'attr_{}'.format(i): Exists(emv_with_value)}\n if qs.model == SubEvent:\n annotations['attr_{}_sub'.format(i)] = Exists(semv_with_value)\n annotations['attr_{}_sub_any'.format(i)] = Exists(semv_with_any_value)\n filters = Q(**{'attr_{}_sub'.format(i): True})\n filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}'.format(i): True}))\n if prop.default == v:\n annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)\n filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}_any'.format(i): False}))\n else:\n filters = Q(**{'attr_{}'.format(i): True})\n if prop.default == 
v:\n annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)\n filters |= Q(**{'attr_{}_any'.format(i): False})\n\n qs = qs.annotate(**annotations).filter(filters)\n return qs\n\n\nclass EventListMixin:\n\n def _get_event_queryset(self):\n query = Q(is_public=True) & Q(live=True)\n qs = self.request.organizer.events.using(settings.DATABASE_REPLICA).filter(query)\n qs = qs.filter(sales_channels__contains=self.request.sales_channel.identifier)\n qs = qs.annotate(\n min_from=Min('subevents__date_from'),\n min_to=Min('subevents__date_to'),\n max_from=Max('subevents__date_from'),\n max_to=Max('subevents__date_to'),\n max_fromto=Greatest(Max('subevents__date_to'), Max('subevents__date_from')),\n )\n if \"old\" in self.request.GET:\n qs = qs.filter(\n Q(Q(has_subevents=False) & Q(\n Q(date_to__lt=now()) | Q(Q(date_to__isnull=True) & Q(date_from__lt=now()))\n )) | Q(Q(has_subevents=True) & Q(\n Q(min_to__lt=now()) | Q(min_from__lt=now()))\n )\n ).annotate(\n order_to=Coalesce('max_fromto', 'max_to', 'max_from', 'date_to', 'date_from'),\n ).order_by('-order_to')\n else:\n qs = qs.filter(\n Q(Q(has_subevents=False) & Q(\n Q(date_to__gte=now()) | Q(Q(date_to__isnull=True) & Q(date_from__gte=now()))\n )) | Q(Q(has_subevents=True) & Q(\n Q(max_to__gte=now()) | Q(max_from__gte=now()))\n )\n ).annotate(\n order_from=Coalesce('min_from', 'date_from'),\n ).order_by('order_from')\n qs = Event.annotated(filter_qs_by_attr(qs, self.request))\n return qs\n\n def _set_month_to_next_subevent(self):\n tz = pytz.timezone(self.request.event.settings.timezone)\n next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n active=True,\n is_public=True,\n ).select_related('event').order_by('date_from').first()\n\n if next_sev:\n datetime_from = next_sev.date_from\n self.year = datetime_from.astimezone(tz).year\n self.month = datetime_from.astimezone(tz).month\n else:\n self.year = now().year\n self.month = now().month\n\n def _set_month_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n has_subevents=False\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n tz = pytz.timezone(next_ev.settings.timezone)\n self.year = datetime_from.astimezone(tz).year\n self.month = datetime_from.astimezone(tz).month\n else:\n self.year = now().year\n self.month = now().month\n\n def _set_month_year(self):\n if 'date' in self.request.GET:\n try:\n date = dateutil.parser.parse(self.request.GET.get('date')).date()\n except ValueError:\n date = now().date()\n self.year = date.year\n self.month = date.month\n else:\n if hasattr(self.request, 'event'):\n self._set_month_to_next_subevent()\n else:\n self._set_month_to_next_event()\n\n 
def _set_week_to_next_subevent(self):\n tz = pytz.timezone(self.request.event.settings.timezone)\n next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n active=True,\n is_public=True,\n ).select_related('event').order_by('date_from').first()\n\n if next_sev:\n datetime_from = next_sev.date_from\n self.year = datetime_from.astimezone(tz).isocalendar()[0]\n self.week = datetime_from.astimezone(tz).isocalendar()[1]\n else:\n self.year = now().isocalendar()[0]\n self.week = now().isocalendar()[1]\n\n def _set_week_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n has_subevents=False\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n tz = pytz.timezone(next_ev.settings.timezone)\n self.year = datetime_from.astimezone(tz).isocalendar()[0]\n self.week = datetime_from.astimezone(tz).isocalendar()[1]\n else:\n self.year = now().isocalendar()[0]\n self.week = now().isocalendar()[1]\n\n def _set_week_year(self):\n if 'date' in self.request.GET:\n try:\n iso = dateutil.parser.isoparse(self.request.GET.get('date')).isocalendar()\n except ValueError:\n iso = now().isocalendar()\n self.year = iso[0]\n self.week = iso[1]\n else:\n if hasattr(self.request, 'event'):\n self._set_week_to_next_subevent()\n else:\n self._set_week_to_next_event()\n\n\nclass OrganizerIndex(OrganizerViewMixin, EventListMixin, ListView):\n model = Event\n context_object_name = 'events'\n template_name = 'pretixpresale/organizers/index.html'\n paginate_by = 30\n\n def dispatch(self, request, *args, **kwargs):\n # In stock pretix, nothing on this page is session-dependent except for the language and the customer login part,\n # so we can cache pretty aggressively if the user is anonymous. Note that we deliberately implement the caching\n # on the view layer, *after* all middlewares have been ran, so we have access to the computed locale, as well\n # as the login status etc.\n cache_allowed = (\n settings.CACHE_LARGE_VALUES_ALLOWED and\n not getattr(request, 'customer', None) and\n not request.user.is_authenticated\n )\n\n if not cache_allowed:\n return super().dispatch(request, *args, **kwargs)\n\n cache_key_parts = [\n request.method,\n request.host,\n str(request.organizer.pk),\n request.get_full_path(),\n request.LANGUAGE_CODE,\n self.request.sales_channel.identifier,\n ]\n for c, v in request.COOKIES.items():\n # If the cookie is not one we know, it might be set by a plugin and we need to include it in the\n # cache key to be safe. A known example includes plugins that e.g. 
store cookie banner state.\n if c not in (settings.SESSION_COOKIE_NAME, settings.LANGUAGE_COOKIE_NAME, settings.CSRF_COOKIE_NAME) and not c.startswith('__'):\n cache_key_parts.append(f'{c}={v}')\n for c, v in request.session.items():\n # If the session key is not one we know, it might be set by a plugin and we need to include it in the\n # cache key to be safe. A known example would be the pretix-campaigns plugin setting the campaign ID.\n if (\n not c.startswith('_auth') and\n not c.startswith('pretix_auth_') and\n not c.startswith('customer_auth_') and\n not c.startswith('current_cart_') and\n not c.startswith('cart_') and\n not c.startswith('payment_') and\n c not in ('carts', 'payment', 'pinned_user_agent')\n ):\n cache_key_parts.append(f'{c}={repr(v)}')\n\n cache_key = f'pretix.presale.views.organizer.OrganizerIndex:{hashlib.md5(\":\".join(cache_key_parts).encode()).hexdigest()}'\n cache_timeout = 15\n cache = caches[settings.CACHE_LARGE_VALUES_ALIAS]\n\n response = cache.get(cache_key)\n if response is not None:\n return response\n\n response = super().dispatch(request, *kwargs, **kwargs)\n if response.status_code >= 400:\n return response\n\n if hasattr(response, 'render') and callable(response.render):\n def _store_to_cache(r):\n cache.set(cache_key, r, cache_timeout)\n\n response.add_post_render_callback(_store_to_cache)\n else:\n cache.set(cache_key, response, cache_timeout)\n return response\n\n def get(self, request, *args, **kwargs):\n style = request.GET.get(\"style\", request.organizer.settings.event_list_type)\n if style == \"calendar\":\n cv = CalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n elif style == \"day\":\n cv = DayCalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n elif style == \"week\":\n cv = WeekCalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n else:\n return super().get(request, *args, **kwargs)\n\n def get_queryset(self):\n return self._get_event_queryset()\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n for event in ctx['events']:\n event.tzname = pytz.timezone(event.cache.get_or_set('timezone', lambda: event.settings.timezone))\n if event.has_subevents:\n event.daterange = daterange(\n event.min_from.astimezone(event.tzname),\n (event.max_fromto or event.max_to or event.max_from).astimezone(event.tzname)\n )\n return ctx\n\n\ndef has_before_after(eventqs, subeventqs, before, after):\n eqs = eventqs.filter(is_public=True, live=True, has_subevents=False)\n sqs = subeventqs.filter(active=True, is_public=True)\n return (\n eqs.filter(Q(date_from__lte=before)).exists() or sqs.filter(Q(date_from__lte=before)).exists(),\n eqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists() or sqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists()\n )\n\n\ndef add_events_for_days(request, baseqs, before, after, ebd, timezones):\n qs = baseqs.filter(is_public=True, live=True, has_subevents=False).filter(\n Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |\n Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |\n Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))\n ).order_by(\n 'date_from'\n ).prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n if hasattr(request, 'organizer'):\n qs = filter_qs_by_attr(qs, request)\n for event in qs:\n timezones.add(event.settings.timezones)\n tz = 
pytz.timezone(event.settings.timezone)\n datetime_from = event.date_from.astimezone(tz)\n date_from = datetime_from.date()\n if event.settings.show_date_to and event.date_to:\n datetime_to = event.date_to.astimezone(tz)\n date_to = event.date_to.astimezone(tz).date()\n d = max(date_from, before.date())\n while d <= date_to and d <= after.date():\n first = d == date_from\n ebd[d].append({\n 'event': event,\n 'continued': not first,\n 'time': datetime_from.time().replace(tzinfo=None) if first and event.settings.show_times else None,\n 'time_end': (\n datetime_to.time().replace(tzinfo=None)\n if (date_to == date_from or (\n date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()\n )) and event.settings.show_times\n else None\n ),\n 'time_end_today': (\n datetime_to.time().replace(tzinfo=None)\n if date_to == d and event.settings.show_times\n else None\n ),\n 'url': eventreverse(event, 'presale:event.index'),\n 'timezone': event.settings.timezone,\n })\n d += timedelta(days=1)\n\n else:\n ebd[date_from].append({\n 'event': event,\n 'continued': False,\n 'time': datetime_from.time().replace(tzinfo=None) if event.settings.show_times else None,\n 'url': eventreverse(event, 'presale:event.index'),\n 'timezone': event.settings.timezone,\n })\n\n\ndef add_subevents_for_days(qs, before, after, ebd, timezones, event=None, cart_namespace=None, voucher=None):\n qs = qs.filter(active=True, is_public=True).filter(\n Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |\n Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |\n Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))\n ).order_by(\n 'date_from'\n )\n\n quotas_to_compute = []\n for se in qs:\n if se.presale_is_running:\n quotas_to_compute += se.active_quotas\n\n qcache = {}\n if quotas_to_compute:\n qa = QuotaAvailability()\n qa.queue(*quotas_to_compute)\n qa.compute(allow_cache=True)\n qcache.update(qa.results)\n\n for se in qs:\n if qcache:\n se._quota_cache = qcache\n kwargs = {'subevent': se.pk}\n if cart_namespace:\n kwargs['cart_namespace'] = cart_namespace\n\n s = event.settings if event else se.event.settings\n\n if s.event_list_available_only:\n hide = se.presale_has_ended or (\n (not voucher or not voucher.allow_ignore_quota) and\n se.best_availability_state is not None and\n se.best_availability_state < Quota.AVAILABILITY_RESERVED\n )\n if hide:\n continue\n\n timezones.add(s.timezones)\n tz = pytz.timezone(s.timezone)\n datetime_from = se.date_from.astimezone(tz)\n date_from = datetime_from.date()\n if s.show_date_to and se.date_to:\n datetime_to = se.date_to.astimezone(tz)\n date_to = se.date_to.astimezone(tz).date()\n d = max(date_from, before.date())\n while d <= date_to and d <= after.date():\n first = d == date_from\n ebd[d].append({\n 'continued': not first,\n 'timezone': s.timezone,\n 'time': datetime_from.time().replace(tzinfo=None) if first and s.show_times else None,\n 'time_end': (\n datetime_to.time().replace(tzinfo=None)\n if (date_to == date_from or (\n date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()\n )) and s.show_times\n else None\n ),\n 'time_end_today': (\n datetime_to.time().replace(tzinfo=None)\n if date_to == d and s.show_times\n else None\n ),\n 'event': se,\n 'url': (\n eventreverse(se.event, 'presale:event.redeem',\n kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'\n if voucher\n else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)\n )\n 
})\n d += timedelta(days=1)\n\n else:\n ebd[date_from].append({\n 'event': se,\n 'continued': False,\n 'time': datetime_from.time().replace(tzinfo=None) if s.show_times else None,\n 'url': (\n eventreverse(se.event, 'presale:event.redeem',\n kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'\n if voucher\n else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)\n ),\n 'timezone': s.timezone,\n })\n\n\ndef sort_ev(e):\n return e['time'] or time(0, 0, 0), str(e['event'].name)\n\n\ndef days_for_template(ebd, week):\n day_format = get_format('WEEK_DAY_FORMAT')\n if day_format == 'WEEK_DAY_FORMAT':\n day_format = 'SHORT_DATE_FORMAT'\n return [\n {\n 'day_formatted': date_format(day, day_format),\n 'date': day,\n 'today': day == now().astimezone(get_current_timezone()).date(),\n 'events': sorted(ebd.get(day), key=sort_ev) if day in ebd else []\n }\n for day in week.days()\n ]\n\n\ndef weeks_for_template(ebd, year, month):\n calendar.setfirstweekday(0) # TODO: Configurable\n return [\n [\n {\n 'day': day,\n 'date': date(year, month, day),\n 'events': (\n sorted(ebd.get(date(year, month, day)), key=sort_ev)\n if date(year, month, day) in ebd else None\n )\n }\n if day > 0\n else None\n for day in week\n ]\n for week in calendar.monthcalendar(year, month)\n ]\n\n\nclass CalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar.html'\n\n def get(self, request, *args, **kwargs):\n # redirect old month-year-URLs to new date-URLs\n keys = (\"month\", \"year\")\n if all(k in request.GET for k in keys):\n get_params = {k: v for k, v in request.GET.items() if k not in keys}\n get_params[\"date\"] = \"%s-%s\" % (request.GET.get(\"year\"), request.GET.get(\"month\"))\n return redirect(self.request.path + \"?\" + urlencode(get_params))\n\n self._set_month_year()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n try:\n _, ndays = calendar.monthrange(self.year, self.month)\n except calendar.IllegalMonthError:\n raise Http404()\n before = datetime(self.year, self.month, 1, 0, 0, 0, tzinfo=UTC) - timedelta(days=1)\n after = datetime(self.year, self.month, ndays, 0, 0, 0, tzinfo=UTC) + timedelta(days=1)\n\n ctx['date'] = date(self.year, self.month, 1)\n ctx['before'] = before\n ctx['after'] = after\n ebd = self._events_by_day(before, after)\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ctx['multiple_timezones'] = self._multiple_timezones\n ctx['weeks'] = weeks_for_template(ebd, self.year, self.month)\n ctx['months'] = [date(self.year, i + 1, 1) for i in range(12)]\n ctx['years'] = range(now().year - 2, now().year + 3)\n\n return ctx\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n 
event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\nclass WeekCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar_week.html'\n\n def get(self, request, *args, **kwargs):\n # redirect old week-year-URLs to new date-URLs\n keys = (\"week\", \"year\")\n if all(k in request.GET for k in keys):\n get_params = {k: v for k, v in request.GET.items() if k not in keys}\n get_params[\"date\"] = \"%s-W%s\" % (request.GET.get(\"year\"), request.GET.get(\"week\"))\n return redirect(self.request.path + \"?\" + urlencode(get_params))\n\n self._set_week_year()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n week = isoweek.Week(self.year, self.week)\n before = datetime(\n week.monday().year, week.monday().month, week.monday().day, 0, 0, 0, tzinfo=UTC\n ) - timedelta(days=1)\n after = datetime(\n week.sunday().year, week.sunday().month, week.sunday().day, 0, 0, 0, tzinfo=UTC\n ) + timedelta(days=1)\n\n ctx['date'] = week.monday()\n ctx['before'] = before\n ctx['after'] = after\n\n ebd = self._events_by_day(before, after)\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ctx['days'] = days_for_template(ebd, week)\n years = (self.year - 1, self.year, self.year + 1)\n weeks = []\n for year in years:\n weeks += [\n (date_fromisocalendar(year, i + 1, 1), date_fromisocalendar(year, i + 1, 7))\n for i in range(53 if date(year, 12, 31).isocalendar()[1] == 53 else 52)\n ]\n ctx['weeks'] = [[w for w in weeks if w[0].year == year] for year in years]\n ctx['week_format'] = get_format('WEEK_FORMAT')\n if ctx['week_format'] == 'WEEK_FORMAT':\n ctx['week_format'] = WEEK_FORMAT\n ctx['short_month_day_format'] = get_format('SHORT_MONTH_DAY_FORMAT')\n if ctx['short_month_day_format'] == 'SHORT_MONTH_DAY_FORMAT':\n ctx['short_month_day_format'] = SHORT_MONTH_DAY_FORMAT\n ctx['multiple_timezones'] = self._multiple_timezones\n\n return ctx\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n 
queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\nclass DayCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar_day.html'\n\n def _set_date_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n date_from__gte=now(),\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n self.tz = pytz.timezone(next_ev.settings.timezone)\n self.date = datetime_from.astimezone(self.tz).date()\n else:\n self.tz = self.request.organizer.timezone\n self.date = now().astimezone(self.tz).date()\n\n def _set_date(self):\n if 'date' in self.request.GET:\n self.tz = self.request.organizer.timezone\n try:\n self.date = dateutil.parser.parse(self.request.GET.get('date')).date()\n except ValueError:\n self.date = now().astimezone(self.tz).date()\n else:\n self._set_date_to_next_event()\n\n def get(self, request, *args, **kwargs):\n self._set_date()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n before = datetime(\n self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC\n ) - timedelta(days=1)\n after = datetime(\n self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC\n ) + timedelta(days=1)\n\n ctx['date'] = self.date\n ctx['cal_tz'] = self.tz\n ctx['before'] = before\n ctx['after'] = after\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ebd = self._events_by_day(before, after)\n if not ebd[self.date]:\n return ctx\n\n events = ebd[self.date]\n shortest_duration = self._get_shortest_duration(events).total_seconds() // 60\n # pick the next biggest tick_duration based on shortest_duration, max. 
180 minutes\n tick_duration = next((d for d in [5, 10, 15, 30, 60, 120, 180] if d >= shortest_duration), 180)\n\n raster_size = min(self._get_raster_size(events), tick_duration)\n events, start, end = self._rasterize_events(events, tick_duration=tick_duration, raster_size=raster_size)\n calendar_duration = self._get_time_duration(start, end)\n ctx[\"calendar_duration\"] = self._format_duration(calendar_duration)\n ctx['time_ticks'] = self._get_time_ticks(start, end, tick_duration)\n ctx['start'] = datetime.combine(self.date, start)\n ctx['raster_size'] = raster_size\n # ctx['end'] = end\n # size of each grid-column is based on shortest event duration and raster_size\n # raster_size is based on start/end times, so it could happen we have a small raster but long running events\n # raster_size will always be smaller or equals tick_duration\n ctx['raster_to_shortest_ratio'] = round((8 * raster_size) / shortest_duration)\n\n ctx['events'] = events\n\n events_by_series = self._grid_for_template(events)\n ctx['collections'] = events_by_series\n ctx['no_headlines'] = not any([series for series, events in events_by_series])\n ctx['multiple_timezones'] = self._multiple_timezones\n return ctx\n\n def _get_raster_size(self, events):\n # get best raster-size for min. # of columns in grid\n # due to grid-col-calculations in CSS raster_size cannot be bigger than 60 (minutes)\n\n # all start- and end-times (minute-part) except full hour\n times = [\n e[\"time\"].minute for e in events if e[\"time\"] and e[\"time\"].minute\n ] + [\n e[\"time_end_today\"].minute for e in events if \"time_end_today\" in e and e[\"time_end_today\"] and e[\"time_end_today\"].minute\n ]\n if not times:\n # no time other than full hour, so raster can be 1 hour/60 minutes\n return 60\n gcd = reduce(math.gcd, set(times))\n return next((d for d in [5, 10, 15, 30, 60] if d >= gcd), 60)\n\n def _get_time_duration(self, start, end):\n midnight = time(0, 0)\n return datetime.combine(\n self.date if end != midnight else self.date + timedelta(days=1),\n end\n ) - datetime.combine(\n self.date,\n start\n )\n\n def _format_duration(self, duration):\n return \":\".join([\n \"%02d\" % i for i in (\n (duration.days * 24) + (duration.seconds // 3600),\n (duration.seconds // 60) % 60\n )\n ])\n\n def _floor_time(self, t, raster_size=5):\n # raster_size based on minutes, might be factored into a helper class with a timedelta as raster\n minutes = t.hour * 60 + t.minute\n if minutes % raster_size:\n minutes = (minutes // raster_size) * raster_size\n return t.replace(hour=minutes // 60, minute=minutes % 60)\n return t\n\n def _ceil_time(self, t, raster_size=5):\n # raster_size based on minutes, might be factored into a helper class with a timedelta as raster\n minutes = t.hour * 60 + t.minute\n if not minutes % raster_size:\n return t\n minutes = math.ceil(minutes / raster_size) * raster_size\n minute = minutes % 60\n hour = minutes // 60\n if hour > 23:\n hour = hour % 24\n return t.replace(minute=minute, hour=hour)\n\n def _rasterize_events(self, events, tick_duration, raster_size=5):\n rastered_events = []\n start, end = self._get_time_range(events)\n start = self._floor_time(start, raster_size=tick_duration)\n end = self._ceil_time(end, raster_size=tick_duration)\n\n midnight = time(0, 0)\n for e in events:\n t = e[\"time\"] or time(0, 0)\n e[\"offset_shift_start\"] = 0\n if e[\"continued\"]:\n e[\"time_rastered\"] = midnight\n elif t.minute % raster_size:\n e[\"time_rastered\"] = t.replace(minute=(t.minute // raster_size) * raster_size)\n 
e[\"offset_shift_start\"] = t.minute % raster_size\n else:\n e[\"time_rastered\"] = t\n\n e[\"offset_shift_end\"] = 0\n if \"time_end_today\" in e and e[\"time_end_today\"]:\n if e[\"time_end_today\"].minute % raster_size:\n minute = math.ceil(e[\"time_end_today\"].minute / raster_size) * raster_size\n hour = e[\"time_end_today\"].hour\n if minute > 59:\n minute = minute % 60\n hour = (hour + 1) % 24\n e[\"time_end_today_rastered\"] = e[\"time_end_today\"].replace(minute=minute, hour=hour)\n e[\"offset_shift_end\"] = raster_size - e[\"time_end_today\"].minute % raster_size\n else:\n e[\"time_end_today_rastered\"] = e[\"time_end_today\"]\n else:\n e[\"time_end_today\"] = e[\"time_end_today_rastered\"] = time(0, 0)\n\n e[\"duration_rastered\"] = self._format_duration(datetime.combine(\n self.date if e[\"time_end_today_rastered\"] != midnight else self.date + timedelta(days=1),\n e[\"time_end_today_rastered\"]\n ) - datetime.combine(\n self.date,\n e['time_rastered']\n ))\n\n e[\"offset_rastered\"] = datetime.combine(self.date, time(0, 0)) + self._get_time_duration(start, e[\"time_rastered\"])\n\n rastered_events.append(e)\n\n return rastered_events, start, end\n\n def _get_shortest_duration(self, events):\n midnight = time(0, 0)\n durations = [\n datetime.combine(\n self.date if e.get('time_end_today') and e['time_end_today'] != midnight else self.date + timedelta(days=1),\n e['time_end_today'] if e.get('time_end_today') else time(0, 0)\n )\n -\n datetime.combine(\n self.date,\n time(0, 0) if e['continued'] else (e['time'] or time(0, 0))\n )\n for e in events\n ]\n return min([d for d in durations])\n\n def _get_time_range(self, events):\n if any(e['continued'] for e in events) or any(e['time'] is None for e in events):\n starting_at = time(0, 0)\n else:\n starting_at = min(e['time'] for e in events)\n\n if any(e.get('time_end_today') is None for e in events):\n ending_at = time(0, 0)\n else:\n ending_at = max(e['time_end_today'] for e in events)\n\n return starting_at, ending_at\n\n def _get_time_ticks(self, start, end, tick_duration):\n ticks = []\n tick_duration = timedelta(minutes=tick_duration)\n\n # convert time to datetime for timedelta calc\n start = datetime.combine(self.date, start)\n end = datetime.combine(self.date, end)\n if end <= start:\n end = end + timedelta(days=1)\n\n tick_start = start\n offset = datetime.utcfromtimestamp(0)\n duration = datetime.utcfromtimestamp(tick_duration.total_seconds())\n while tick_start < end:\n tick = {\n \"start\": tick_start,\n \"duration\": duration,\n \"offset\": offset,\n }\n ticks.append(tick)\n tick_start += tick_duration\n offset += tick_duration\n\n return ticks\n\n def _grid_for_template(self, events):\n midnight = time(0, 0)\n rows_by_collection = defaultdict(list)\n\n # We sort the events into \"collections\": all subevents from the same\n # event series together and all non-series events into a \"None\"\n # collection. 
Then, we look if there's already an event in the\n # collection that overlaps, in which case we need to split the\n # collection into multiple rows.\n for counter, e in enumerate(events):\n collection = e['event'].event if isinstance(e['event'], SubEvent) else None\n\n placed_in_row = False\n for row in rows_by_collection[collection]:\n if any(\n (e['time_rastered'] < o['time_end_today_rastered'] or o['time_end_today_rastered'] == midnight) and\n (o['time_rastered'] < e['time_end_today_rastered'] or e['time_end_today_rastered'] == midnight)\n for o in row\n ):\n continue\n row.append(e)\n placed_in_row = True\n break\n\n if not placed_in_row:\n rows_by_collection[collection].append([e])\n\n # flatten rows to one stream of events with attribute row\n # for better keyboard-tab-order in html\n for collection in rows_by_collection:\n for i, row in enumerate(rows_by_collection[collection]):\n concurrency = i + 1\n for e in row:\n e[\"concurrency\"] = concurrency\n rows_by_collection[collection] = {\n \"concurrency\": len(rows_by_collection[collection]),\n \"events\": sorted([e for row in rows_by_collection[collection] for e in row], key=lambda d: d['time'] or time(0, 0)),\n }\n\n def sort_key(c):\n collection, row = c\n if collection is None:\n return ''\n else:\n return str(collection.name)\n return sorted(rows_by_collection.items(), key=sort_key)\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\n@method_decorator(cache_page(300), name='dispatch')\nclass OrganizerIcalDownload(OrganizerViewMixin, View):\n def get(self, request, *args, **kwargs):\n cutoff = now() - timedelta(days=31)\n events = list(\n filter_qs_by_attr(\n self.request.organizer.events.filter(\n Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),\n is_public=True,\n live=True,\n has_subevents=False,\n sales_channels__contains=self.request.sales_channel.identifier,\n ),\n request\n ).order_by(\n 'date_from'\n ).prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n events += list(\n filter_qs_by_attr(\n SubEvent.objects.filter(\n Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n is_public=True,\n active=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n request\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n ).order_by(\n 'date_from'\n )\n )\n\n if 'locale' in request.GET and 
request.GET.get('locale') in dict(settings.LANGUAGES):\n with language(request.GET.get('locale'), self.request.organizer.settings.region):\n cal = get_public_ical(events)\n else:\n cal = get_public_ical(events)\n\n resp = HttpResponse(cal.serialize(), content_type='text/calendar')\n resp['Content-Disposition'] = 'attachment; filename=\"{}.ics\"'.format(\n request.organizer.slug\n )\n if request.organizer.settings.meta_noindex:\n resp['X-Robots-Tag'] = 'noindex'\n return resp\n", "path": "src/pretix/presale/views/organizer.py"}], "after_files": [{"content": "#\n# This file is part of pretix (Community Edition).\n#\n# Copyright (C) 2014-2020 Raphael Michel and contributors\n# Copyright (C) 2020-2021 rami.io GmbH and contributors\n#\n# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General\n# Public License as published by the Free Software Foundation in version 3 of the License.\n#\n# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are\n# applicable granting you additional permissions and placing additional restrictions on your usage of this software.\n# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive\n# this file, see <https://pretix.eu/about/en/license>.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Affero General Public License along with this program. If not, see\n# <https://www.gnu.org/licenses/>.\n#\n\n# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of\n# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.\n#\n# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A\n# full history of changes and contributors is available at <https://github.com/pretix/pretix>.\n#\n# This file contains Apache-licensed contributions copyrighted by: Jan Felix Wiebe, Mohit Jindal\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations under the License.\nimport calendar\nimport hashlib\nimport math\nfrom collections import defaultdict\nfrom datetime import date, datetime, time, timedelta\nfrom functools import reduce\nfrom urllib.parse import quote, urlencode\n\nimport dateutil\nimport isoweek\nimport pytz\nfrom django.conf import settings\nfrom django.core.cache import caches\nfrom django.db.models import Exists, Max, Min, OuterRef, Prefetch, Q\nfrom django.db.models.functions import Coalesce, Greatest\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.formats import date_format, get_format\nfrom django.utils.timezone import get_current_timezone, now\nfrom django.views import View\nfrom django.views.decorators.cache import cache_page\nfrom django.views.generic import ListView, TemplateView\nfrom pytz import UTC\n\nfrom pretix.base.i18n import language\nfrom pretix.base.models import (\n Event, EventMetaValue, Organizer, Quota, SubEvent, SubEventMetaValue,\n)\nfrom pretix.base.services.quotas import QuotaAvailability\nfrom pretix.helpers.compat import date_fromisocalendar\nfrom pretix.helpers.daterange import daterange\nfrom pretix.helpers.formats.en.formats import (\n SHORT_MONTH_DAY_FORMAT, WEEK_FORMAT,\n)\nfrom pretix.multidomain.urlreverse import eventreverse\nfrom pretix.presale.ical import get_public_ical\nfrom pretix.presale.views import OrganizerViewMixin\n\n\ndef filter_qs_by_attr(qs, request):\n \"\"\"\n We'll allow to filter the event list using attributes defined in the event meta data\n models in the format ?attr[meta_name]=meta_value\n \"\"\"\n attrs = {}\n for i, item in enumerate(request.GET.items()):\n k, v = item\n if k.startswith(\"attr[\") and k.endswith(\"]\"):\n attrs[k[5:-1]] = v\n\n skey = 'filter_qs_by_attr_{}_{}'.format(request.organizer.pk, request.event.pk if hasattr(request, 'event') else '')\n if request.GET.get('attr_persist'):\n request.session[skey] = attrs\n elif skey in request.session:\n attrs = request.session[skey]\n\n props = {\n p.name: p for p in request.organizer.meta_properties.filter(\n name__in=attrs.keys()\n )\n }\n\n for i, item in enumerate(attrs.items()):\n attr, v = item\n emv_with_value = EventMetaValue.objects.filter(\n event=OuterRef('event' if qs.model == SubEvent else 'pk'),\n property__name=attr,\n value=v\n )\n emv_with_any_value = EventMetaValue.objects.filter(\n event=OuterRef('event' if qs.model == SubEvent else 'pk'),\n property__name=attr,\n )\n if qs.model == SubEvent:\n semv_with_value = SubEventMetaValue.objects.filter(\n subevent=OuterRef('pk'),\n property__name=attr,\n value=v\n )\n semv_with_any_value = SubEventMetaValue.objects.filter(\n subevent=OuterRef('pk'),\n property__name=attr,\n )\n\n prop = props.get(attr)\n if not prop:\n continue\n annotations = {'attr_{}'.format(i): Exists(emv_with_value)}\n if qs.model == SubEvent:\n annotations['attr_{}_sub'.format(i)] = Exists(semv_with_value)\n annotations['attr_{}_sub_any'.format(i)] = Exists(semv_with_any_value)\n filters = Q(**{'attr_{}_sub'.format(i): True})\n filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}'.format(i): True}))\n if prop.default == v:\n annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)\n filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}_any'.format(i): False}))\n else:\n filters = Q(**{'attr_{}'.format(i): True})\n if prop.default == 
v:\n annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)\n filters |= Q(**{'attr_{}_any'.format(i): False})\n\n qs = qs.annotate(**annotations).filter(filters)\n return qs\n\n\nclass EventListMixin:\n\n def _get_event_queryset(self):\n query = Q(is_public=True) & Q(live=True)\n qs = self.request.organizer.events.using(settings.DATABASE_REPLICA).filter(query)\n qs = qs.filter(sales_channels__contains=self.request.sales_channel.identifier)\n qs = qs.annotate(\n min_from=Min('subevents__date_from'),\n min_to=Min('subevents__date_to'),\n max_from=Max('subevents__date_from'),\n max_to=Max('subevents__date_to'),\n max_fromto=Greatest(Max('subevents__date_to'), Max('subevents__date_from')),\n )\n if \"old\" in self.request.GET:\n qs = qs.filter(\n Q(Q(has_subevents=False) & Q(\n Q(date_to__lt=now()) | Q(Q(date_to__isnull=True) & Q(date_from__lt=now()))\n )) | Q(Q(has_subevents=True) & Q(\n Q(min_to__lt=now()) | Q(min_from__lt=now()))\n )\n ).annotate(\n order_to=Coalesce('max_fromto', 'max_to', 'max_from', 'date_to', 'date_from'),\n ).order_by('-order_to')\n else:\n qs = qs.filter(\n Q(Q(has_subevents=False) & Q(\n Q(date_to__gte=now()) | Q(Q(date_to__isnull=True) & Q(date_from__gte=now()))\n )) | Q(Q(has_subevents=True) & Q(\n Q(max_to__gte=now()) | Q(max_from__gte=now()))\n )\n ).annotate(\n order_from=Coalesce('min_from', 'date_from'),\n ).order_by('order_from')\n qs = Event.annotated(filter_qs_by_attr(qs, self.request))\n return qs\n\n def _set_month_to_next_subevent(self):\n tz = pytz.timezone(self.request.event.settings.timezone)\n next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n active=True,\n is_public=True,\n ).select_related('event').order_by('date_from').first()\n\n if next_sev:\n datetime_from = next_sev.date_from\n self.year = datetime_from.astimezone(tz).year\n self.month = datetime_from.astimezone(tz).month\n else:\n self.year = now().year\n self.month = now().month\n\n def _set_month_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n has_subevents=False\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n tz = pytz.timezone(next_ev.settings.timezone)\n self.year = datetime_from.astimezone(tz).year\n self.month = datetime_from.astimezone(tz).month\n else:\n self.year = now().year\n self.month = now().month\n\n def _set_month_year(self):\n if 'date' in self.request.GET:\n try:\n date = dateutil.parser.isoparse(self.request.GET.get('date')).date()\n except ValueError:\n date = now().date()\n self.year = date.year\n self.month = date.month\n else:\n if hasattr(self.request, 'event'):\n self._set_month_to_next_subevent()\n else:\n 
self._set_month_to_next_event()\n\n def _set_week_to_next_subevent(self):\n tz = pytz.timezone(self.request.event.settings.timezone)\n next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n active=True,\n is_public=True,\n ).select_related('event').order_by('date_from').first()\n\n if next_sev:\n datetime_from = next_sev.date_from\n self.year = datetime_from.astimezone(tz).isocalendar()[0]\n self.week = datetime_from.astimezone(tz).isocalendar()[1]\n else:\n self.year = now().isocalendar()[0]\n self.week = now().isocalendar()[1]\n\n def _set_week_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n has_subevents=False\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n tz = pytz.timezone(next_ev.settings.timezone)\n self.year = datetime_from.astimezone(tz).isocalendar()[0]\n self.week = datetime_from.astimezone(tz).isocalendar()[1]\n else:\n self.year = now().isocalendar()[0]\n self.week = now().isocalendar()[1]\n\n def _set_week_year(self):\n if 'date' in self.request.GET:\n try:\n iso = dateutil.parser.isoparse(self.request.GET.get('date')).isocalendar()\n except ValueError:\n iso = now().isocalendar()\n self.year = iso[0]\n self.week = iso[1]\n else:\n if hasattr(self.request, 'event'):\n self._set_week_to_next_subevent()\n else:\n self._set_week_to_next_event()\n\n\nclass OrganizerIndex(OrganizerViewMixin, EventListMixin, ListView):\n model = Event\n context_object_name = 'events'\n template_name = 'pretixpresale/organizers/index.html'\n paginate_by = 30\n\n def dispatch(self, request, *args, **kwargs):\n # In stock pretix, nothing on this page is session-dependent except for the language and the customer login part,\n # so we can cache pretty aggressively if the user is anonymous. Note that we deliberately implement the caching\n # on the view layer, *after* all middlewares have been ran, so we have access to the computed locale, as well\n # as the login status etc.\n cache_allowed = (\n settings.CACHE_LARGE_VALUES_ALLOWED and\n not getattr(request, 'customer', None) and\n not request.user.is_authenticated\n )\n\n if not cache_allowed:\n return super().dispatch(request, *args, **kwargs)\n\n cache_key_parts = [\n request.method,\n request.host,\n str(request.organizer.pk),\n request.get_full_path(),\n request.LANGUAGE_CODE,\n self.request.sales_channel.identifier,\n ]\n for c, v in request.COOKIES.items():\n # If the cookie is not one we know, it might be set by a plugin and we need to include it in the\n # cache key to be safe. A known example includes plugins that e.g. 
store cookie banner state.\n if c not in (settings.SESSION_COOKIE_NAME, settings.LANGUAGE_COOKIE_NAME, settings.CSRF_COOKIE_NAME) and not c.startswith('__'):\n cache_key_parts.append(f'{c}={v}')\n for c, v in request.session.items():\n # If the session key is not one we know, it might be set by a plugin and we need to include it in the\n # cache key to be safe. A known example would be the pretix-campaigns plugin setting the campaign ID.\n if (\n not c.startswith('_auth') and\n not c.startswith('pretix_auth_') and\n not c.startswith('customer_auth_') and\n not c.startswith('current_cart_') and\n not c.startswith('cart_') and\n not c.startswith('payment_') and\n c not in ('carts', 'payment', 'pinned_user_agent')\n ):\n cache_key_parts.append(f'{c}={repr(v)}')\n\n cache_key = f'pretix.presale.views.organizer.OrganizerIndex:{hashlib.md5(\":\".join(cache_key_parts).encode()).hexdigest()}'\n cache_timeout = 15\n cache = caches[settings.CACHE_LARGE_VALUES_ALIAS]\n\n response = cache.get(cache_key)\n if response is not None:\n return response\n\n response = super().dispatch(request, *kwargs, **kwargs)\n if response.status_code >= 400:\n return response\n\n if hasattr(response, 'render') and callable(response.render):\n def _store_to_cache(r):\n cache.set(cache_key, r, cache_timeout)\n\n response.add_post_render_callback(_store_to_cache)\n else:\n cache.set(cache_key, response, cache_timeout)\n return response\n\n def get(self, request, *args, **kwargs):\n style = request.GET.get(\"style\", request.organizer.settings.event_list_type)\n if style == \"calendar\":\n cv = CalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n elif style == \"day\":\n cv = DayCalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n elif style == \"week\":\n cv = WeekCalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n else:\n return super().get(request, *args, **kwargs)\n\n def get_queryset(self):\n return self._get_event_queryset()\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n for event in ctx['events']:\n event.tzname = pytz.timezone(event.cache.get_or_set('timezone', lambda: event.settings.timezone))\n if event.has_subevents:\n event.daterange = daterange(\n event.min_from.astimezone(event.tzname),\n (event.max_fromto or event.max_to or event.max_from).astimezone(event.tzname)\n )\n return ctx\n\n\ndef has_before_after(eventqs, subeventqs, before, after):\n eqs = eventqs.filter(is_public=True, live=True, has_subevents=False)\n sqs = subeventqs.filter(active=True, is_public=True)\n return (\n eqs.filter(Q(date_from__lte=before)).exists() or sqs.filter(Q(date_from__lte=before)).exists(),\n eqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists() or sqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists()\n )\n\n\ndef add_events_for_days(request, baseqs, before, after, ebd, timezones):\n qs = baseqs.filter(is_public=True, live=True, has_subevents=False).filter(\n Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |\n Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |\n Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))\n ).order_by(\n 'date_from'\n ).prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n if hasattr(request, 'organizer'):\n qs = filter_qs_by_attr(qs, request)\n for event in qs:\n timezones.add(event.settings.timezones)\n tz = 
pytz.timezone(event.settings.timezone)\n datetime_from = event.date_from.astimezone(tz)\n date_from = datetime_from.date()\n if event.settings.show_date_to and event.date_to:\n datetime_to = event.date_to.astimezone(tz)\n date_to = event.date_to.astimezone(tz).date()\n d = max(date_from, before.date())\n while d <= date_to and d <= after.date():\n first = d == date_from\n ebd[d].append({\n 'event': event,\n 'continued': not first,\n 'time': datetime_from.time().replace(tzinfo=None) if first and event.settings.show_times else None,\n 'time_end': (\n datetime_to.time().replace(tzinfo=None)\n if (date_to == date_from or (\n date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()\n )) and event.settings.show_times\n else None\n ),\n 'time_end_today': (\n datetime_to.time().replace(tzinfo=None)\n if date_to == d and event.settings.show_times\n else None\n ),\n 'url': eventreverse(event, 'presale:event.index'),\n 'timezone': event.settings.timezone,\n })\n d += timedelta(days=1)\n\n else:\n ebd[date_from].append({\n 'event': event,\n 'continued': False,\n 'time': datetime_from.time().replace(tzinfo=None) if event.settings.show_times else None,\n 'url': eventreverse(event, 'presale:event.index'),\n 'timezone': event.settings.timezone,\n })\n\n\ndef add_subevents_for_days(qs, before, after, ebd, timezones, event=None, cart_namespace=None, voucher=None):\n qs = qs.filter(active=True, is_public=True).filter(\n Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |\n Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |\n Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))\n ).order_by(\n 'date_from'\n )\n\n quotas_to_compute = []\n for se in qs:\n if se.presale_is_running:\n quotas_to_compute += se.active_quotas\n\n qcache = {}\n if quotas_to_compute:\n qa = QuotaAvailability()\n qa.queue(*quotas_to_compute)\n qa.compute(allow_cache=True)\n qcache.update(qa.results)\n\n for se in qs:\n if qcache:\n se._quota_cache = qcache\n kwargs = {'subevent': se.pk}\n if cart_namespace:\n kwargs['cart_namespace'] = cart_namespace\n\n s = event.settings if event else se.event.settings\n\n if s.event_list_available_only:\n hide = se.presale_has_ended or (\n (not voucher or not voucher.allow_ignore_quota) and\n se.best_availability_state is not None and\n se.best_availability_state < Quota.AVAILABILITY_RESERVED\n )\n if hide:\n continue\n\n timezones.add(s.timezones)\n tz = pytz.timezone(s.timezone)\n datetime_from = se.date_from.astimezone(tz)\n date_from = datetime_from.date()\n if s.show_date_to and se.date_to:\n datetime_to = se.date_to.astimezone(tz)\n date_to = se.date_to.astimezone(tz).date()\n d = max(date_from, before.date())\n while d <= date_to and d <= after.date():\n first = d == date_from\n ebd[d].append({\n 'continued': not first,\n 'timezone': s.timezone,\n 'time': datetime_from.time().replace(tzinfo=None) if first and s.show_times else None,\n 'time_end': (\n datetime_to.time().replace(tzinfo=None)\n if (date_to == date_from or (\n date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()\n )) and s.show_times\n else None\n ),\n 'time_end_today': (\n datetime_to.time().replace(tzinfo=None)\n if date_to == d and s.show_times\n else None\n ),\n 'event': se,\n 'url': (\n eventreverse(se.event, 'presale:event.redeem',\n kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'\n if voucher\n else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)\n )\n 
})\n d += timedelta(days=1)\n\n else:\n ebd[date_from].append({\n 'event': se,\n 'continued': False,\n 'time': datetime_from.time().replace(tzinfo=None) if s.show_times else None,\n 'url': (\n eventreverse(se.event, 'presale:event.redeem',\n kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'\n if voucher\n else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)\n ),\n 'timezone': s.timezone,\n })\n\n\ndef sort_ev(e):\n return e['time'] or time(0, 0, 0), str(e['event'].name)\n\n\ndef days_for_template(ebd, week):\n day_format = get_format('WEEK_DAY_FORMAT')\n if day_format == 'WEEK_DAY_FORMAT':\n day_format = 'SHORT_DATE_FORMAT'\n return [\n {\n 'day_formatted': date_format(day, day_format),\n 'date': day,\n 'today': day == now().astimezone(get_current_timezone()).date(),\n 'events': sorted(ebd.get(day), key=sort_ev) if day in ebd else []\n }\n for day in week.days()\n ]\n\n\ndef weeks_for_template(ebd, year, month):\n calendar.setfirstweekday(0) # TODO: Configurable\n return [\n [\n {\n 'day': day,\n 'date': date(year, month, day),\n 'events': (\n sorted(ebd.get(date(year, month, day)), key=sort_ev)\n if date(year, month, day) in ebd else None\n )\n }\n if day > 0\n else None\n for day in week\n ]\n for week in calendar.monthcalendar(year, month)\n ]\n\n\nclass CalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar.html'\n\n def get(self, request, *args, **kwargs):\n # redirect old month-year-URLs to new date-URLs\n keys = (\"month\", \"year\")\n if all(k in request.GET for k in keys):\n get_params = {k: v for k, v in request.GET.items() if k not in keys}\n get_params[\"date\"] = \"%s-%s\" % (request.GET.get(\"year\"), request.GET.get(\"month\"))\n return redirect(self.request.path + \"?\" + urlencode(get_params))\n\n self._set_month_year()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n try:\n _, ndays = calendar.monthrange(self.year, self.month)\n except calendar.IllegalMonthError:\n raise Http404()\n before = datetime(self.year, self.month, 1, 0, 0, 0, tzinfo=UTC) - timedelta(days=1)\n after = datetime(self.year, self.month, ndays, 0, 0, 0, tzinfo=UTC) + timedelta(days=1)\n\n ctx['date'] = date(self.year, self.month, 1)\n ctx['before'] = before\n ctx['after'] = after\n ebd = self._events_by_day(before, after)\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ctx['multiple_timezones'] = self._multiple_timezones\n ctx['weeks'] = weeks_for_template(ebd, self.year, self.month)\n ctx['months'] = [date(self.year, i + 1, 1) for i in range(12)]\n ctx['years'] = range(now().year - 2, now().year + 3)\n\n return ctx\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n 
event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\nclass WeekCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar_week.html'\n\n def get(self, request, *args, **kwargs):\n # redirect old week-year-URLs to new date-URLs\n keys = (\"week\", \"year\")\n if all(k in request.GET for k in keys):\n get_params = {k: v for k, v in request.GET.items() if k not in keys}\n get_params[\"date\"] = \"%s-W%s\" % (request.GET.get(\"year\"), request.GET.get(\"week\"))\n return redirect(self.request.path + \"?\" + urlencode(get_params))\n\n self._set_week_year()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n week = isoweek.Week(self.year, self.week)\n before = datetime(\n week.monday().year, week.monday().month, week.monday().day, 0, 0, 0, tzinfo=UTC\n ) - timedelta(days=1)\n after = datetime(\n week.sunday().year, week.sunday().month, week.sunday().day, 0, 0, 0, tzinfo=UTC\n ) + timedelta(days=1)\n\n ctx['date'] = week.monday()\n ctx['before'] = before\n ctx['after'] = after\n\n ebd = self._events_by_day(before, after)\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ctx['days'] = days_for_template(ebd, week)\n years = (self.year - 1, self.year, self.year + 1)\n weeks = []\n for year in years:\n weeks += [\n (date_fromisocalendar(year, i + 1, 1), date_fromisocalendar(year, i + 1, 7))\n for i in range(53 if date(year, 12, 31).isocalendar()[1] == 53 else 52)\n ]\n ctx['weeks'] = [[w for w in weeks if w[0].year == year] for year in years]\n ctx['week_format'] = get_format('WEEK_FORMAT')\n if ctx['week_format'] == 'WEEK_FORMAT':\n ctx['week_format'] = WEEK_FORMAT\n ctx['short_month_day_format'] = get_format('SHORT_MONTH_DAY_FORMAT')\n if ctx['short_month_day_format'] == 'SHORT_MONTH_DAY_FORMAT':\n ctx['short_month_day_format'] = SHORT_MONTH_DAY_FORMAT\n ctx['multiple_timezones'] = self._multiple_timezones\n\n return ctx\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n 
queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\nclass DayCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar_day.html'\n\n def _set_date_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n date_from__gte=now(),\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n self.tz = pytz.timezone(next_ev.settings.timezone)\n self.date = datetime_from.astimezone(self.tz).date()\n else:\n self.tz = self.request.organizer.timezone\n self.date = now().astimezone(self.tz).date()\n\n def _set_date(self):\n if 'date' in self.request.GET:\n self.tz = self.request.organizer.timezone\n try:\n self.date = dateutil.parser.parse(self.request.GET.get('date')).date()\n except ValueError:\n self.date = now().astimezone(self.tz).date()\n else:\n self._set_date_to_next_event()\n\n def get(self, request, *args, **kwargs):\n self._set_date()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n before = datetime(\n self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC\n ) - timedelta(days=1)\n after = datetime(\n self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC\n ) + timedelta(days=1)\n\n ctx['date'] = self.date\n ctx['cal_tz'] = self.tz\n ctx['before'] = before\n ctx['after'] = after\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ebd = self._events_by_day(before, after)\n if not ebd[self.date]:\n return ctx\n\n events = ebd[self.date]\n shortest_duration = self._get_shortest_duration(events).total_seconds() // 60\n # pick the next biggest tick_duration based on shortest_duration, max. 
180 minutes\n tick_duration = next((d for d in [5, 10, 15, 30, 60, 120, 180] if d >= shortest_duration), 180)\n\n raster_size = min(self._get_raster_size(events), tick_duration)\n events, start, end = self._rasterize_events(events, tick_duration=tick_duration, raster_size=raster_size)\n calendar_duration = self._get_time_duration(start, end)\n ctx[\"calendar_duration\"] = self._format_duration(calendar_duration)\n ctx['time_ticks'] = self._get_time_ticks(start, end, tick_duration)\n ctx['start'] = datetime.combine(self.date, start)\n ctx['raster_size'] = raster_size\n # ctx['end'] = end\n # size of each grid-column is based on shortest event duration and raster_size\n # raster_size is based on start/end times, so it could happen we have a small raster but long running events\n # raster_size will always be smaller or equals tick_duration\n ctx['raster_to_shortest_ratio'] = round((8 * raster_size) / shortest_duration)\n\n ctx['events'] = events\n\n events_by_series = self._grid_for_template(events)\n ctx['collections'] = events_by_series\n ctx['no_headlines'] = not any([series for series, events in events_by_series])\n ctx['multiple_timezones'] = self._multiple_timezones\n return ctx\n\n def _get_raster_size(self, events):\n # get best raster-size for min. # of columns in grid\n # due to grid-col-calculations in CSS raster_size cannot be bigger than 60 (minutes)\n\n # all start- and end-times (minute-part) except full hour\n times = [\n e[\"time\"].minute for e in events if e[\"time\"] and e[\"time\"].minute\n ] + [\n e[\"time_end_today\"].minute for e in events if \"time_end_today\" in e and e[\"time_end_today\"] and e[\"time_end_today\"].minute\n ]\n if not times:\n # no time other than full hour, so raster can be 1 hour/60 minutes\n return 60\n gcd = reduce(math.gcd, set(times))\n return next((d for d in [5, 10, 15, 30, 60] if d >= gcd), 60)\n\n def _get_time_duration(self, start, end):\n midnight = time(0, 0)\n return datetime.combine(\n self.date if end != midnight else self.date + timedelta(days=1),\n end\n ) - datetime.combine(\n self.date,\n start\n )\n\n def _format_duration(self, duration):\n return \":\".join([\n \"%02d\" % i for i in (\n (duration.days * 24) + (duration.seconds // 3600),\n (duration.seconds // 60) % 60\n )\n ])\n\n def _floor_time(self, t, raster_size=5):\n # raster_size based on minutes, might be factored into a helper class with a timedelta as raster\n minutes = t.hour * 60 + t.minute\n if minutes % raster_size:\n minutes = (minutes // raster_size) * raster_size\n return t.replace(hour=minutes // 60, minute=minutes % 60)\n return t\n\n def _ceil_time(self, t, raster_size=5):\n # raster_size based on minutes, might be factored into a helper class with a timedelta as raster\n minutes = t.hour * 60 + t.minute\n if not minutes % raster_size:\n return t\n minutes = math.ceil(minutes / raster_size) * raster_size\n minute = minutes % 60\n hour = minutes // 60\n if hour > 23:\n hour = hour % 24\n return t.replace(minute=minute, hour=hour)\n\n def _rasterize_events(self, events, tick_duration, raster_size=5):\n rastered_events = []\n start, end = self._get_time_range(events)\n start = self._floor_time(start, raster_size=tick_duration)\n end = self._ceil_time(end, raster_size=tick_duration)\n\n midnight = time(0, 0)\n for e in events:\n t = e[\"time\"] or time(0, 0)\n e[\"offset_shift_start\"] = 0\n if e[\"continued\"]:\n e[\"time_rastered\"] = midnight\n elif t.minute % raster_size:\n e[\"time_rastered\"] = t.replace(minute=(t.minute // raster_size) * raster_size)\n 
e[\"offset_shift_start\"] = t.minute % raster_size\n else:\n e[\"time_rastered\"] = t\n\n e[\"offset_shift_end\"] = 0\n if \"time_end_today\" in e and e[\"time_end_today\"]:\n if e[\"time_end_today\"].minute % raster_size:\n minute = math.ceil(e[\"time_end_today\"].minute / raster_size) * raster_size\n hour = e[\"time_end_today\"].hour\n if minute > 59:\n minute = minute % 60\n hour = (hour + 1) % 24\n e[\"time_end_today_rastered\"] = e[\"time_end_today\"].replace(minute=minute, hour=hour)\n e[\"offset_shift_end\"] = raster_size - e[\"time_end_today\"].minute % raster_size\n else:\n e[\"time_end_today_rastered\"] = e[\"time_end_today\"]\n else:\n e[\"time_end_today\"] = e[\"time_end_today_rastered\"] = time(0, 0)\n\n e[\"duration_rastered\"] = self._format_duration(datetime.combine(\n self.date if e[\"time_end_today_rastered\"] != midnight else self.date + timedelta(days=1),\n e[\"time_end_today_rastered\"]\n ) - datetime.combine(\n self.date,\n e['time_rastered']\n ))\n\n e[\"offset_rastered\"] = datetime.combine(self.date, time(0, 0)) + self._get_time_duration(start, e[\"time_rastered\"])\n\n rastered_events.append(e)\n\n return rastered_events, start, end\n\n def _get_shortest_duration(self, events):\n midnight = time(0, 0)\n durations = [\n datetime.combine(\n self.date if e.get('time_end_today') and e['time_end_today'] != midnight else self.date + timedelta(days=1),\n e['time_end_today'] if e.get('time_end_today') else time(0, 0)\n )\n -\n datetime.combine(\n self.date,\n time(0, 0) if e['continued'] else (e['time'] or time(0, 0))\n )\n for e in events\n ]\n return min([d for d in durations])\n\n def _get_time_range(self, events):\n if any(e['continued'] for e in events) or any(e['time'] is None for e in events):\n starting_at = time(0, 0)\n else:\n starting_at = min(e['time'] for e in events)\n\n if any(e.get('time_end_today') is None for e in events):\n ending_at = time(0, 0)\n else:\n ending_at = max(e['time_end_today'] for e in events)\n\n return starting_at, ending_at\n\n def _get_time_ticks(self, start, end, tick_duration):\n ticks = []\n tick_duration = timedelta(minutes=tick_duration)\n\n # convert time to datetime for timedelta calc\n start = datetime.combine(self.date, start)\n end = datetime.combine(self.date, end)\n if end <= start:\n end = end + timedelta(days=1)\n\n tick_start = start\n offset = datetime.utcfromtimestamp(0)\n duration = datetime.utcfromtimestamp(tick_duration.total_seconds())\n while tick_start < end:\n tick = {\n \"start\": tick_start,\n \"duration\": duration,\n \"offset\": offset,\n }\n ticks.append(tick)\n tick_start += tick_duration\n offset += tick_duration\n\n return ticks\n\n def _grid_for_template(self, events):\n midnight = time(0, 0)\n rows_by_collection = defaultdict(list)\n\n # We sort the events into \"collections\": all subevents from the same\n # event series together and all non-series events into a \"None\"\n # collection. 
Then, we look if there's already an event in the\n # collection that overlaps, in which case we need to split the\n # collection into multiple rows.\n for counter, e in enumerate(events):\n collection = e['event'].event if isinstance(e['event'], SubEvent) else None\n\n placed_in_row = False\n for row in rows_by_collection[collection]:\n if any(\n (e['time_rastered'] < o['time_end_today_rastered'] or o['time_end_today_rastered'] == midnight) and\n (o['time_rastered'] < e['time_end_today_rastered'] or e['time_end_today_rastered'] == midnight)\n for o in row\n ):\n continue\n row.append(e)\n placed_in_row = True\n break\n\n if not placed_in_row:\n rows_by_collection[collection].append([e])\n\n # flatten rows to one stream of events with attribute row\n # for better keyboard-tab-order in html\n for collection in rows_by_collection:\n for i, row in enumerate(rows_by_collection[collection]):\n concurrency = i + 1\n for e in row:\n e[\"concurrency\"] = concurrency\n rows_by_collection[collection] = {\n \"concurrency\": len(rows_by_collection[collection]),\n \"events\": sorted([e for row in rows_by_collection[collection] for e in row], key=lambda d: d['time'] or time(0, 0)),\n }\n\n def sort_key(c):\n collection, row = c\n if collection is None:\n return ''\n else:\n return str(collection.name)\n return sorted(rows_by_collection.items(), key=sort_key)\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\n@method_decorator(cache_page(300), name='dispatch')\nclass OrganizerIcalDownload(OrganizerViewMixin, View):\n def get(self, request, *args, **kwargs):\n cutoff = now() - timedelta(days=31)\n events = list(\n filter_qs_by_attr(\n self.request.organizer.events.filter(\n Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),\n is_public=True,\n live=True,\n has_subevents=False,\n sales_channels__contains=self.request.sales_channel.identifier,\n ),\n request\n ).order_by(\n 'date_from'\n ).prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n events += list(\n filter_qs_by_attr(\n SubEvent.objects.filter(\n Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n is_public=True,\n active=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n request\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n ).order_by(\n 'date_from'\n )\n )\n\n if 'locale' in request.GET and 
request.GET.get('locale') in dict(settings.LANGUAGES):\n with language(request.GET.get('locale'), self.request.organizer.settings.region):\n cal = get_public_ical(events)\n else:\n cal = get_public_ical(events)\n\n resp = HttpResponse(cal.serialize(), content_type='text/calendar')\n resp['Content-Disposition'] = 'attachment; filename=\"{}.ics\"'.format(\n request.organizer.slug\n )\n if request.organizer.settings.meta_noindex:\n resp['X-Robots-Tag'] = 'noindex'\n return resp\n", "path": "src/pretix/presale/views/organizer.py"}]} |
gh_patches_debug_1471 | rasdani/github-patches | git_diff | getsentry__sentry-21581 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hardcoded MAX_RETRIES = 1
https://github.com/getsentry/sentry/blob/master/src/sentry/tasks/deletion.py#L18
--- END ISSUE ---
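For context on the report above: line 17 of `src/sentry/tasks/deletion.py` picks the retry policy from `settings.DEBUG` (a single retry while developing, `None`, meaning infinite retries, in production, per the comment above it), and line 18 then unconditionally reassigns `MAX_RETRIES = 1`, discarding that choice for every deletion task that passes `max_retries=MAX_RETRIES` to its `@instrumented_task` decorator. Below is a minimal, self-contained sketch of the clobbering; the plain `DEBUG` flag is only a stand-in for `django.conf.settings.DEBUG` so the snippet runs outside a configured Django project.
```
# Stand-in for django.conf.settings.DEBUG (assumption for illustration only).
DEBUG = False

# Intended policy (file line 17): one retry in debug, infinite retries (None) in prod.
MAX_RETRIES = 1 if DEBUG else None

# Reported problem (file line 18): the value above is unconditionally overwritten,
# so every task ends up with max_retries=1 even in production.
MAX_RETRIES = 1

print(MAX_RETRIES)  # prints 1 regardless of DEBUG
```
The likely fix is simply to drop the second assignment so the conditional value survives, though the exact patch may differ.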
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/tasks/deletion.py`
Content:
```
1 from __future__ import absolute_import
2
3 from uuid import uuid4
4
5 from django.apps import apps
6 from django.conf import settings
7 from django.db import transaction
8 from django.utils import timezone
9
10 from sentry.constants import ObjectStatus
11 from sentry.exceptions import DeleteAborted
12 from sentry.signals import pending_delete
13 from sentry.tasks.base import instrumented_task, retry, track_group_async_operation
14
15 # in prod we run with infinite retries to recover from errors
16 # in debug/development, we assume these tasks generally shouldn't fail
17 MAX_RETRIES = 1 if settings.DEBUG else None
18 MAX_RETRIES = 1
19
20
21 @instrumented_task(name="sentry.tasks.deletion.run_scheduled_deletions", queue="cleanup")
22 def run_scheduled_deletions():
23 from sentry.models import ScheduledDeletion
24
25 queryset = ScheduledDeletion.objects.filter(
26 in_progress=False, aborted=False, date_scheduled__lte=timezone.now()
27 )
28 for item in queryset:
29 with transaction.atomic():
30 affected = ScheduledDeletion.objects.filter(
31 id=item.id, in_progress=False, aborted=False
32 ).update(in_progress=True)
33 if not affected:
34 continue
35
36 run_deletion.delay(deletion_id=item.id)
37
38
39 @instrumented_task(
40 name="sentry.tasks.deletion.run_deletion",
41 queue="cleanup",
42 default_retry_delay=60 * 5,
43 max_retries=MAX_RETRIES,
44 )
45 @retry(exclude=(DeleteAborted,))
46 def run_deletion(deletion_id):
47 from sentry import deletions
48 from sentry.models import ScheduledDeletion
49
50 try:
51 deletion = ScheduledDeletion.objects.get(id=deletion_id)
52 except ScheduledDeletion.DoesNotExist:
53 return
54
55 if deletion.aborted:
56 raise DeleteAborted
57
58 if not deletion.in_progress:
59 actor = deletion.get_actor()
60 instance = deletion.get_instance()
61 with transaction.atomic():
62 deletion.update(in_progress=True)
63 pending_delete.send(sender=type(instance), instance=instance, actor=actor)
64
65 task = deletions.get(
66 model=deletion.get_model(),
67 query={"id": deletion.object_id},
68 transaction_id=deletion.guid,
69 actor_id=deletion.actor_id,
70 )
71 has_more = task.chunk()
72 if has_more:
73 run_deletion.apply_async(kwargs={"deletion_id": deletion_id}, countdown=15)
74 deletion.delete()
75
76
77 @instrumented_task(
78 name="sentry.tasks.deletion.revoke_api_tokens",
79 queue="cleanup",
80 default_retry_delay=60 * 5,
81 max_retries=MAX_RETRIES,
82 )
83 @retry(exclude=(DeleteAborted,))
84 def revoke_api_tokens(object_id, transaction_id=None, timestamp=None, **kwargs):
85 from sentry.models import ApiToken
86
87 queryset = ApiToken.objects.filter(application=object_id)
88 if timestamp:
89 queryset = queryset.filter(date_added__lte=timestamp)
90
91 # we're using a slow deletion strategy to avoid a lot of custom code for
92 # postgres
93 has_more = False
94 for obj in queryset[:1000]:
95 obj.delete()
96 has_more = True
97
98 if has_more:
99 revoke_api_tokens.apply_async(
100 kwargs={
101 "object_id": object_id,
102 "transaction_id": transaction_id,
103 "timestamp": timestamp,
104 },
105 countdown=15,
106 )
107 return has_more
108
109
110 @instrumented_task(
111 name="sentry.tasks.deletion.delete_organization",
112 queue="cleanup",
113 default_retry_delay=60 * 5,
114 max_retries=MAX_RETRIES,
115 )
116 @retry(exclude=(DeleteAborted,))
117 def delete_organization(object_id, transaction_id=None, actor_id=None, **kwargs):
118 from sentry import deletions
119 from sentry.models import Organization, OrganizationStatus
120
121 try:
122 instance = Organization.objects.get(id=object_id)
123 except Organization.DoesNotExist:
124 return
125
126 if instance.status == OrganizationStatus.VISIBLE:
127 raise DeleteAborted
128
129 # compat: can be removed after we switch to scheduled deletions
130 if instance.status != OrganizationStatus.DELETION_IN_PROGRESS:
131 pending_delete.send(sender=type(instance), instance=instance)
132
133 task = deletions.get(
134 model=Organization,
135 query={"id": object_id},
136 transaction_id=transaction_id or uuid4().hex,
137 actor_id=actor_id,
138 )
139 has_more = task.chunk()
140 if has_more:
141 delete_organization.apply_async(
142 kwargs={"object_id": object_id, "transaction_id": transaction_id, "actor_id": actor_id},
143 countdown=15,
144 )
145
146
147 @instrumented_task(
148 name="sentry.tasks.deletion.delete_team",
149 queue="cleanup",
150 default_retry_delay=60 * 5,
151 max_retries=MAX_RETRIES,
152 )
153 @retry(exclude=(DeleteAborted,))
154 def delete_team(object_id, transaction_id=None, **kwargs):
155 from sentry import deletions
156 from sentry.models import Team, TeamStatus
157
158 try:
159 instance = Team.objects.get(id=object_id)
160 except Team.DoesNotExist:
161 return
162
163 if instance.status == TeamStatus.VISIBLE:
164 raise DeleteAborted
165
166 task = deletions.get(
167 model=Team, query={"id": object_id}, transaction_id=transaction_id or uuid4().hex
168 )
169 has_more = task.chunk()
170 if has_more:
171 delete_team.apply_async(
172 kwargs={"object_id": object_id, "transaction_id": transaction_id}, countdown=15
173 )
174
175
176 @instrumented_task(
177 name="sentry.tasks.deletion.delete_project",
178 queue="cleanup",
179 default_retry_delay=60 * 5,
180 max_retries=MAX_RETRIES,
181 )
182 @retry(exclude=(DeleteAborted,))
183 def delete_project(object_id, transaction_id=None, **kwargs):
184 from sentry import deletions
185 from sentry.models import Project, ProjectStatus
186
187 try:
188 instance = Project.objects.get(id=object_id)
189 except Project.DoesNotExist:
190 return
191
192 if instance.status == ProjectStatus.VISIBLE:
193 raise DeleteAborted
194
195 task = deletions.get(
196 model=Project, query={"id": object_id}, transaction_id=transaction_id or uuid4().hex
197 )
198 has_more = task.chunk()
199 if has_more:
200 delete_project.apply_async(
201 kwargs={"object_id": object_id, "transaction_id": transaction_id}, countdown=15
202 )
203
204
205 @instrumented_task(
206 name="sentry.tasks.deletion.delete_groups",
207 queue="cleanup",
208 default_retry_delay=60 * 5,
209 max_retries=MAX_RETRIES,
210 )
211 @retry(exclude=(DeleteAborted,))
212 @track_group_async_operation
213 def delete_groups(object_ids, transaction_id=None, eventstream_state=None, **kwargs):
214 from sentry import deletions, eventstream
215 from sentry.models import Group
216
217 transaction_id = transaction_id or uuid4().hex
218
219 max_batch_size = 100
220 current_batch, rest = object_ids[:max_batch_size], object_ids[max_batch_size:]
221
222 task = deletions.get(
223 model=Group, query={"id__in": current_batch}, transaction_id=transaction_id
224 )
225 has_more = task.chunk()
226 if has_more or rest:
227 delete_groups.apply_async(
228 kwargs={
229 "object_ids": object_ids if has_more else rest,
230 "transaction_id": transaction_id,
231 "eventstream_state": eventstream_state,
232 },
233 countdown=15,
234 )
235 else:
236 # all groups have been deleted
237 if eventstream_state:
238 eventstream.end_delete_groups(eventstream_state)
239
240
241 @instrumented_task(
242 name="sentry.tasks.deletion.delete_api_application",
243 queue="cleanup",
244 default_retry_delay=60 * 5,
245 max_retries=MAX_RETRIES,
246 )
247 @retry(exclude=(DeleteAborted,))
248 def delete_api_application(object_id, transaction_id=None, **kwargs):
249 from sentry import deletions
250 from sentry.models import ApiApplication, ApiApplicationStatus
251
252 try:
253 instance = ApiApplication.objects.get(id=object_id)
254 except ApiApplication.DoesNotExist:
255 return
256
257 if instance.status == ApiApplicationStatus.active:
258 raise DeleteAborted
259
260 task = deletions.get(
261 model=ApiApplication, query={"id": object_id}, transaction_id=transaction_id or uuid4().hex
262 )
263 has_more = task.chunk()
264 if has_more:
265 delete_api_application.apply_async(
266 kwargs={"object_id": object_id, "transaction_id": transaction_id}, countdown=15
267 )
268
269
270 @instrumented_task(
271 name="sentry.tasks.deletion.generic_delete",
272 queue="cleanup",
273 default_retry_delay=60 * 5,
274 max_retries=MAX_RETRIES,
275 )
276 @retry(exclude=(DeleteAborted,))
277 def generic_delete(app_label, model_name, object_id, transaction_id=None, actor_id=None, **kwargs):
278 from sentry import deletions
279 from sentry.models import User
280
281 model = apps.get_model(app_label, model_name)
282
283 try:
284 instance = model.objects.get(id=object_id)
285 except model.DoesNotExist:
286 return
287
288 if instance.status != ObjectStatus.DELETION_IN_PROGRESS:
289 pending_delete.send(
290 sender=type(instance),
291 instance=instance,
292 actor=User.objects.get(id=actor_id) if actor_id else None,
293 )
294
295 if instance.status == ObjectStatus.VISIBLE:
296 raise DeleteAborted
297
298 task = deletions.get(
299 model=model,
300 actor_id=actor_id,
301 query={"id": object_id},
302 transaction_id=transaction_id or uuid4().hex,
303 )
304 has_more = task.chunk()
305 if has_more:
306 generic_delete.apply_async(
307 kwargs={
308 "app_label": app_label,
309 "model_name": model_name,
310 "object_id": object_id,
311 "transaction_id": transaction_id,
312 "actor_id": actor_id,
313 },
314 countdown=15,
315 )
316
317
318 @instrumented_task(
319 name="sentry.tasks.deletion.delete_repository",
320 queue="cleanup",
321 default_retry_delay=60 * 5,
322 max_retries=MAX_RETRIES,
323 )
324 @retry(exclude=(DeleteAborted,))
325 def delete_repository(object_id, transaction_id=None, actor_id=None, **kwargs):
326 from sentry import deletions
327 from sentry.models import Repository, User
328
329 try:
330 instance = Repository.objects.get(id=object_id)
331 except Repository.DoesNotExist:
332 return
333
334 if instance.status == ObjectStatus.VISIBLE:
335 raise DeleteAborted
336
337 # compat: can be removed after we switch to scheduled deletions
338 if instance.status != ObjectStatus.DELETION_IN_PROGRESS:
339 pending_delete.send(
340 sender=type(instance),
341 instance=instance,
342 actor=User.objects.get(id=actor_id) if actor_id else None,
343 )
344
345 task = deletions.get(
346 model=Repository,
347 actor_id=actor_id,
348 query={"id": object_id},
349 transaction_id=transaction_id or uuid4().hex,
350 )
351 has_more = task.chunk()
352 if has_more:
353 delete_repository.apply_async(
354 kwargs={"object_id": object_id, "transaction_id": transaction_id, "actor_id": actor_id},
355 countdown=15,
356 )
357
358
359 @instrumented_task(
360 name="sentry.tasks.deletion.delete_organization_integration",
361 queue="cleanup",
362 default_retry_delay=60 * 5,
363 max_retries=MAX_RETRIES,
364 )
365 @retry(exclude=(DeleteAborted,))
366 def delete_organization_integration(object_id, transaction_id=None, actor_id=None, **kwargs):
367 from sentry import deletions
368 from sentry.models import OrganizationIntegration, Repository
369
370 try:
371 instance = OrganizationIntegration.objects.get(id=object_id)
372 except OrganizationIntegration.DoesNotExist:
373 return
374
375 if instance.status == ObjectStatus.VISIBLE:
376 raise DeleteAborted
377
378 # dissociate repos from that integration
379 Repository.objects.filter(
380 organization_id=instance.organization_id, integration_id=instance.integration_id
381 ).update(integration_id=None)
382
383 task = deletions.get(
384 model=OrganizationIntegration,
385 actor_id=actor_id,
386 query={"id": object_id},
387 transaction_id=transaction_id or uuid4().hex,
388 )
389 has_more = task.chunk()
390 if has_more:
391 delete_organization_integration.apply_async(
392 kwargs={"object_id": object_id, "transaction_id": transaction_id, "actor_id": actor_id},
393 countdown=15,
394 )
395
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/tasks/deletion.py b/src/sentry/tasks/deletion.py
--- a/src/sentry/tasks/deletion.py
+++ b/src/sentry/tasks/deletion.py
@@ -14,8 +14,7 @@
# in prod we run with infinite retries to recover from errors
# in debug/development, we assume these tasks generally shouldn't fail
-MAX_RETRIES = 1 if settings.DEBUG else None
-MAX_RETRIES = 1
+MAX_RETRIES = 1 if settings.DEBUG else 5
@instrumented_task(name="sentry.tasks.deletion.run_scheduled_deletions", queue="cleanup")
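
The diff above bounds retries differently in debug and production. A minimal stand-alone sketch of that same pattern in a plain Celery task follows; the broker URL, DEBUG flag, and task body are assumptions for illustration, not Sentry's real configuration.

```python
# Hedged sketch, not Sentry's actual module: shows how a debug-aware retry limit
# feeds the usual Celery task options (max_retries, default_retry_delay).
import os

from celery import Celery

app = Celery("deletions", broker="memory://")

DEBUG = os.environ.get("DEBUG") == "1"
MAX_RETRIES = 1 if DEBUG else 5  # None would mean "retry forever"


@app.task(bind=True, max_retries=MAX_RETRIES, default_retry_delay=60 * 5)
def run_deletion(self, deletion_id):
    try:
        pass  # perform one chunk of the deletion here
    except Exception as exc:
        # Celery re-raises the original exception once max_retries is exhausted.
        raise self.retry(exc=exc)
```
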
| {"golden_diff": "diff --git a/src/sentry/tasks/deletion.py b/src/sentry/tasks/deletion.py\n--- a/src/sentry/tasks/deletion.py\n+++ b/src/sentry/tasks/deletion.py\n@@ -14,8 +14,7 @@\n \n # in prod we run with infinite retries to recover from errors\n # in debug/development, we assume these tasks generally shouldn't fail\n-MAX_RETRIES = 1 if settings.DEBUG else None\n-MAX_RETRIES = 1\n+MAX_RETRIES = 1 if settings.DEBUG else 5\n \n \n @instrumented_task(name=\"sentry.tasks.deletion.run_scheduled_deletions\", queue=\"cleanup\")\n", "issue": "Hardcoded MAX_RETRIES = 1\nhttps://github.com/getsentry/sentry/blob/master/src/sentry/tasks/deletion.py#L18\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom uuid import uuid4\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.utils import timezone\n\nfrom sentry.constants import ObjectStatus\nfrom sentry.exceptions import DeleteAborted\nfrom sentry.signals import pending_delete\nfrom sentry.tasks.base import instrumented_task, retry, track_group_async_operation\n\n# in prod we run with infinite retries to recover from errors\n# in debug/development, we assume these tasks generally shouldn't fail\nMAX_RETRIES = 1 if settings.DEBUG else None\nMAX_RETRIES = 1\n\n\n@instrumented_task(name=\"sentry.tasks.deletion.run_scheduled_deletions\", queue=\"cleanup\")\ndef run_scheduled_deletions():\n from sentry.models import ScheduledDeletion\n\n queryset = ScheduledDeletion.objects.filter(\n in_progress=False, aborted=False, date_scheduled__lte=timezone.now()\n )\n for item in queryset:\n with transaction.atomic():\n affected = ScheduledDeletion.objects.filter(\n id=item.id, in_progress=False, aborted=False\n ).update(in_progress=True)\n if not affected:\n continue\n\n run_deletion.delay(deletion_id=item.id)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.run_deletion\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef run_deletion(deletion_id):\n from sentry import deletions\n from sentry.models import ScheduledDeletion\n\n try:\n deletion = ScheduledDeletion.objects.get(id=deletion_id)\n except ScheduledDeletion.DoesNotExist:\n return\n\n if deletion.aborted:\n raise DeleteAborted\n\n if not deletion.in_progress:\n actor = deletion.get_actor()\n instance = deletion.get_instance()\n with transaction.atomic():\n deletion.update(in_progress=True)\n pending_delete.send(sender=type(instance), instance=instance, actor=actor)\n\n task = deletions.get(\n model=deletion.get_model(),\n query={\"id\": deletion.object_id},\n transaction_id=deletion.guid,\n actor_id=deletion.actor_id,\n )\n has_more = task.chunk()\n if has_more:\n run_deletion.apply_async(kwargs={\"deletion_id\": deletion_id}, countdown=15)\n deletion.delete()\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.revoke_api_tokens\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef revoke_api_tokens(object_id, transaction_id=None, timestamp=None, **kwargs):\n from sentry.models import ApiToken\n\n queryset = ApiToken.objects.filter(application=object_id)\n if timestamp:\n queryset = queryset.filter(date_added__lte=timestamp)\n\n # we're using a slow deletion strategy to avoid a lot of custom code for\n # postgres\n has_more = False\n for obj in queryset[:1000]:\n obj.delete()\n has_more = True\n\n if has_more:\n revoke_api_tokens.apply_async(\n kwargs={\n 
\"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"timestamp\": timestamp,\n },\n countdown=15,\n )\n return has_more\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Organization, OrganizationStatus\n\n try:\n instance = Organization.objects.get(id=object_id)\n except Organization.DoesNotExist:\n return\n\n if instance.status == OrganizationStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != OrganizationStatus.DELETION_IN_PROGRESS:\n pending_delete.send(sender=type(instance), instance=instance)\n\n task = deletions.get(\n model=Organization,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n actor_id=actor_id,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_team\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_team(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Team, TeamStatus\n\n try:\n instance = Team.objects.get(id=object_id)\n except Team.DoesNotExist:\n return\n\n if instance.status == TeamStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Team, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_team.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_project\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_project(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Project, ProjectStatus\n\n try:\n instance = Project.objects.get(id=object_id)\n except Project.DoesNotExist:\n return\n\n if instance.status == ProjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Project, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_project.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_groups\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\n@track_group_async_operation\ndef delete_groups(object_ids, transaction_id=None, eventstream_state=None, **kwargs):\n from sentry import deletions, eventstream\n from sentry.models import Group\n\n transaction_id = transaction_id or uuid4().hex\n\n max_batch_size = 100\n current_batch, rest = object_ids[:max_batch_size], object_ids[max_batch_size:]\n\n task = deletions.get(\n model=Group, query={\"id__in\": current_batch}, transaction_id=transaction_id\n )\n has_more = task.chunk()\n if has_more or rest:\n delete_groups.apply_async(\n kwargs={\n \"object_ids\": object_ids if has_more else rest,\n 
\"transaction_id\": transaction_id,\n \"eventstream_state\": eventstream_state,\n },\n countdown=15,\n )\n else:\n # all groups have been deleted\n if eventstream_state:\n eventstream.end_delete_groups(eventstream_state)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_api_application\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_api_application(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import ApiApplication, ApiApplicationStatus\n\n try:\n instance = ApiApplication.objects.get(id=object_id)\n except ApiApplication.DoesNotExist:\n return\n\n if instance.status == ApiApplicationStatus.active:\n raise DeleteAborted\n\n task = deletions.get(\n model=ApiApplication, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_api_application.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.generic_delete\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef generic_delete(app_label, model_name, object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import User\n\n model = apps.get_model(app_label, model_name)\n\n try:\n instance = model.objects.get(id=object_id)\n except model.DoesNotExist:\n return\n\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=model,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n generic_delete.apply_async(\n kwargs={\n \"app_label\": app_label,\n \"model_name\": model_name,\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"actor_id\": actor_id,\n },\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_repository\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_repository(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Repository, User\n\n try:\n instance = Repository.objects.get(id=object_id)\n except Repository.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n task = deletions.get(\n model=Repository,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_repository.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization_integration\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n 
max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization_integration(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import OrganizationIntegration, Repository\n\n try:\n instance = OrganizationIntegration.objects.get(id=object_id)\n except OrganizationIntegration.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # dissociate repos from that integration\n Repository.objects.filter(\n organization_id=instance.organization_id, integration_id=instance.integration_id\n ).update(integration_id=None)\n\n task = deletions.get(\n model=OrganizationIntegration,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization_integration.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n", "path": "src/sentry/tasks/deletion.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom uuid import uuid4\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.utils import timezone\n\nfrom sentry.constants import ObjectStatus\nfrom sentry.exceptions import DeleteAborted\nfrom sentry.signals import pending_delete\nfrom sentry.tasks.base import instrumented_task, retry, track_group_async_operation\n\n# in prod we run with infinite retries to recover from errors\n# in debug/development, we assume these tasks generally shouldn't fail\nMAX_RETRIES = 1 if settings.DEBUG else 5\n\n\n@instrumented_task(name=\"sentry.tasks.deletion.run_scheduled_deletions\", queue=\"cleanup\")\ndef run_scheduled_deletions():\n from sentry.models import ScheduledDeletion\n\n queryset = ScheduledDeletion.objects.filter(\n in_progress=False, aborted=False, date_scheduled__lte=timezone.now()\n )\n for item in queryset:\n with transaction.atomic():\n affected = ScheduledDeletion.objects.filter(\n id=item.id, in_progress=False, aborted=False\n ).update(in_progress=True)\n if not affected:\n continue\n\n run_deletion.delay(deletion_id=item.id)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.run_deletion\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef run_deletion(deletion_id):\n from sentry import deletions\n from sentry.models import ScheduledDeletion\n\n try:\n deletion = ScheduledDeletion.objects.get(id=deletion_id)\n except ScheduledDeletion.DoesNotExist:\n return\n\n if deletion.aborted:\n raise DeleteAborted\n\n if not deletion.in_progress:\n actor = deletion.get_actor()\n instance = deletion.get_instance()\n with transaction.atomic():\n deletion.update(in_progress=True)\n pending_delete.send(sender=type(instance), instance=instance, actor=actor)\n\n task = deletions.get(\n model=deletion.get_model(),\n query={\"id\": deletion.object_id},\n transaction_id=deletion.guid,\n actor_id=deletion.actor_id,\n )\n has_more = task.chunk()\n if has_more:\n run_deletion.apply_async(kwargs={\"deletion_id\": deletion_id}, countdown=15)\n deletion.delete()\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.revoke_api_tokens\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef revoke_api_tokens(object_id, transaction_id=None, timestamp=None, **kwargs):\n from sentry.models import ApiToken\n\n 
queryset = ApiToken.objects.filter(application=object_id)\n if timestamp:\n queryset = queryset.filter(date_added__lte=timestamp)\n\n # we're using a slow deletion strategy to avoid a lot of custom code for\n # postgres\n has_more = False\n for obj in queryset[:1000]:\n obj.delete()\n has_more = True\n\n if has_more:\n revoke_api_tokens.apply_async(\n kwargs={\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"timestamp\": timestamp,\n },\n countdown=15,\n )\n return has_more\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Organization, OrganizationStatus\n\n try:\n instance = Organization.objects.get(id=object_id)\n except Organization.DoesNotExist:\n return\n\n if instance.status == OrganizationStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != OrganizationStatus.DELETION_IN_PROGRESS:\n pending_delete.send(sender=type(instance), instance=instance)\n\n task = deletions.get(\n model=Organization,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n actor_id=actor_id,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_team\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_team(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Team, TeamStatus\n\n try:\n instance = Team.objects.get(id=object_id)\n except Team.DoesNotExist:\n return\n\n if instance.status == TeamStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Team, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_team.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_project\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_project(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Project, ProjectStatus\n\n try:\n instance = Project.objects.get(id=object_id)\n except Project.DoesNotExist:\n return\n\n if instance.status == ProjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Project, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_project.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_groups\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\n@track_group_async_operation\ndef delete_groups(object_ids, transaction_id=None, eventstream_state=None, **kwargs):\n from sentry import deletions, eventstream\n from sentry.models import Group\n\n transaction_id = transaction_id or 
uuid4().hex\n\n max_batch_size = 100\n current_batch, rest = object_ids[:max_batch_size], object_ids[max_batch_size:]\n\n task = deletions.get(\n model=Group, query={\"id__in\": current_batch}, transaction_id=transaction_id\n )\n has_more = task.chunk()\n if has_more or rest:\n delete_groups.apply_async(\n kwargs={\n \"object_ids\": object_ids if has_more else rest,\n \"transaction_id\": transaction_id,\n \"eventstream_state\": eventstream_state,\n },\n countdown=15,\n )\n else:\n # all groups have been deleted\n if eventstream_state:\n eventstream.end_delete_groups(eventstream_state)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_api_application\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_api_application(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import ApiApplication, ApiApplicationStatus\n\n try:\n instance = ApiApplication.objects.get(id=object_id)\n except ApiApplication.DoesNotExist:\n return\n\n if instance.status == ApiApplicationStatus.active:\n raise DeleteAborted\n\n task = deletions.get(\n model=ApiApplication, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_api_application.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.generic_delete\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef generic_delete(app_label, model_name, object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import User\n\n model = apps.get_model(app_label, model_name)\n\n try:\n instance = model.objects.get(id=object_id)\n except model.DoesNotExist:\n return\n\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=model,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n generic_delete.apply_async(\n kwargs={\n \"app_label\": app_label,\n \"model_name\": model_name,\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"actor_id\": actor_id,\n },\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_repository\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_repository(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Repository, User\n\n try:\n instance = Repository.objects.get(id=object_id)\n except Repository.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n task = deletions.get(\n model=Repository,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = 
task.chunk()\n if has_more:\n delete_repository.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization_integration\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization_integration(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import OrganizationIntegration, Repository\n\n try:\n instance = OrganizationIntegration.objects.get(id=object_id)\n except OrganizationIntegration.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # dissociate repos from that integration\n Repository.objects.filter(\n organization_id=instance.organization_id, integration_id=instance.integration_id\n ).update(integration_id=None)\n\n task = deletions.get(\n model=OrganizationIntegration,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization_integration.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n", "path": "src/sentry/tasks/deletion.py"}]} |
gh_patches_debug_1472 | rasdani/github-patches | git_diff | oppia__oppia-1713 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add an OutputContains rule to the CodeRepl interaction.
We've had a request to add an OutputContains rule to the CodeRepl interaction.
The use case is as follows: the student will type in the body of a function, and their code will be checked by calling the function on several inputs and printing the results. We don't want to stop the student from printing their own stuff from the function first, though, hence the idea of checking to see whether a substring of the student's output matches the expected output.
Note that this is a straightforward starter project. The files to modify are extensions/interactions/CodeRepl/CodeRepl.js (see codeReplRulesService) and the corresponding test suite in extensions/interactions/CodeRepl/CodeReplRulesServiceSpec.js.
/cc @anuzis
--- END ISSUE ---
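
A minimal sketch of the substring check the issue describes; the helper name and the CodeEvaluation dict layout are assumptions, and as the issue notes the student-facing check would ultimately live in codeReplRulesService in CodeRepl.js with a matching rule class on the backend.

```python
# Illustrative only: an "output contains" rule reduces to a substring test on the
# evaluation's captured output, regardless of anything else the student printed.
def output_contains(code_evaluation, expected_substring):
    # code_evaluation is assumed to look like
    # {'code': ..., 'output': ..., 'evaluation': ..., 'error': ...}
    return expected_substring in code_evaluation.get('output', '')


print(output_contains({'output': 'debugging...\n42\n'}, '42'))  # True
print(output_contains({'output': 'wrong answer\n'}, '42'))      # False
```
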
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `extensions/rules/code_evaluation.py`
Content:
```
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, softwar
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Rules for CodeEvaluation objects."""
18
19 from extensions.rules import base
20
21
22 class CodeEquals(base.CodeEvaluationRule):
23 description = 'has code equal to {{x|CodeString}}'
24
25
26 class CodeContains(base.CodeEvaluationRule):
27 description = 'has code that contains {{x|CodeString}}'
28
29
30 class CodeDoesNotContain(base.CodeEvaluationRule):
31 description = 'has code that does not contain {{x|CodeString}}'
32
33
34 class OutputEquals(base.CodeEvaluationRule):
35 description = 'has output equal to {{x|CodeString}}'
36
37
38 class ResultsInError(base.CodeEvaluationRule):
39 description = 'results in an error when run'
40
41
42 class ErrorContains(base.CodeEvaluationRule):
43 description = (
44 'has error message that contains {{x|UnicodeString}}')
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/extensions/rules/code_evaluation.py b/extensions/rules/code_evaluation.py
--- a/extensions/rules/code_evaluation.py
+++ b/extensions/rules/code_evaluation.py
@@ -30,6 +30,8 @@
class CodeDoesNotContain(base.CodeEvaluationRule):
description = 'has code that does not contain {{x|CodeString}}'
+class OutputContains(base.CodeEvaluationRule):
+ description = 'has output that contains {{x|CodeString}}'
class OutputEquals(base.CodeEvaluationRule):
description = 'has output equal to {{x|CodeString}}'
| {"golden_diff": "diff --git a/extensions/rules/code_evaluation.py b/extensions/rules/code_evaluation.py\n--- a/extensions/rules/code_evaluation.py\n+++ b/extensions/rules/code_evaluation.py\n@@ -30,6 +30,8 @@\n class CodeDoesNotContain(base.CodeEvaluationRule):\n description = 'has code that does not contain {{x|CodeString}}'\n \n+class OutputContains(base.CodeEvaluationRule):\n+ description = 'has output that contains {{x|CodeString}}'\n \n class OutputEquals(base.CodeEvaluationRule):\n description = 'has output equal to {{x|CodeString}}'\n", "issue": "Add an OutputContains rule to the CodeRepl interaction.\nWe've had a request to add an OutputContains rule to the CodeRepl interaction.\n\nThe use case is as follows: the student will type in the body of a function, and their code will be checked by calling the function on several inputs and printing the results. We don't want to stop the student from printing their own stuff from the function first, though, hence the idea of checking to see whether a substring of the student's output matches the expected output.\n\nNote that this is a straightforward starter project. The files to modify are extensions/interactions/CodeRepl/CodeRepl.js (see codeReplRulesService) and the corresponding test suite in extensions/interactions/CodeRepl/CodeReplRulesServiceSpec.js.\n\n/cc @anuzis \n\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Rules for CodeEvaluation objects.\"\"\"\n\nfrom extensions.rules import base\n\n\nclass CodeEquals(base.CodeEvaluationRule):\n description = 'has code equal to {{x|CodeString}}'\n\n\nclass CodeContains(base.CodeEvaluationRule):\n description = 'has code that contains {{x|CodeString}}'\n\n\nclass CodeDoesNotContain(base.CodeEvaluationRule):\n description = 'has code that does not contain {{x|CodeString}}'\n\n\nclass OutputEquals(base.CodeEvaluationRule):\n description = 'has output equal to {{x|CodeString}}'\n\n\nclass ResultsInError(base.CodeEvaluationRule):\n description = 'results in an error when run'\n\n\nclass ErrorContains(base.CodeEvaluationRule):\n description = (\n 'has error message that contains {{x|UnicodeString}}')\n", "path": "extensions/rules/code_evaluation.py"}], "after_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Rules for CodeEvaluation objects.\"\"\"\n\nfrom extensions.rules import base\n\n\nclass CodeEquals(base.CodeEvaluationRule):\n description = 'has code equal to {{x|CodeString}}'\n\n\nclass CodeContains(base.CodeEvaluationRule):\n description = 'has code that contains {{x|CodeString}}'\n\n\nclass CodeDoesNotContain(base.CodeEvaluationRule):\n description = 'has code that does not contain {{x|CodeString}}'\n\nclass OutputContains(base.CodeEvaluationRule):\n description = 'has output that contains {{x|CodeString}}'\n\nclass OutputEquals(base.CodeEvaluationRule):\n description = 'has output equal to {{x|CodeString}}'\n\n\nclass ResultsInError(base.CodeEvaluationRule):\n description = 'results in an error when run'\n\n\nclass ErrorContains(base.CodeEvaluationRule):\n description = (\n 'has error message that contains {{x|UnicodeString}}')\n", "path": "extensions/rules/code_evaluation.py"}]} |
gh_patches_debug_1473 | rasdani/github-patches | git_diff | optuna__optuna-56 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incompatibility with old versions of SQLAlchemy.
Connecting to PostgreSQL fails with old versions of SQLAlchemy raising an error: `sqlalchemy.exc.CompileError: Postgresql ENUM type requires a name`. This error is resolved once sqlalchemy version is updated.
For example:
```python
>>> import sqlalchemy
>>> sqlalchemy.__version__
'1.0.13'
>>> from pfnopt.storages import RDBStorage
>>> RDBStorage(url='postgresql://pfnopt:somepassword@localhost:5432/some_db')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/sano/PycharmProjects/pfnopt/pfnopt/storages/rdb.py", line 85, in __init__
Base.metadata.create_all(self.engine)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/schema.py", line 3695, in create_all
tables=tables)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 1856, in _run_visitor
conn._run_visitor(visitorcallable, element, **kwargs)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 1481, in _run_visitor
**kwargs).traverse_single(element)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py", line 121, in traverse_single
return meth(obj, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py", line 720, in visit_metadata
_ddl_runner=self)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/event/attr.py", line 256, in __call__
fn(*args, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/util/langhelpers.py", line 546, in __call__
return getattr(self.target, self.name)(*arg, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/sqltypes.py", line 1040, in _on_metadata_create
t._on_metadata_create(target, bind, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1379, in _on_metadata_create
self.create(bind=bind, checkfirst=checkfirst)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1317, in create
bind.execute(CreateEnumType(self))
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 914, in execute
return meth(self, multiparams, params)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py", line 68, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 962, in _execute_ddl
compiled = ddl.compile(dialect=dialect)
File "<string>", line 1, in <lambda>
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/elements.py", line 494, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py", line 26, in _compiler
return dialect.ddl_compiler(dialect, self, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py", line 190, in __init__
self.string = self.process(self.statement, **compile_kwargs)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py", line 213, in process
return obj._compiler_dispatch(self, **kwargs)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py", line 81, in _compiler_dispatch
return meth(self, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1613, in visit_create_enum_type
self.preparer.format_type(type_),
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1857, in format_type
raise exc.CompileError("Postgresql ENUM type requires a name.")
sqlalchemy.exc.CompileError: Postgresql ENUM type requires a name.
```
--- END ISSUE ---
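
For context, a hedged sketch of what triggers the error: against PostgreSQL, SQLAlchemy emits CREATE TYPE for Enum columns and needs a type name to do so; newer releases (>= 1.1) can derive that name from a Python enum class, which older 1.0.x releases presumably could not in this code path. The table, column, and type names below are invented, not pfnopt's schema.

```python
# Hedged sketch: giving the Enum an explicit name avoids the
# "Postgresql ENUM type requires a name" error on any SQLAlchemy version.
import enum

import sqlalchemy as sa

metadata = sa.MetaData()


class TrialState(enum.Enum):
    RUNNING = 1
    COMPLETE = 2


trials = sa.Table(
    "trials",
    metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    # Works on old and new releases: the PostgreSQL ENUM gets an explicit name.
    sa.Column("state", sa.Enum("RUNNING", "COMPLETE", name="trialstate")),
    # On SQLAlchemy >= 1.1 this form also works; the name comes from TrialState:
    # sa.Column("state2", sa.Enum(TrialState)),
)
```
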
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 from setuptools import find_packages
3 from setuptools import setup
4 import sys
5
6
7 def get_version():
8 version_filepath = os.path.join(os.path.dirname(__file__), 'pfnopt', 'version.py')
9 with open(version_filepath) as f:
10 for line in f:
11 if line.startswith('__version__'):
12 return line.strip().split()[-1][1:-1]
13 assert False
14
15
16 tests_require = ['pytest', 'hacking', 'mock']
17 if sys.version_info[0] == 3:
18 tests_require.append('mypy')
19
20
21 setup(
22 name='pfnopt',
23 version=get_version(),
24 description='',
25 author='Takuya Akiba',
26 author_email='[email protected]',
27 packages=find_packages(),
28 install_requires=['sqlalchemy', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],
29 tests_require=tests_require,
30 extras_require={'testing': tests_require},
31 entry_points={
32 'console_scripts': ['pfnopt = pfnopt.cli:main'],
33 'pfnopt.command': ['mkstudy = pfnopt.cli:MakeStudy']
34 }
35 )
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@
author='Takuya Akiba',
author_email='[email protected]',
packages=find_packages(),
- install_requires=['sqlalchemy', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],
+ install_requires=['sqlalchemy>=1.1.0', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],
tests_require=tests_require,
extras_require={'testing': tests_require},
entry_points={
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,7 +25,7 @@\n author='Takuya Akiba',\n author_email='[email protected]',\n packages=find_packages(),\n- install_requires=['sqlalchemy', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],\n+ install_requires=['sqlalchemy>=1.1.0', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],\n tests_require=tests_require,\n extras_require={'testing': tests_require},\n entry_points={\n", "issue": "Incompatibility with old versions of SQLAlchemy.\nConnecting to PostgreSQL fails with old versions of SQLAlchemy raising an error: `sqlalchemy.exc.CompileError: Postgresql ENUM type requires a name`. This error is resolved once sqlalchemy version is updated.\r\n\r\nFor example:\r\n```python\r\n>>> import sqlalchemy\r\n>>> sqlalchemy.__version__\r\n'1.0.13'\r\n>>> from pfnopt.storages import RDBStorage\r\n>>> RDBStorage(url='postgresql://pfnopt:somepassword@localhost:5432/some_db')\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/sano/PycharmProjects/pfnopt/pfnopt/storages/rdb.py\", line 85, in __init__\r\n Base.metadata.create_all(self.engine)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/schema.py\", line 3695, in create_all\r\n tables=tables)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py\", line 1856, in _run_visitor\r\n conn._run_visitor(visitorcallable, element, **kwargs)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py\", line 1481, in _run_visitor\r\n **kwargs).traverse_single(element)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py\", line 121, in traverse_single\r\n return meth(obj, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py\", line 720, in visit_metadata\r\n _ddl_runner=self)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/event/attr.py\", line 256, in __call__\r\n fn(*args, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/util/langhelpers.py\", line 546, in __call__\r\n return getattr(self.target, self.name)(*arg, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/sqltypes.py\", line 1040, in _on_metadata_create\r\n t._on_metadata_create(target, bind, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py\", line 1379, in _on_metadata_create\r\n self.create(bind=bind, checkfirst=checkfirst)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py\", line 1317, in create\r\n bind.execute(CreateEnumType(self))\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py\", line 914, in execute\r\n return meth(self, multiparams, params)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py\", line 68, in _execute_on_connection\r\n return connection._execute_ddl(self, multiparams, params)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py\", line 962, in _execute_ddl\r\n compiled = ddl.compile(dialect=dialect)\r\n File \"<string>\", line 1, in <lambda>\r\n File 
\"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/elements.py\", line 494, in compile\r\n return self._compiler(dialect, bind=bind, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py\", line 26, in _compiler\r\n return dialect.ddl_compiler(dialect, self, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py\", line 190, in __init__\r\n self.string = self.process(self.statement, **compile_kwargs)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py\", line 213, in process\r\n return obj._compiler_dispatch(self, **kwargs)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py\", line 81, in _compiler_dispatch\r\n return meth(self, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py\", line 1613, in visit_create_enum_type\r\n self.preparer.format_type(type_),\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py\", line 1857, in format_type\r\n raise exc.CompileError(\"Postgresql ENUM type requires a name.\")\r\nsqlalchemy.exc.CompileError: Postgresql ENUM type requires a name.\r\n```\n", "before_files": [{"content": "import os\nfrom setuptools import find_packages\nfrom setuptools import setup\nimport sys\n\n\ndef get_version():\n version_filepath = os.path.join(os.path.dirname(__file__), 'pfnopt', 'version.py')\n with open(version_filepath) as f:\n for line in f:\n if line.startswith('__version__'):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ntests_require = ['pytest', 'hacking', 'mock']\nif sys.version_info[0] == 3:\n tests_require.append('mypy')\n\n\nsetup(\n name='pfnopt',\n version=get_version(),\n description='',\n author='Takuya Akiba',\n author_email='[email protected]',\n packages=find_packages(),\n install_requires=['sqlalchemy', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],\n tests_require=tests_require,\n extras_require={'testing': tests_require},\n entry_points={\n 'console_scripts': ['pfnopt = pfnopt.cli:main'],\n 'pfnopt.command': ['mkstudy = pfnopt.cli:MakeStudy']\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nfrom setuptools import find_packages\nfrom setuptools import setup\nimport sys\n\n\ndef get_version():\n version_filepath = os.path.join(os.path.dirname(__file__), 'pfnopt', 'version.py')\n with open(version_filepath) as f:\n for line in f:\n if line.startswith('__version__'):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ntests_require = ['pytest', 'hacking', 'mock']\nif sys.version_info[0] == 3:\n tests_require.append('mypy')\n\n\nsetup(\n name='pfnopt',\n version=get_version(),\n description='',\n author='Takuya Akiba',\n author_email='[email protected]',\n packages=find_packages(),\n install_requires=['sqlalchemy>=1.1.0', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],\n tests_require=tests_require,\n extras_require={'testing': tests_require},\n entry_points={\n 'console_scripts': ['pfnopt = pfnopt.cli:main'],\n 'pfnopt.command': ['mkstudy = pfnopt.cli:MakeStudy']\n }\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1474 | rasdani/github-patches | git_diff | freedomofpress__securedrop-4391 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Source Interface requests fail with 500 error, due to session issue
## Description
In some situations, requests to the source interface may start to fail, returning the 500 error page. Once the 500 errors start, they continue until the Tor Browser cache is cleared, either explicitly or by starting a new browser session. With source error logging enabled, the following errors are seen on failing requests:
```
[Thu Apr 18 09:46:09.516056 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] [2019-04-18 09:46:09,510] ERROR in app: Exception on / [GET]
[Thu Apr 18 09:46:09.516238 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] Traceback (most recent call last):
[Thu Apr 18 09:46:09.516279 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2292, in wsgi_app
[Thu Apr 18 09:46:09.516317 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] response = self.full_dispatch_request()
[Thu Apr 18 09:46:09.516363 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1815, in full_dispatch_request
[Thu Apr 18 09:46:09.516442 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = self.handle_user_exception(e)
[Thu Apr 18 09:46:09.516479 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1718, in handle_user_exception
[Thu Apr 18 09:46:09.516514 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] reraise(exc_type, exc_value, tb)
[Thu Apr 18 09:46:09.516549 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1811, in full_dispatch_request
[Thu Apr 18 09:46:09.516584 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = self.preprocess_request()
[Thu Apr 18 09:46:09.516619 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2087, in preprocess_request
[Thu Apr 18 09:46:09.516654 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = func()
[Thu Apr 18 09:46:09.516688 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/var/www/securedrop/source_app/decorators.py", line 23, in decorated_function
[Thu Apr 18 09:46:09.516724 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] return f(*args, **kwargs)
[Thu Apr 18 09:46:09.516758 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/var/www/securedrop/source_app/__init__.py", line 159, in setup_g
[Thu Apr 18 09:46:09.516793 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] g.codename = session['codename']
[Thu Apr 18 09:46:09.516828 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 377, in <lambda>
[Thu Apr 18 09:46:09.516864 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] __getitem__ = lambda x, i: x._get_current_object()[i]
[Thu Apr 18 09:46:09.516899 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/sessions.py", line 83, in __getitem__
[Thu Apr 18 09:46:09.516933 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] return super(SecureCookieSession, self).__getitem__(key)
[Thu Apr 18 09:46:09.516968 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] KeyError: 'codename'
```
## Steps to Reproduce
This error was initially hit by:
1) starting a source interface session on a 0.12.1 Xenial install
2) updating the 0.12.1 Xenial install to 0.12.2~rc1 via cron-apt
3) running a 0.12.1 db restore against the 0.12.2 database
4) attempting to continue the source session.
It's also been reproduced during a test session by creating multiple sources and logging in and out repeatedly (h/t @eloquence ), but is not reliably reproducible.
## Expected Behavior
Source Interface requests for valid URLs return the correct results.
## Actual Behavior
SI requests all return 500 errors.
## Comments
--- END ISSUE ---
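
A hedged sketch of the defensive pattern the traceback points at (not SecureDrop's actual patch): treat a session that claims to be logged in but has no stored codename as stale, instead of letting the KeyError surface as a 500.

```python
# Illustrative only; the function name mirrors the traceback, but the real fix
# in source_app/__init__.py may differ. 'main.index' matches the blueprint and
# route shown in the file listing below.
from flask import flash, g, redirect, session, url_for


def setup_g():
    if session.get('logged_in'):
        codename = session.get('codename')
        if codename is None:
            # Stale or partial session (e.g. after a restore/upgrade): log the
            # source out cleanly rather than raising KeyError -> HTTP 500.
            session.clear()
            flash('Your session expired. Please log in again.', 'error')
            return redirect(url_for('main.index'))
        g.codename = codename
```
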
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source_app/main.py`
Content:
```
1 import operator
2 import os
3 import io
4 import six
5
6 from datetime import datetime
7 from flask import (Blueprint, render_template, flash, redirect, url_for, g,
8 session, current_app, request, Markup, abort)
9 from flask_babel import gettext
10 from sqlalchemy.exc import IntegrityError
11
12 import store
13
14 from db import db
15 from models import Source, Submission, Reply, get_one_or_else
16 from source_app.decorators import login_required
17 from source_app.utils import (logged_in, generate_unique_codename,
18 async_genkey, normalize_timestamps,
19 valid_codename, get_entropy_estimate)
20 from source_app.forms import LoginForm
21
22
23 def make_blueprint(config):
24 view = Blueprint('main', __name__)
25
26 @view.route('/')
27 def index():
28 return render_template('index.html')
29
30 @view.route('/generate', methods=('GET', 'POST'))
31 def generate():
32 if logged_in():
33 flash(gettext(
34 "You were redirected because you are already logged in. "
35 "If you want to create a new account, you should log out "
36 "first."),
37 "notification")
38 return redirect(url_for('.lookup'))
39
40 codename = generate_unique_codename(config)
41 session['codename'] = codename
42 session['new_user'] = True
43 return render_template('generate.html', codename=codename)
44
45 @view.route('/org-logo')
46 def select_logo():
47 if os.path.exists(os.path.join(current_app.static_folder, 'i',
48 'custom_logo.png')):
49 return redirect(url_for('static', filename='i/custom_logo.png'))
50 else:
51 return redirect(url_for('static', filename='i/logo.png'))
52
53 @view.route('/create', methods=['POST'])
54 def create():
55 filesystem_id = current_app.crypto_util.hash_codename(
56 session['codename'])
57
58 source = Source(filesystem_id, current_app.crypto_util.display_id())
59 db.session.add(source)
60 try:
61 db.session.commit()
62 except IntegrityError as e:
63 db.session.rollback()
64 current_app.logger.error(
65 "Attempt to create a source with duplicate codename: %s" %
66 (e,))
67
68 # Issue 2386: don't log in on duplicates
69 del session['codename']
70 abort(500)
71 else:
72 os.mkdir(current_app.storage.path(filesystem_id))
73
74 session['logged_in'] = True
75 return redirect(url_for('.lookup'))
76
77 @view.route('/lookup', methods=('GET',))
78 @login_required
79 def lookup():
80 replies = []
81 source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \
82 .filter(Reply.deleted_by_source == False).all() # noqa
83
84 for reply in source_inbox:
85 reply_path = current_app.storage.path(
86 g.filesystem_id,
87 reply.filename,
88 )
89 try:
90 with io.open(reply_path, "rb") as f:
91 contents = f.read()
92 reply_obj = current_app.crypto_util.decrypt(g.codename, contents)
93 if six.PY2: # Python2
94 reply.decrypted = reply_obj.decode('utf-8')
95 else:
96 reply.decrypted = reply_obj
97 except UnicodeDecodeError:
98 current_app.logger.error("Could not decode reply %s" %
99 reply.filename)
100 else:
101 reply.date = datetime.utcfromtimestamp(
102 os.stat(reply_path).st_mtime)
103 replies.append(reply)
104
105 # Sort the replies by date
106 replies.sort(key=operator.attrgetter('date'), reverse=True)
107
108 # Generate a keypair to encrypt replies from the journalist
109 # Only do this if the journalist has flagged the source as one
110 # that they would like to reply to. (Issue #140.)
111 if not current_app.crypto_util.getkey(g.filesystem_id) and \
112 g.source.flagged:
113 db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']
114 async_genkey(current_app.crypto_util,
115 db_uri,
116 g.filesystem_id,
117 g.codename)
118
119 return render_template(
120 'lookup.html',
121 codename=g.codename,
122 replies=replies,
123 flagged=g.source.flagged,
124 new_user=session.get('new_user', None),
125 haskey=current_app.crypto_util.getkey(
126 g.filesystem_id))
127
128 @view.route('/submit', methods=('POST',))
129 @login_required
130 def submit():
131 msg = request.form['msg']
132 fh = None
133 if 'fh' in request.files:
134 fh = request.files['fh']
135
136 # Don't submit anything if it was an "empty" submission. #878
137 if not (msg or fh):
138 flash(gettext(
139 "You must enter a message or choose a file to submit."),
140 "error")
141 return redirect(url_for('main.lookup'))
142
143 fnames = []
144 journalist_filename = g.source.journalist_filename
145 first_submission = g.source.interaction_count == 0
146
147 if msg:
148 g.source.interaction_count += 1
149 fnames.append(
150 current_app.storage.save_message_submission(
151 g.filesystem_id,
152 g.source.interaction_count,
153 journalist_filename,
154 msg))
155 if fh:
156 g.source.interaction_count += 1
157 fnames.append(
158 current_app.storage.save_file_submission(
159 g.filesystem_id,
160 g.source.interaction_count,
161 journalist_filename,
162 fh.filename,
163 fh.stream))
164
165 if first_submission:
166 msg = render_template('first_submission_flashed_message.html')
167 flash(Markup(msg), "success")
168
169 else:
170 if msg and not fh:
171 html_contents = gettext('Thanks! We received your message.')
172 elif not msg and fh:
173 html_contents = gettext('Thanks! We received your document.')
174 else:
175 html_contents = gettext('Thanks! We received your message and '
176 'document.')
177
178 msg = render_template('next_submission_flashed_message.html',
179 html_contents=html_contents)
180 flash(Markup(msg), "success")
181
182 new_submissions = []
183 for fname in fnames:
184 submission = Submission(g.source, fname)
185 db.session.add(submission)
186 new_submissions.append(submission)
187
188 if g.source.pending:
189 g.source.pending = False
190
191 # Generate a keypair now, if there's enough entropy (issue #303)
192 # (gpg reads 300 bytes from /dev/random)
193 entropy_avail = get_entropy_estimate()
194 if entropy_avail >= 2400:
195 db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']
196
197 async_genkey(current_app.crypto_util,
198 db_uri,
199 g.filesystem_id,
200 g.codename)
201 current_app.logger.info("generating key, entropy: {}".format(
202 entropy_avail))
203 else:
204 current_app.logger.warn(
205 "skipping key generation. entropy: {}".format(
206 entropy_avail))
207
208 g.source.last_updated = datetime.utcnow()
209 db.session.commit()
210
211 for sub in new_submissions:
212 store.async_add_checksum_for_file(sub)
213
214 normalize_timestamps(g.filesystem_id)
215
216 return redirect(url_for('main.lookup'))
217
218 @view.route('/delete', methods=('POST',))
219 @login_required
220 def delete():
221 """This deletes the reply from the source's inbox, but preserves
222 the history for journalists such that they can view conversation
223 history.
224 """
225
226 query = Reply.query.filter_by(
227 filename=request.form['reply_filename'],
228 source_id=g.source.id)
229 reply = get_one_or_else(query, current_app.logger, abort)
230 reply.deleted_by_source = True
231 db.session.add(reply)
232 db.session.commit()
233
234 flash(gettext("Reply deleted"), "notification")
235 return redirect(url_for('.lookup'))
236
237 @view.route('/delete-all', methods=('POST',))
238 @login_required
239 def batch_delete():
240 replies = Reply.query.filter(Reply.source_id == g.source.id) \
241 .filter(Reply.deleted_by_source == False).all() # noqa
242 if len(replies) == 0:
243 current_app.logger.error("Found no replies when at least one was "
244 "expected")
245 return redirect(url_for('.lookup'))
246
247 for reply in replies:
248 reply.deleted_by_source = True
249 db.session.add(reply)
250 db.session.commit()
251
252 flash(gettext("All replies have been deleted"), "notification")
253 return redirect(url_for('.lookup'))
254
255 @view.route('/login', methods=('GET', 'POST'))
256 def login():
257 form = LoginForm()
258 if form.validate_on_submit():
259 codename = request.form['codename'].strip()
260 if valid_codename(codename):
261 session.update(codename=codename, logged_in=True)
262 return redirect(url_for('.lookup', from_login='1'))
263 else:
264 current_app.logger.info(
265 "Login failed for invalid codename")
266 flash(gettext("Sorry, that is not a recognized codename."),
267 "error")
268 return render_template('login.html', form=form)
269
270 @view.route('/logout')
271 def logout():
272 if logged_in():
273 msg = render_template('logout_flashed_message.html')
274
275 # Clear the session after we render the message so it's localized
276 # If a user specified a locale, save it and restore it
277 user_locale = g.locale
278 session.clear()
279 session['locale'] = user_locale
280
281 flash(Markup(msg), "important hide-if-not-tor-browser")
282 return redirect(url_for('.index'))
283
284 return view
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py
--- a/securedrop/source_app/main.py
+++ b/securedrop/source_app/main.py
@@ -67,6 +67,13 @@
# Issue 2386: don't log in on duplicates
del session['codename']
+
+ # Issue 4361: Delete 'logged_in' if it's in the session
+ try:
+ del session['logged_in']
+ except KeyError:
+ pass
+
abort(500)
else:
os.mkdir(current_app.storage.path(filesystem_id))
| {"golden_diff": "diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py\n--- a/securedrop/source_app/main.py\n+++ b/securedrop/source_app/main.py\n@@ -67,6 +67,13 @@\n \n # Issue 2386: don't log in on duplicates\n del session['codename']\n+\n+ # Issue 4361: Delete 'logged_in' if it's in the session\n+ try:\n+ del session['logged_in']\n+ except KeyError:\n+ pass\n+\n abort(500)\n else:\n os.mkdir(current_app.storage.path(filesystem_id))\n", "issue": "Source Interface requests fail with 500 error, due to session issue\n## Description\r\n\r\nIn some situations, requests to the source interface may start to fail, returning the 500 error page. Once the 500 errors start, they continue until the Tor Browser cache is cleared, either explicitly or by starting a new browser session. With source error logging enabled, the following errors are seen on failing requests:\r\n\r\n```\r\n[Thu Apr 18 09:46:09.516056 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] [2019-04-18 09:46:09,510] ERROR in app: Exception on / [GET]\r\n[Thu Apr 18 09:46:09.516238 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] Traceback (most recent call last):\r\n[Thu Apr 18 09:46:09.516279 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 2292, in wsgi_app\r\n[Thu Apr 18 09:46:09.516317 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] response = self.full_dispatch_request()\r\n[Thu Apr 18 09:46:09.516363 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1815, in full_dispatch_request\r\n[Thu Apr 18 09:46:09.516442 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = self.handle_user_exception(e)\r\n[Thu Apr 18 09:46:09.516479 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1718, in handle_user_exception\r\n[Thu Apr 18 09:46:09.516514 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] reraise(exc_type, exc_value, tb)\r\n[Thu Apr 18 09:46:09.516549 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1811, in full_dispatch_request\r\n[Thu Apr 18 09:46:09.516584 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = self.preprocess_request()\r\n[Thu Apr 18 09:46:09.516619 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 2087, in preprocess_request\r\n[Thu Apr 18 09:46:09.516654 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = func()\r\n[Thu Apr 18 09:46:09.516688 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File \"/var/www/securedrop/source_app/decorators.py\", line 23, in decorated_function\r\n[Thu Apr 18 09:46:09.516724 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] return f(*args, **kwargs)\r\n[Thu Apr 18 09:46:09.516758 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File \"/var/www/securedrop/source_app/__init__.py\", line 159, in setup_g\r\n[Thu Apr 18 09:46:09.516793 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] g.codename = session['codename']\r\n[Thu Apr 18 09:46:09.516828 2019] 
[wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File \"/usr/local/lib/python2.7/dist-packages/werkzeug/local.py\", line 377, in <lambda>\r\n[Thu Apr 18 09:46:09.516864 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] __getitem__ = lambda x, i: x._get_current_object()[i]\r\n[Thu Apr 18 09:46:09.516899 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File \"/usr/local/lib/python2.7/dist-packages/flask/sessions.py\", line 83, in __getitem__\r\n[Thu Apr 18 09:46:09.516933 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] return super(SecureCookieSession, self).__getitem__(key)\r\n[Thu Apr 18 09:46:09.516968 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] KeyError: 'codename'\r\n```\r\n\r\n## Steps to Reproduce\r\nThis error was initially hit by:\r\n1) starting a source interface session on a 0.12.1 Xenial install\r\n2) updating the 0.12.1 Xenial install to 0.12.2~rc1 via cron-apt\r\n3) running a 0.12.1 db restore against the 0.12.2 database\r\n4) attempting to continue the source session.\r\n\r\nIt's also been reproduced during a test session by creating multiple sources and logging in and out repeatedly (h/t @eloquence ), but is not reliably reproducible.\r\n\r\n## Expected Behavior\r\nSource Interface requests for valid URLs return the correct results.\r\n\r\n## Actual Behavior\r\nSI requests all return 500 errors.\r\n\r\n## Comments\r\n\r\n\n", "before_files": [{"content": "import operator\nimport os\nimport io\nimport six\n\nfrom datetime import datetime\nfrom flask import (Blueprint, render_template, flash, redirect, url_for, g,\n session, current_app, request, Markup, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\n\nimport store\n\nfrom db import db\nfrom models import Source, Submission, Reply, get_one_or_else\nfrom source_app.decorators import login_required\nfrom source_app.utils import (logged_in, generate_unique_codename,\n async_genkey, normalize_timestamps,\n valid_codename, get_entropy_estimate)\nfrom source_app.forms import LoginForm\n\n\ndef make_blueprint(config):\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index():\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET', 'POST'))\n def generate():\n if logged_in():\n flash(gettext(\n \"You were redirected because you are already logged in. 
\"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = generate_unique_codename(config)\n session['codename'] = codename\n session['new_user'] = True\n return render_template('generate.html', codename=codename)\n\n @view.route('/org-logo')\n def select_logo():\n if os.path.exists(os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')):\n return redirect(url_for('static', filename='i/custom_logo.png'))\n else:\n return redirect(url_for('static', filename='i/logo.png'))\n\n @view.route('/create', methods=['POST'])\n def create():\n filesystem_id = current_app.crypto_util.hash_codename(\n session['codename'])\n\n source = Source(filesystem_id, current_app.crypto_util.display_id())\n db.session.add(source)\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n current_app.logger.error(\n \"Attempt to create a source with duplicate codename: %s\" %\n (e,))\n\n # Issue 2386: don't log in on duplicates\n del session['codename']\n abort(500)\n else:\n os.mkdir(current_app.storage.path(filesystem_id))\n\n session['logged_in'] = True\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup():\n replies = []\n source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n\n for reply in source_inbox:\n reply_path = current_app.storage.path(\n g.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n reply_obj = current_app.crypto_util.decrypt(g.codename, contents)\n if six.PY2: # Python2\n reply.decrypted = reply_obj.decode('utf-8')\n else:\n reply.decrypted = reply_obj\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not current_app.crypto_util.getkey(g.filesystem_id) and \\\n g.source.flagged:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n\n return render_template(\n 'lookup.html',\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n new_user=session.get('new_user', None),\n haskey=current_app.crypto_util.getkey(\n g.filesystem_id))\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit():\n msg = request.form['msg']\n fh = None\n if 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. 
#878\n if not (msg or fh):\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n return redirect(url_for('main.lookup'))\n\n fnames = []\n journalist_filename = g.source.journalist_filename\n first_submission = g.source.interaction_count == 0\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_message_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n msg))\n if fh:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_file_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n msg = render_template('first_submission_flashed_message.html')\n flash(Markup(msg), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif not msg and fh:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! We received your message and '\n 'document.')\n\n msg = render_template('next_submission_flashed_message.html',\n html_contents=html_contents)\n flash(Markup(msg), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(g.source, fname)\n db.session.add(submission)\n new_submissions.append(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n # (gpg reads 300 bytes from /dev/random)\n entropy_avail = get_entropy_estimate()\n if entropy_avail >= 2400:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n current_app.logger.info(\"generating key, entropy: {}\".format(\n entropy_avail))\n else:\n current_app.logger.warn(\n \"skipping key generation. 
entropy: {}\".format(\n entropy_avail))\n\n g.source.last_updated = datetime.utcnow()\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub)\n\n normalize_timestamps(g.filesystem_id)\n\n return redirect(url_for('main.lookup'))\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete():\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=g.source.id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete():\n replies = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n codename = request.form['codename'].strip()\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('.lookup', from_login='1'))\n else:\n current_app.logger.info(\n \"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"),\n \"error\")\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout():\n if logged_in():\n msg = render_template('logout_flashed_message.html')\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n user_locale = g.locale\n session.clear()\n session['locale'] = user_locale\n\n flash(Markup(msg), \"important hide-if-not-tor-browser\")\n return redirect(url_for('.index'))\n\n return view\n", "path": "securedrop/source_app/main.py"}], "after_files": [{"content": "import operator\nimport os\nimport io\nimport six\n\nfrom datetime import datetime\nfrom flask import (Blueprint, render_template, flash, redirect, url_for, g,\n session, current_app, request, Markup, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\n\nimport store\n\nfrom db import db\nfrom models import Source, Submission, Reply, get_one_or_else\nfrom source_app.decorators import login_required\nfrom source_app.utils import (logged_in, generate_unique_codename,\n async_genkey, normalize_timestamps,\n valid_codename, get_entropy_estimate)\nfrom source_app.forms import LoginForm\n\n\ndef make_blueprint(config):\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index():\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET', 'POST'))\n def generate():\n if logged_in():\n flash(gettext(\n \"You were redirected because you are already logged in. 
\"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = generate_unique_codename(config)\n session['codename'] = codename\n session['new_user'] = True\n return render_template('generate.html', codename=codename)\n\n @view.route('/org-logo')\n def select_logo():\n if os.path.exists(os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')):\n return redirect(url_for('static', filename='i/custom_logo.png'))\n else:\n return redirect(url_for('static', filename='i/logo.png'))\n\n @view.route('/create', methods=['POST'])\n def create():\n filesystem_id = current_app.crypto_util.hash_codename(\n session['codename'])\n\n source = Source(filesystem_id, current_app.crypto_util.display_id())\n db.session.add(source)\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n current_app.logger.error(\n \"Attempt to create a source with duplicate codename: %s\" %\n (e,))\n\n # Issue 2386: don't log in on duplicates\n del session['codename']\n\n # Issue 4361: Delete 'logged_in' if it's in the session\n try:\n del session['logged_in']\n except KeyError:\n pass\n\n abort(500)\n else:\n os.mkdir(current_app.storage.path(filesystem_id))\n\n session['logged_in'] = True\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup():\n replies = []\n source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n\n for reply in source_inbox:\n reply_path = current_app.storage.path(\n g.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n reply_obj = current_app.crypto_util.decrypt(g.codename, contents)\n if six.PY2: # Python2\n reply.decrypted = reply_obj.decode('utf-8')\n else:\n reply.decrypted = reply_obj\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not current_app.crypto_util.getkey(g.filesystem_id) and \\\n g.source.flagged:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n\n return render_template(\n 'lookup.html',\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n new_user=session.get('new_user', None),\n haskey=current_app.crypto_util.getkey(\n g.filesystem_id))\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit():\n msg = request.form['msg']\n fh = None\n if 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. 
#878\n if not (msg or fh):\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n return redirect(url_for('main.lookup'))\n\n fnames = []\n journalist_filename = g.source.journalist_filename\n first_submission = g.source.interaction_count == 0\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_message_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n msg))\n if fh:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_file_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n msg = render_template('first_submission_flashed_message.html')\n flash(Markup(msg), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif not msg and fh:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! We received your message and '\n 'document.')\n\n msg = render_template('next_submission_flashed_message.html',\n html_contents=html_contents)\n flash(Markup(msg), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(g.source, fname)\n db.session.add(submission)\n new_submissions.append(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n # (gpg reads 300 bytes from /dev/random)\n entropy_avail = get_entropy_estimate()\n if entropy_avail >= 2400:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n current_app.logger.info(\"generating key, entropy: {}\".format(\n entropy_avail))\n else:\n current_app.logger.warn(\n \"skipping key generation. 
entropy: {}\".format(\n entropy_avail))\n\n g.source.last_updated = datetime.utcnow()\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub)\n\n normalize_timestamps(g.filesystem_id)\n\n return redirect(url_for('main.lookup'))\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete():\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=g.source.id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete():\n replies = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n codename = request.form['codename'].strip()\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('.lookup', from_login='1'))\n else:\n current_app.logger.info(\n \"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"),\n \"error\")\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout():\n if logged_in():\n msg = render_template('logout_flashed_message.html')\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n user_locale = g.locale\n session.clear()\n session['locale'] = user_locale\n\n flash(Markup(msg), \"important hide-if-not-tor-browser\")\n return redirect(url_for('.index'))\n\n return view\n", "path": "securedrop/source_app/main.py"}]} |
gh_patches_debug_1475 | rasdani/github-patches | git_diff | microsoft__ptvsd-926 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make --host a required switch
`--host` is currently optional, and defaults to `localhost`. The old behavior was to default to `0.0.0.0`, which is not a particularly sane default. However, the new default makes things confusing, since it is applied silently - things just work differently. Changing the switch to be explicit solves that problem, while also forcing the user to consider the security implications of either choice.
--- END ISSUE ---
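For context, here is a standalone sketch (illustrative only, not the ptvsd code itself) contrasting an optional `--host` with a silently applied default against a required one in `argparse` — the behavior the issue asks to make explicit.

```python
import argparse

# Optional --host: the default is applied silently, so behavior just differs.
optional = argparse.ArgumentParser()
optional.add_argument('--host', default='localhost')
optional.add_argument('--port', type=int, required=True)
print(optional.parse_args(['--port', '5678']).host)  # prints 'localhost'

# Required --host: the user must state a choice up front.
required = argparse.ArgumentParser()
required.add_argument('--host', required=True)
required.add_argument('--port', type=int, required=True)
# required.parse_args(['--port', '5678']) would exit with:
#   error: the following arguments are required: --host
```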
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ptvsd/__main__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import argparse
6 import os.path
7 import sys
8
9 from ptvsd._attach import attach_main
10 from ptvsd._local import debug_main, run_main
11 from ptvsd.socket import Address
12 from ptvsd.version import __version__, __author__ # noqa
13
14
15 ##################################
16 # the script
17
18 """
19 For the PyDevd CLI handling see:
20
21 https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py
22 https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)
23 """ # noqa
24
25 PYDEVD_OPTS = {
26 '--file',
27 '--vm_type',
28 }
29
30 PYDEVD_FLAGS = {
31 '--DEBUG',
32 '--DEBUG_RECORD_SOCKET_READS',
33 '--cmd-line',
34 '--module',
35 '--multiproc',
36 '--multiprocess',
37 '--print-in-debugger-startup',
38 '--save-signatures',
39 '--save-threading',
40 '--save-asyncio',
41 '--server',
42 '--qt-support=auto',
43 }
44
45 USAGE = """
46 {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]
47 {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]
48 {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID
49 """ # noqa
50
51
52 def parse_args(argv=None):
53 """Return the parsed args to use in main()."""
54 if argv is None:
55 argv = sys.argv
56 prog = argv[0]
57 if prog == __file__:
58 prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))
59 else:
60 prog = argv[0]
61 argv = argv[1:]
62
63 supported, pydevd, script = _group_args(argv)
64 args = _parse_args(prog, supported)
65 # '--' is used in _run_args to extract pydevd specific args
66 extra = pydevd + ['--']
67 if script:
68 extra += script
69 return args, extra
70
71
72 def _group_args(argv):
73 supported = []
74 pydevd = []
75 script = []
76
77 try:
78 pos = argv.index('--')
79 except ValueError:
80 script = []
81 else:
82 script = argv[pos + 1:]
83 argv = argv[:pos]
84
85 for arg in argv:
86 if arg == '-h' or arg == '--help':
87 return argv, [], script
88
89 gottarget = False
90 skip = 0
91 for i in range(len(argv)):
92 if skip:
93 skip -= 1
94 continue
95
96 arg = argv[i]
97 try:
98 nextarg = argv[i + 1]
99 except IndexError:
100 nextarg = None
101
102 # TODO: Deprecate the PyDevd arg support.
103 # PyDevd support
104 if gottarget:
105 script = argv[i:] + script
106 break
107 if arg == '--file':
108 if nextarg is None: # The filename is missing...
109 pydevd.append(arg)
110 continue # This will get handled later.
111 if nextarg.endswith(':') and '--module' in pydevd:
112 pydevd.remove('--module')
113 arg = '-m'
114 argv[i + 1] = nextarg = nextarg[:-1]
115 else:
116 arg = nextarg
117 skip += 1
118
119 if arg in PYDEVD_OPTS:
120 pydevd.append(arg)
121 if nextarg is not None:
122 pydevd.append(nextarg)
123 skip += 1
124 elif arg in PYDEVD_FLAGS:
125 pydevd.append(arg)
126 elif arg == '--nodebug':
127 supported.append(arg)
128
129 # ptvsd support
130 elif arg in ('--host', '--port', '--pid', '-m'):
131 if arg == '-m' or arg == '--pid':
132 gottarget = True
133 supported.append(arg)
134 if nextarg is not None:
135 supported.append(nextarg)
136 skip += 1
137 elif arg in ('--single-session', '--wait', '--client'):
138 supported.append(arg)
139 elif not arg.startswith('-'):
140 supported.append(arg)
141 gottarget = True
142
143 # unsupported arg
144 else:
145 supported.append(arg)
146 break
147
148 return supported, pydevd, script
149
150
151 def _parse_args(prog, argv):
152 parser = argparse.ArgumentParser(
153 prog=prog,
154 usage=USAGE.format(prog),
155 )
156
157 parser.add_argument('--nodebug', action='store_true')
158 parser.add_argument('--client', action='store_true')
159
160 parser.add_argument('--host')
161 parser.add_argument('--port', type=int, required=True)
162
163 target = parser.add_mutually_exclusive_group(required=True)
164 target.add_argument('-m', dest='module')
165 target.add_argument('--pid', type=int)
166 target.add_argument('filename', nargs='?')
167
168 parser.add_argument('--single-session', action='store_true')
169 parser.add_argument('--wait', action='store_true')
170
171 parser.add_argument('-V', '--version', action='version')
172 parser.version = __version__
173
174 args = parser.parse_args(argv)
175 ns = vars(args)
176
177 host = ns.pop('host', None)
178 port = ns.pop('port')
179 client = ns.pop('client')
180 args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa
181
182 pid = ns.pop('pid')
183 module = ns.pop('module')
184 filename = ns.pop('filename')
185 if pid is not None:
186 args.name = pid
187 args.kind = 'pid'
188 elif module is not None:
189 args.name = module
190 args.kind = 'module'
191 else:
192 args.name = filename
193 args.kind = 'script'
194
195 return args
196
197
198 def handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):
199 if kind == 'pid':
200 attach_main(addr, name, *extra, **kwargs)
201 elif nodebug:
202 run_main(addr, name, kind, *extra, **kwargs)
203 else:
204 debug_main(addr, name, kind, *extra, **kwargs)
205
206
207 def main(argv=None):
208 args, extra = parse_args(argv)
209 handle_args(args.address, args.name, args.kind, extra,
210 nodebug=args.nodebug, singlesession=args.single_session,
211 wait=args.wait)
212
213
214 if __name__ == '__main__':
215 main()
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ptvsd/__main__.py b/ptvsd/__main__.py
--- a/ptvsd/__main__.py
+++ b/ptvsd/__main__.py
@@ -157,7 +157,7 @@
parser.add_argument('--nodebug', action='store_true')
parser.add_argument('--client', action='store_true')
- parser.add_argument('--host')
+ parser.add_argument('--host', required=True)
parser.add_argument('--port', type=int, required=True)
target = parser.add_mutually_exclusive_group(required=True)
| {"golden_diff": "diff --git a/ptvsd/__main__.py b/ptvsd/__main__.py\n--- a/ptvsd/__main__.py\n+++ b/ptvsd/__main__.py\n@@ -157,7 +157,7 @@\n parser.add_argument('--nodebug', action='store_true')\n parser.add_argument('--client', action='store_true')\n \n- parser.add_argument('--host')\n+ parser.add_argument('--host', required=True)\n parser.add_argument('--port', type=int, required=True)\n \n target = parser.add_mutually_exclusive_group(required=True)\n", "issue": "Make --host a required switch\n`--host` is currently optional, and defaults to `localhost`. The old behavior was to default to `0.0.0.0`, which is not a particularly sane default. However, the new default makes things confusing, since it is applied silently - things just work differently. Changing the switch to be explicit solves that problem, while also forcing the user to consider the security implications of either choice.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport argparse\nimport os.path\nimport sys\n\nfrom ptvsd._attach import attach_main\nfrom ptvsd._local import debug_main, run_main\nfrom ptvsd.socket import Address\nfrom ptvsd.version import __version__, __author__ # noqa\n\n\n##################################\n# the script\n\n\"\"\"\nFor the PyDevd CLI handling see:\n\n https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py\n https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)\n\"\"\" # noqa\n\nPYDEVD_OPTS = {\n '--file',\n '--vm_type',\n}\n\nPYDEVD_FLAGS = {\n '--DEBUG',\n '--DEBUG_RECORD_SOCKET_READS',\n '--cmd-line',\n '--module',\n '--multiproc',\n '--multiprocess',\n '--print-in-debugger-startup',\n '--save-signatures',\n '--save-threading',\n '--save-asyncio',\n '--server',\n '--qt-support=auto',\n}\n\nUSAGE = \"\"\"\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]\n {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID\n\"\"\" # noqa\n\n\ndef parse_args(argv=None):\n \"\"\"Return the parsed args to use in main().\"\"\"\n if argv is None:\n argv = sys.argv\n prog = argv[0]\n if prog == __file__:\n prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))\n else:\n prog = argv[0]\n argv = argv[1:]\n\n supported, pydevd, script = _group_args(argv)\n args = _parse_args(prog, supported)\n # '--' is used in _run_args to extract pydevd specific args\n extra = pydevd + ['--']\n if script:\n extra += script\n return args, extra\n\n\ndef _group_args(argv):\n supported = []\n pydevd = []\n script = []\n\n try:\n pos = argv.index('--')\n except ValueError:\n script = []\n else:\n script = argv[pos + 1:]\n argv = argv[:pos]\n\n for arg in argv:\n if arg == '-h' or arg == '--help':\n return argv, [], script\n\n gottarget = False\n skip = 0\n for i in range(len(argv)):\n if skip:\n skip -= 1\n continue\n\n arg = argv[i]\n try:\n nextarg = argv[i + 1]\n except IndexError:\n nextarg = None\n\n # TODO: Deprecate the PyDevd arg support.\n # PyDevd support\n if gottarget:\n script = argv[i:] + script\n break\n if arg == '--file':\n if nextarg is None: # The filename is missing...\n pydevd.append(arg)\n continue # This will get handled later.\n if nextarg.endswith(':') and '--module' in pydevd:\n pydevd.remove('--module')\n arg = '-m'\n argv[i + 1] = nextarg = nextarg[:-1]\n 
else:\n arg = nextarg\n skip += 1\n\n if arg in PYDEVD_OPTS:\n pydevd.append(arg)\n if nextarg is not None:\n pydevd.append(nextarg)\n skip += 1\n elif arg in PYDEVD_FLAGS:\n pydevd.append(arg)\n elif arg == '--nodebug':\n supported.append(arg)\n\n # ptvsd support\n elif arg in ('--host', '--port', '--pid', '-m'):\n if arg == '-m' or arg == '--pid':\n gottarget = True\n supported.append(arg)\n if nextarg is not None:\n supported.append(nextarg)\n skip += 1\n elif arg in ('--single-session', '--wait', '--client'):\n supported.append(arg)\n elif not arg.startswith('-'):\n supported.append(arg)\n gottarget = True\n\n # unsupported arg\n else:\n supported.append(arg)\n break\n\n return supported, pydevd, script\n\n\ndef _parse_args(prog, argv):\n parser = argparse.ArgumentParser(\n prog=prog,\n usage=USAGE.format(prog),\n )\n\n parser.add_argument('--nodebug', action='store_true')\n parser.add_argument('--client', action='store_true')\n\n parser.add_argument('--host')\n parser.add_argument('--port', type=int, required=True)\n\n target = parser.add_mutually_exclusive_group(required=True)\n target.add_argument('-m', dest='module')\n target.add_argument('--pid', type=int)\n target.add_argument('filename', nargs='?')\n\n parser.add_argument('--single-session', action='store_true')\n parser.add_argument('--wait', action='store_true')\n\n parser.add_argument('-V', '--version', action='version')\n parser.version = __version__\n\n args = parser.parse_args(argv)\n ns = vars(args)\n\n host = ns.pop('host', None)\n port = ns.pop('port')\n client = ns.pop('client')\n args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa\n\n pid = ns.pop('pid')\n module = ns.pop('module')\n filename = ns.pop('filename')\n if pid is not None:\n args.name = pid\n args.kind = 'pid'\n elif module is not None:\n args.name = module\n args.kind = 'module'\n else:\n args.name = filename\n args.kind = 'script'\n\n return args\n\n\ndef handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):\n if kind == 'pid':\n attach_main(addr, name, *extra, **kwargs)\n elif nodebug:\n run_main(addr, name, kind, *extra, **kwargs)\n else:\n debug_main(addr, name, kind, *extra, **kwargs)\n\n\ndef main(argv=None):\n args, extra = parse_args(argv)\n handle_args(args.address, args.name, args.kind, extra,\n nodebug=args.nodebug, singlesession=args.single_session,\n wait=args.wait)\n\n\nif __name__ == '__main__':\n main()\n", "path": "ptvsd/__main__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport argparse\nimport os.path\nimport sys\n\nfrom ptvsd._attach import attach_main\nfrom ptvsd._local import debug_main, run_main\nfrom ptvsd.socket import Address\nfrom ptvsd.version import __version__, __author__ # noqa\n\n\n##################################\n# the script\n\n\"\"\"\nFor the PyDevd CLI handling see:\n\n https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py\n https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)\n\"\"\" # noqa\n\nPYDEVD_OPTS = {\n '--file',\n '--vm_type',\n}\n\nPYDEVD_FLAGS = {\n '--DEBUG',\n '--DEBUG_RECORD_SOCKET_READS',\n '--cmd-line',\n '--module',\n '--multiproc',\n '--multiprocess',\n '--print-in-debugger-startup',\n '--save-signatures',\n '--save-threading',\n '--save-asyncio',\n '--server',\n '--qt-support=auto',\n}\n\nUSAGE = \"\"\"\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]\n {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID\n\"\"\" # noqa\n\n\ndef parse_args(argv=None):\n \"\"\"Return the parsed args to use in main().\"\"\"\n if argv is None:\n argv = sys.argv\n prog = argv[0]\n if prog == __file__:\n prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))\n else:\n prog = argv[0]\n argv = argv[1:]\n\n supported, pydevd, script = _group_args(argv)\n args = _parse_args(prog, supported)\n # '--' is used in _run_args to extract pydevd specific args\n extra = pydevd + ['--']\n if script:\n extra += script\n return args, extra\n\n\ndef _group_args(argv):\n supported = []\n pydevd = []\n script = []\n\n try:\n pos = argv.index('--')\n except ValueError:\n script = []\n else:\n script = argv[pos + 1:]\n argv = argv[:pos]\n\n for arg in argv:\n if arg == '-h' or arg == '--help':\n return argv, [], script\n\n gottarget = False\n skip = 0\n for i in range(len(argv)):\n if skip:\n skip -= 1\n continue\n\n arg = argv[i]\n try:\n nextarg = argv[i + 1]\n except IndexError:\n nextarg = None\n\n # TODO: Deprecate the PyDevd arg support.\n # PyDevd support\n if gottarget:\n script = argv[i:] + script\n break\n if arg == '--file':\n if nextarg is None: # The filename is missing...\n pydevd.append(arg)\n continue # This will get handled later.\n if nextarg.endswith(':') and '--module' in pydevd:\n pydevd.remove('--module')\n arg = '-m'\n argv[i + 1] = nextarg = nextarg[:-1]\n else:\n arg = nextarg\n skip += 1\n\n if arg in PYDEVD_OPTS:\n pydevd.append(arg)\n if nextarg is not None:\n pydevd.append(nextarg)\n skip += 1\n elif arg in PYDEVD_FLAGS:\n pydevd.append(arg)\n elif arg == '--nodebug':\n supported.append(arg)\n\n # ptvsd support\n elif arg in ('--host', '--port', '--pid', '-m'):\n if arg == '-m' or arg == '--pid':\n gottarget = True\n supported.append(arg)\n if nextarg is not None:\n supported.append(nextarg)\n skip += 1\n elif arg in ('--single-session', '--wait', '--client'):\n supported.append(arg)\n elif not arg.startswith('-'):\n supported.append(arg)\n gottarget = True\n\n # unsupported arg\n else:\n supported.append(arg)\n break\n\n return supported, pydevd, script\n\n\ndef _parse_args(prog, argv):\n parser = argparse.ArgumentParser(\n prog=prog,\n usage=USAGE.format(prog),\n )\n\n parser.add_argument('--nodebug', action='store_true')\n parser.add_argument('--client', action='store_true')\n\n parser.add_argument('--host', required=True)\n parser.add_argument('--port', type=int, 
required=True)\n\n target = parser.add_mutually_exclusive_group(required=True)\n target.add_argument('-m', dest='module')\n target.add_argument('--pid', type=int)\n target.add_argument('filename', nargs='?')\n\n parser.add_argument('--single-session', action='store_true')\n parser.add_argument('--wait', action='store_true')\n\n parser.add_argument('-V', '--version', action='version')\n parser.version = __version__\n\n args = parser.parse_args(argv)\n ns = vars(args)\n\n host = ns.pop('host', None)\n port = ns.pop('port')\n client = ns.pop('client')\n args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa\n\n pid = ns.pop('pid')\n module = ns.pop('module')\n filename = ns.pop('filename')\n if pid is not None:\n args.name = pid\n args.kind = 'pid'\n elif module is not None:\n args.name = module\n args.kind = 'module'\n else:\n args.name = filename\n args.kind = 'script'\n\n return args\n\n\ndef handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):\n if kind == 'pid':\n attach_main(addr, name, *extra, **kwargs)\n elif nodebug:\n run_main(addr, name, kind, *extra, **kwargs)\n else:\n debug_main(addr, name, kind, *extra, **kwargs)\n\n\ndef main(argv=None):\n args, extra = parse_args(argv)\n handle_args(args.address, args.name, args.kind, extra,\n nodebug=args.nodebug, singlesession=args.single_session,\n wait=args.wait)\n\n\nif __name__ == '__main__':\n main()\n", "path": "ptvsd/__main__.py"}]} |
gh_patches_debug_1476 | rasdani/github-patches | git_diff | ResonantGeoData__ResonantGeoData-470 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Proper testing for rgd_client
We need to implement real tests for the Python client
The tests for this would require running RGD with data prepopulated in the background, then executing the client tests.
--- END ISSUE ---
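As a rough illustration of the kind of test the issue asks for, here is a hypothetical pytest sketch that exercises the `Rgdc` client shown below against a pre-populated, anonymously readable RGD instance. The `RGD_API_URL` environment variable, the fixture, and the import path are assumptions for the sketch, not part of the repository.

```python
# Hypothetical client test: assumes a live RGD instance with demo data loaded.
import os

import pytest

from rgd_client.rgdc import Rgdc  # module path assumed from the file layout below


@pytest.fixture
def client():
    api_url = os.environ.get('RGD_API_URL', 'http://localhost:8000/api')
    return Rgdc(api_url=api_url)


def test_basic_search(client):
    # Expects the backing instance to have at least one spatial entry loaded.
    results = client.search(limit=1)
    assert isinstance(results, list)
```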
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rgd-client/rgd_client/rgdc.py`
Content:
```
1 from base64 import b64encode
2 from dataclasses import dataclass
3 import getpass
4 from pathlib import Path
5 import tempfile
6 from typing import Dict, Iterator, List, Optional, Tuple, Union
7
8 from tqdm import tqdm
9
10 from .session import RgdcSession
11 from .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE
12 from .utils import (
13 DEFAULT_RGD_API,
14 download_checksum_file_to_path,
15 limit_offset_pager,
16 spatial_search_params,
17 spatial_subentry_id,
18 )
19
20
21 @dataclass
22 class RasterDownload:
23 path: Path
24 images: List[Path]
25 ancillary: List[Path]
26
27
28 class Rgdc:
29 def __init__(
30 self,
31 api_url: str = DEFAULT_RGD_API,
32 username: Optional[str] = None,
33 password: Optional[str] = None,
34 ):
35 """
36 Initialize a RGD Client.
37
38 Args:
39 api_url: The base url of the RGD API instance.
40 username: The username to authenticate to the instance with, if any.
41 password: The password associated with the provided username. If None, a prompt will be provided.
42
43 Returns:
44 A new Rgdc instance.
45 """
46 auth_header = None
47
48 # Prompt for password if not provided
49 if username is not None and password is None:
50 password = getpass.getpass()
51
52 if username and password:
53 encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()
54 auth_header = f'Basic {encoded_credentials}'
55
56 self.session = RgdcSession(base_url=api_url, auth_header=auth_header)
57
58 def list_image_tiles(self, image_id: Union[str, int]) -> Dict:
59 """List geodata imagery tiles."""
60 r = self.session.get(f'image_process/imagery/{image_id}/tiles')
61 return r.json()
62
63 def download_image_file(
64 self, image_id: Union[str, int], chunk_size: int = 1024 * 1024
65 ) -> Iterator[bytes]:
66 """
67 Download the associated ImageFile data for this ImageEntry directly from S3.
68
69 Args:
70 image_id: The ID of the ImageEntry to download.
71 chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).
72
73 Returns:
74 An iterator of byte chunks.
75 """
76 r = self.session.get(f'rgd_imagery/{image_id}/data', stream=True)
77 return r.iter_content(chunk_size=chunk_size)
78
79 def download_image_thumbnail(
80 self,
81 image_id: Union[str, int],
82 ) -> bytes:
83 """
84 Download the generated thumbnail for this ImageEntry.
85
86 Args:
87 image_id: The ID of the ImageEntry to download.
88
89 Returns:
90 Thumbnail bytes.
91 """
92 r = self.session.get(f'image_process/imagery/{image_id}/thumbnail')
93 return r.content
94
95 def download_raster_thumbnail(
96 self,
97 raster_meta_id: Union[str, int, dict],
98 band: int = 0,
99 ) -> bytes:
100 """
101 Download the generated thumbnail for this ImageEntry.
102
103 Args:
104 raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.
105 band: The index of the image in the raster's image set to produce thumbnail from.
106
107 Returns:
108 Thumbnail bytes.
109 """
110 if isinstance(raster_meta_id, dict):
111 raster_meta_id = spatial_subentry_id(raster_meta_id)
112
113 r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')
114 parent_raster = r.json().get('parent_raster', {})
115 images = parent_raster.get('image_set', {}).get('images', [])
116 try:
117 return self.download_image_thumbnail(images[band]['id'])
118 except IndexError:
119 raise IndexError(f'Band index ({band}) out of range.')
120
121 def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:
122 """Get raster entry detail.
123
124 Args:
125 stac: Optionally return as STAC Item dictionary/JSON.
126
127 Returns:
128 Serialized object representation.
129 """
130 if isinstance(raster_meta_id, dict):
131 raster_meta_id = spatial_subentry_id(raster_meta_id)
132
133 if stac:
134 r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}/stac')
135 else:
136 r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')
137 return r.json()
138
139 def download_raster(
140 self,
141 raster_meta_id: Union[str, int, dict],
142 pathname: Optional[str] = None,
143 nest_with_name: bool = False,
144 keep_existing: bool = True,
145 ) -> RasterDownload:
146 """
147 Download the image set associated with a raster entry to disk.
148
149 Args:
150 raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.
151 pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.
152 nest_with_name: If True, nests the download within an additional directory, using the raster entry name.
153 keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.
154
155 Returns:
156 A dictionary of the paths to all files downloaded under the directory.
157 """
158 if isinstance(raster_meta_id, dict):
159 raster_meta_id = spatial_subentry_id(raster_meta_id)
160
161 r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')
162 parent_raster = r.json().get('parent_raster', {})
163
164 # Create dirs after request to avoid empty dirs if failed
165 if pathname is None:
166 pathname = tempfile.mkdtemp()
167
168 # Handle optional nesting with raster entry name
169 path = Path(pathname)
170 parent_raster_name: Optional[str] = parent_raster.get('name')
171
172 if nest_with_name and parent_raster_name:
173 path = path / parent_raster_name
174
175 # Ensure base download directory exists
176 if not path.exists():
177 path.mkdir()
178
179 # Initialize dataclass
180 raster_download = RasterDownload(path, [], [])
181
182 # Download images
183 images = parent_raster.get('image_set', {}).get('images', [])
184 for image in tqdm(images, desc='Downloading image files'):
185 file = image.get('file', {})
186 file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)
187 if file_path:
188 raster_download.images.append(file_path)
189
190 # Download ancillary files
191 ancillary = parent_raster.get('ancillary_files', [])
192 for file in tqdm(ancillary, desc='Downloading ancillary files'):
193 file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)
194 if file_path:
195 raster_download.ancillary.append(file_path)
196
197 return raster_download
198
199 def search(
200 self,
201 query: Optional[Union[Dict, str]] = None,
202 predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
203 relates: Optional[str] = None,
204 distance: Optional[Tuple[float, float]] = None,
205 acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
206 instrumentation: Optional[str] = None,
207 limit: Optional[int] = None,
208 offset: Optional[int] = None,
209 ) -> List[Dict]:
210 """
211 Search for geospatial entries based on various criteria.
212
213 For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.
214 E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.
215
216 Args:
217 query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.
218 predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will
219 be used to filter data such that predicate(a, b) where b is the queried geometry.
220 relates: Specify exactly how the queried geometry should relate to the data using a
221 DE-9IM string code.
222 distance: The min/max distance around the queried geometry in meters.
223 acquired: The min/max date and time (ISO 8601) when data was acquired.
224 instrumentation: The instrumentation used to acquire at least one of these data.
225 limit: The maximum number of results to return.
226 offset: The number of results to skip.
227
228 Returns:
229 A list of Spatial Entries.
230 """
231 params = spatial_search_params(
232 query=query,
233 predicate=predicate,
234 relates=relates,
235 distance=distance,
236 acquired=acquired,
237 instrumentation=instrumentation,
238 limit=limit,
239 offset=offset,
240 )
241 return list(limit_offset_pager(self.session, 'rgd/search', params=params))
242
243 def create_raster_stac(self, raster: Dict) -> Dict:
244 """Create a raster entry using STAC format."""
245 r = self.session.post('rgd_imagery/raster/stac', json=raster)
246 r.raise_for_status()
247
248 return r.json()
249
250 def search_raster_stac(
251 self,
252 query: Optional[Union[Dict, str]] = None,
253 predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
254 relates: Optional[str] = None,
255 distance: Optional[Tuple[float, float]] = None,
256 acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
257 instrumentation: Optional[str] = None,
258 num_bands: Optional[Tuple[int, int]] = None,
259 resolution: Optional[Tuple[int, int]] = None,
260 cloud_cover: Optional[Tuple[float, float]] = None,
261 limit: Optional[int] = None,
262 offset: Optional[int] = None,
263 ) -> List[Dict]:
264 """
265 Search for raster entries based on various criteria.
266
267 For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.
268 E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.
269
270 Args:
271 query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.
272 predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will
273 be used to filter data such that predicate(a, b) where b is the queried geometry.
274 relates: Specify exactly how the queried geometry should relate to the data using a
275 DE-9IM string code.
276 distance: The min/max distance around the queried geometry in meters.
277 acquired: The min/max date and time (ISO 8601) when data was acquired.
278 instrumentation: The instrumentation used to acquire at least one of these data.
279 num_bands: The min/max number of bands in the raster.
280 resolution: The min/max resolution of the raster.
281 cloud_cover: The min/max cloud coverage of the raster.
282 limit: The maximum number of results to return.
283 offset: The number of results to skip.
284
285 Returns:
286 A list of Spatial Entries in STAC Item format.
287 """
288 params = spatial_search_params(
289 query=query,
290 predicate=predicate,
291 relates=relates,
292 distance=distance,
293 acquired=acquired,
294 instrumentation=instrumentation,
295 limit=limit,
296 offset=offset,
297 )
298
299 if num_bands and len(num_bands) == 2:
300 nbmin, nbmax = num_bands
301 params['num_bands_min'] = nbmin
302 params['num_bands_max'] = nbmax
303
304 if resolution and len(resolution) == 2:
305 rmin, rmax = resolution
306 params['resolution_min'] = rmin
307 params['resolution_max'] = rmax
308
309 if cloud_cover and len(cloud_cover) == 2:
310 ccmin, ccmax = cloud_cover
311 params['cloud_cover_min'] = ccmin
312 params['cloud_cover_max'] = ccmax
313
314 return list(limit_offset_pager(self.session, 'rgd_imagery/raster/search', params=params))
315
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rgd-client/rgd_client/rgdc.py b/rgd-client/rgd_client/rgdc.py
--- a/rgd-client/rgd_client/rgdc.py
+++ b/rgd-client/rgd_client/rgdc.py
@@ -238,7 +238,11 @@
limit=limit,
offset=offset,
)
- return list(limit_offset_pager(self.session, 'rgd/search', params=params))
+
+ r = self.session.get('rgd/search', params=params)
+ r.raise_for_status()
+
+ return r.json()
def create_raster_stac(self, raster: Dict) -> Dict:
"""Create a raster entry using STAC format."""
| {"golden_diff": "diff --git a/rgd-client/rgd_client/rgdc.py b/rgd-client/rgd_client/rgdc.py\n--- a/rgd-client/rgd_client/rgdc.py\n+++ b/rgd-client/rgd_client/rgdc.py\n@@ -238,7 +238,11 @@\n limit=limit,\n offset=offset,\n )\n- return list(limit_offset_pager(self.session, 'rgd/search', params=params))\n+\n+ r = self.session.get('rgd/search', params=params)\n+ r.raise_for_status()\n+\n+ return r.json()\n \n def create_raster_stac(self, raster: Dict) -> Dict:\n \"\"\"Create a raster entry using STAC format.\"\"\"\n", "issue": "Proper testing for rgd_client\nWe need to implement real tests for the Python client\r\n\r\nThe tests for this would require running RGD with data prepopulated in the background then executing the client tests\n", "before_files": [{"content": "from base64 import b64encode\nfrom dataclasses import dataclass\nimport getpass\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nfrom .session import RgdcSession\nfrom .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE\nfrom .utils import (\n DEFAULT_RGD_API,\n download_checksum_file_to_path,\n limit_offset_pager,\n spatial_search_params,\n spatial_subentry_id,\n)\n\n\n@dataclass\nclass RasterDownload:\n path: Path\n images: List[Path]\n ancillary: List[Path]\n\n\nclass Rgdc:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n ):\n \"\"\"\n Initialize a RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. If None, a prompt will be provided.\n\n Returns:\n A new Rgdc instance.\n \"\"\"\n auth_header = None\n\n # Prompt for password if not provided\n if username is not None and password is None:\n password = getpass.getpass()\n\n if username and password:\n encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()\n auth_header = f'Basic {encoded_credentials}'\n\n self.session = RgdcSession(base_url=api_url, auth_header=auth_header)\n\n def list_image_tiles(self, image_id: Union[str, int]) -> Dict:\n \"\"\"List geodata imagery tiles.\"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/tiles')\n return r.json()\n\n def download_image_file(\n self, image_id: Union[str, int], chunk_size: int = 1024 * 1024\n ) -> Iterator[bytes]:\n \"\"\"\n Download the associated ImageFile data for this ImageEntry directly from S3.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).\n\n Returns:\n An iterator of byte chunks.\n \"\"\"\n r = self.session.get(f'rgd_imagery/{image_id}/data', stream=True)\n return r.iter_content(chunk_size=chunk_size)\n\n def download_image_thumbnail(\n self,\n image_id: Union[str, int],\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/thumbnail')\n return r.content\n\n def download_raster_thumbnail(\n self,\n raster_meta_id: Union[str, int, dict],\n band: int = 0,\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n band: The index of the 
image in the raster's image set to produce thumbnail from.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n images = parent_raster.get('image_set', {}).get('images', [])\n try:\n return self.download_image_thumbnail(images[band]['id'])\n except IndexError:\n raise IndexError(f'Band index ({band}) out of range.')\n\n def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:\n \"\"\"Get raster entry detail.\n\n Args:\n stac: Optionally return as STAC Item dictionary/JSON.\n\n Returns:\n Serialized object representation.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n if stac:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}/stac')\n else:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n return r.json()\n\n def download_raster(\n self,\n raster_meta_id: Union[str, int, dict],\n pathname: Optional[str] = None,\n nest_with_name: bool = False,\n keep_existing: bool = True,\n ) -> RasterDownload:\n \"\"\"\n Download the image set associated with a raster entry to disk.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.\n nest_with_name: If True, nests the download within an additional directory, using the raster entry name.\n keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.\n\n Returns:\n A dictionary of the paths to all files downloaded under the directory.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n\n # Create dirs after request to avoid empty dirs if failed\n if pathname is None:\n pathname = tempfile.mkdtemp()\n\n # Handle optional nesting with raster entry name\n path = Path(pathname)\n parent_raster_name: Optional[str] = parent_raster.get('name')\n\n if nest_with_name and parent_raster_name:\n path = path / parent_raster_name\n\n # Ensure base download directory exists\n if not path.exists():\n path.mkdir()\n\n # Initialize dataclass\n raster_download = RasterDownload(path, [], [])\n\n # Download images\n images = parent_raster.get('image_set', {}).get('images', [])\n for image in tqdm(images, desc='Downloading image files'):\n file = image.get('file', {})\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.images.append(file_path)\n\n # Download ancillary files\n ancillary = parent_raster.get('ancillary_files', [])\n for file in tqdm(ancillary, desc='Downloading ancillary files'):\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.ancillary.append(file_path)\n\n return raster_download\n\n def search(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> 
List[Dict]:\n \"\"\"\n Search for geospatial entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n return list(limit_offset_pager(self.session, 'rgd/search', params=params))\n\n def create_raster_stac(self, raster: Dict) -> Dict:\n \"\"\"Create a raster entry using STAC format.\"\"\"\n r = self.session.post('rgd_imagery/raster/stac', json=raster)\n r.raise_for_status()\n\n return r.json()\n\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n num_bands: Optional[Tuple[int, int]] = None,\n resolution: Optional[Tuple[int, int]] = None,\n cloud_cover: Optional[Tuple[float, float]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for raster entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n num_bands: The min/max number of bands in the raster.\n resolution: The min/max resolution of the raster.\n cloud_cover: The min/max cloud coverage of the raster.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries in STAC Item format.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n if num_bands and len(num_bands) == 2:\n nbmin, nbmax = num_bands\n params['num_bands_min'] = nbmin\n params['num_bands_max'] = nbmax\n\n if resolution and len(resolution) == 2:\n rmin, rmax = resolution\n params['resolution_min'] = rmin\n params['resolution_max'] = rmax\n\n if cloud_cover and len(cloud_cover) == 2:\n ccmin, ccmax = cloud_cover\n params['cloud_cover_min'] = ccmin\n params['cloud_cover_max'] = ccmax\n\n return list(limit_offset_pager(self.session, 'rgd_imagery/raster/search', params=params))\n", "path": "rgd-client/rgd_client/rgdc.py"}], "after_files": [{"content": "from base64 import b64encode\nfrom dataclasses import dataclass\nimport getpass\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nfrom .session import RgdcSession\nfrom .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE\nfrom .utils import (\n DEFAULT_RGD_API,\n download_checksum_file_to_path,\n limit_offset_pager,\n spatial_search_params,\n spatial_subentry_id,\n)\n\n\n@dataclass\nclass RasterDownload:\n path: Path\n images: List[Path]\n ancillary: List[Path]\n\n\nclass Rgdc:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n ):\n \"\"\"\n Initialize a RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. 
If None, a prompt will be provided.\n\n Returns:\n A new Rgdc instance.\n \"\"\"\n auth_header = None\n\n # Prompt for password if not provided\n if username is not None and password is None:\n password = getpass.getpass()\n\n if username and password:\n encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()\n auth_header = f'Basic {encoded_credentials}'\n\n self.session = RgdcSession(base_url=api_url, auth_header=auth_header)\n\n def list_image_tiles(self, image_id: Union[str, int]) -> Dict:\n \"\"\"List geodata imagery tiles.\"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/tiles')\n return r.json()\n\n def download_image_file(\n self, image_id: Union[str, int], chunk_size: int = 1024 * 1024\n ) -> Iterator[bytes]:\n \"\"\"\n Download the associated ImageFile data for this ImageEntry directly from S3.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).\n\n Returns:\n An iterator of byte chunks.\n \"\"\"\n r = self.session.get(f'rgd_imagery/{image_id}/data', stream=True)\n return r.iter_content(chunk_size=chunk_size)\n\n def download_image_thumbnail(\n self,\n image_id: Union[str, int],\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/thumbnail')\n return r.content\n\n def download_raster_thumbnail(\n self,\n raster_meta_id: Union[str, int, dict],\n band: int = 0,\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n band: The index of the image in the raster's image set to produce thumbnail from.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n images = parent_raster.get('image_set', {}).get('images', [])\n try:\n return self.download_image_thumbnail(images[band]['id'])\n except IndexError:\n raise IndexError(f'Band index ({band}) out of range.')\n\n def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:\n \"\"\"Get raster entry detail.\n\n Args:\n stac: Optionally return as STAC Item dictionary/JSON.\n\n Returns:\n Serialized object representation.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n if stac:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}/stac')\n else:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n return r.json()\n\n def download_raster(\n self,\n raster_meta_id: Union[str, int, dict],\n pathname: Optional[str] = None,\n nest_with_name: bool = False,\n keep_existing: bool = True,\n ) -> RasterDownload:\n \"\"\"\n Download the image set associated with a raster entry to disk.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.\n nest_with_name: If True, nests the download within an additional directory, using the raster entry name.\n keep_existing: If False, replace files existing on disk. 
Only valid if `pathname` is given.\n\n Returns:\n A dictionary of the paths to all files downloaded under the directory.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n\n # Create dirs after request to avoid empty dirs if failed\n if pathname is None:\n pathname = tempfile.mkdtemp()\n\n # Handle optional nesting with raster entry name\n path = Path(pathname)\n parent_raster_name: Optional[str] = parent_raster.get('name')\n\n if nest_with_name and parent_raster_name:\n path = path / parent_raster_name\n\n # Ensure base download directory exists\n if not path.exists():\n path.mkdir()\n\n # Initialize dataclass\n raster_download = RasterDownload(path, [], [])\n\n # Download images\n images = parent_raster.get('image_set', {}).get('images', [])\n for image in tqdm(images, desc='Downloading image files'):\n file = image.get('file', {})\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.images.append(file_path)\n\n # Download ancillary files\n ancillary = parent_raster.get('ancillary_files', [])\n for file in tqdm(ancillary, desc='Downloading ancillary files'):\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.ancillary.append(file_path)\n\n return raster_download\n\n def search(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for geospatial entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n r = self.session.get('rgd/search', params=params)\n r.raise_for_status()\n\n return r.json()\n\n def create_raster_stac(self, raster: Dict) -> Dict:\n \"\"\"Create a raster entry using STAC format.\"\"\"\n r = self.session.post('rgd_imagery/raster/stac', json=raster)\n r.raise_for_status()\n\n return r.json()\n\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n num_bands: Optional[Tuple[int, int]] = None,\n resolution: Optional[Tuple[int, int]] = None,\n cloud_cover: Optional[Tuple[float, float]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for raster entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n num_bands: The min/max number of bands in the raster.\n resolution: The min/max resolution of the raster.\n cloud_cover: The min/max cloud coverage of the raster.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries in STAC Item format.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n if num_bands and len(num_bands) == 2:\n nbmin, nbmax = num_bands\n params['num_bands_min'] = nbmin\n params['num_bands_max'] = nbmax\n\n if resolution and len(resolution) == 2:\n rmin, rmax = resolution\n params['resolution_min'] = rmin\n params['resolution_max'] = rmax\n\n if cloud_cover and len(cloud_cover) == 2:\n ccmin, ccmax = cloud_cover\n params['cloud_cover_min'] = ccmin\n params['cloud_cover_max'] = ccmax\n\n return list(limit_offset_pager(self.session, 'rgd_imagery/raster/search', params=params))\n", "path": "rgd-client/rgd_client/rgdc.py"}]} |
gh_patches_debug_1477 | rasdani/github-patches | git_diff | canonical__snapcraft-80 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logging out from the run environment produces a traceback
(amd64)ubuntu@localhost:~$ logout
Connection to localhost closed.
Traceback (most recent call last):
File "/usr/bin/snapcraft", line 33, in <module>
snapcraft.main.main()
File "/usr/lib/python3/dist-packages/snapcraft/main.py", line 80, in main
args.func(args)
File "/usr/lib/python3/dist-packages/snapcraft/cmds.py", line 228, in run
preexec_fn=os.setsid)
File "/usr/lib/python3/dist-packages/snapcraft/cmds.py", line 343, in _check_call
return subprocess.check_call(args, **kwargs)
File "/usr/lib/python3.4/subprocess.py", line 561, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['ssh', '-i', '/home/daniel/.ssh/ubuntudevice_0149BDCB0C009017_id_rsa', '-oStrictHostKeyChecking=no', '-oUserKnownHostsFile=/tmp/tmpcaocvoj7', '-oKbdInteractiveAuthentication=no', '-p', '8022', 'ubuntu@localhost']' returned non-zero exit status 1
daniel@daydream:~/dev/apps/bwm-ng.snap$
Launchpad Details: [#LP1499242](https://bugs.launchpad.net/bugs/1499242) Daniel Holbach - 2015-09-24 06:05:27 -0300
--- END ISSUE ---
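The traceback happens because `subprocess.check_call` raises `CalledProcessError` whenever the child process exits with a non-zero status, and an interactive `ssh` session frequently does exactly that when the user logs out. A minimal sketch of the difference between `check_call` and `call` (the `ssh` command line here is illustrative, not the exact invocation snapcraft assembles):

```python
import subprocess

# check_call raises CalledProcessError on any non-zero exit status,
# which is what turns an ordinary logout into the traceback above.
try:
    subprocess.check_call(['ssh', '-p', '8022', 'ubuntu@localhost'])
except subprocess.CalledProcessError as exc:
    print('interactive session ended with status', exc.returncode)

# call returns the exit status instead of raising, so a non-zero
# logout status can simply be ignored for the interactive "login" step.
status = subprocess.call(['ssh', '-p', '8022', 'ubuntu@localhost'])
print('interactive session ended with status', status)
```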
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snapcraft/cmds.py`
Content:
```
1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
2 #
3 # Copyright (C) 2015 Canonical Ltd
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License version 3 as
7 # published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import apt
18 import filecmp
19 import glob
20 import logging
21 import os
22 import shlex
23 import shutil
24 import subprocess
25 import sys
26 import tempfile
27 import time
28
29 import snapcraft.yaml
30 from snapcraft import common
31 from snapcraft import lifecycle
32 from snapcraft import meta
33
34 logger = logging.getLogger(__name__)
35
36
37 _TEMPLATE_YAML = r'''name: # the name of the snap
38 version: # the version of the snap
39 # The vendor for the snap (replace 'Vendor <[email protected]>')
40 vendor: Vendor <[email protected]>
41 summary: # 79 char long summary
42 description: # A longer description for the snap
43 icon: # A path to an icon for the package
44 '''
45
46
47 _config = None
48
49
50 def init(args):
51 if os.path.exists('snapcraft.yaml'):
52 logger.error('snapcraft.yaml already exists!')
53 sys.exit(1)
54 yaml = _TEMPLATE_YAML
55 if args.part:
56 yaml += 'parts:\n'
57 for part_name in args.part:
58 part = lifecycle.load_plugin(part_name, part_name)
59 yaml += ' ' + part.name + ':\n'
60 for opt in part.config.get('options', []):
61 if part.config['options'][opt].get('required', False):
62 yaml += ' ' + opt + ':\n'
63 yaml = yaml.strip()
64 with open('snapcraft.yaml', mode='w+') as f:
65 f.write(yaml)
66 logger.info('Wrote the following as snapcraft.yaml.')
67 print()
68 print(yaml)
69 sys.exit(0)
70
71
72 def shell(args):
73 config = _load_config()
74 common.env = config.stage_env()
75 userCommand = args.userCommand
76 if not userCommand:
77 userCommand = ['/usr/bin/env',
78 'PS1=\[\e[1;32m\]snapcraft:\w\$\[\e[0m\] ',
79 '/bin/bash',
80 '--norc']
81 common.run(userCommand)
82
83
84 def snap(args):
85 cmd(args)
86
87 # This check is to support manual assembly.
88 if not os.path.exists(os.path.join(common.get_snapdir(), 'meta')):
89 arches = [snapcraft.common.get_arch(), ]
90
91 config = _load_config()
92
93 # FIXME this should be done in a more contained manner
94 common.env = config.snap_env()
95
96 meta.create(config.data, arches)
97
98
99 def assemble(args):
100 args.cmd = 'snap'
101 # With all the data in snapcraft.yaml, maybe it's not a good idea to call
102 # snap(args) and just do a snappy build if assemble was explicitly called.
103 snap(args)
104 common.run(['snappy', 'build', common.get_snapdir()])
105
106
107 def _find_latest_private_key():
108 """
109 Find the latest private key in ~/.ssh.
110
111 :returns:
112 Path of the most-recently-modified private SSH key
113 :raises LookupError:
114 If no such key was found.
115
116 This function tries to mimic the logic found in ``ubuntu-device-flash``. It
117 will look for the most recently modified private key in the users' SSH
118 configuration directory.
119 """
120 candidates = []
121 ssh_dir = os.path.expanduser('~/.ssh/')
122 for filename in os.listdir(ssh_dir):
123 # Skip public keys, we want the private key
124 if filename.endswith('.pub'):
125 continue
126 ssh_key = os.path.join(ssh_dir, filename)
127 # Skip non-files
128 if not os.path.isfile(ssh_key):
129 continue
130 # Ensure that it is a real ssh key
131 with open(ssh_key, 'rb') as stream:
132 if stream.readline() != b'-----BEGIN RSA PRIVATE KEY-----\n':
133 continue
134 candidates.append(ssh_key)
135 # Sort the keys by modification time, pick the most recent key
136 candidates.sort(key=lambda f: os.stat(f).st_mtime, reverse=True)
137 logger.debug('Available ssh public keys: %r', candidates)
138 if not candidates:
139 raise LookupError('Unable to find any private ssh key')
140 return candidates[0]
141
142
143 def run(args):
144 # We are mostly making sure we are operating from the correct location. In
145 # the future this could do more by using target attribute in snapcraft.yaml
146 # to create the correct target image.
147 _load_config()
148 # Find the ssh key that ubuntu-device-flash would use so that we can use it
149 # ourselves as well. This may not be the default key that the user has
150 # configured.
151 # See: https://bugs.launchpad.net/snapcraft/+bug/1486659
152 try:
153 ssh_key = _find_latest_private_key()
154 except LookupError:
155 logger.error('You need to have an SSH key to use this command')
156 logger.error('Please generate one with ssh-keygen(1)')
157 return 1
158 else:
159 logger.info('Using the following ssh key: %s', ssh_key)
160
161 # Find available *.snap files to copy into the test VM
162 snap_dir = os.path.join(os.getcwd())
163 # copy the snap with the largest version number into the test VM
164 snaps = glob.glob(snap_dir + '/*.snap')
165 snaps.sort()
166 if not snaps:
167 logger.error('There are no .snap files ready')
168 logger.error('Perhaps you forgot to run "snapcraft assemble"')
169 return 1
170
171 qemudir = os.path.join(os.getcwd(), 'image')
172 qemu_img = os.path.join(qemudir, '15.04.img')
173 if not os.path.exists(qemu_img):
174 os.makedirs(qemudir, exist_ok=True)
175 logger.info(
176 'Setting up virtual snappy environment, root access required')
177 common.run([
178 'sudo', 'ubuntu-device-flash', 'core', '15.04', '--developer-mode',
179 '--enable-ssh', '-o', os.path.relpath(qemu_img, qemudir)],
180 cwd=qemudir)
181 qemu = None
182 try:
183 # Allow the developer to provide additional arguments to qemu. This
184 # can be used, for example, to pass through USB devices from the host.
185 # This can enable a lot of hardware-specific use cases directly inside
186 # the snapcraft run workflow.
187 #
188 # For example:
189 # $ export SNAPCRAFT_RUN_QEMU_ARGS=\
190 # "-usb -device usb-host,hostbus=1,hostaddr=10"
191 # $ snapcraft run
192 qemu_args = os.getenv('SNAPCRAFT_RUN_QEMU_ARGS')
193 if qemu_args is not None:
194 qemu_args = shlex.split(qemu_args)
195 else:
196 qemu_args = []
197 qemu = subprocess.Popen(
198 ['kvm', '-m', '768', '-nographic', '-snapshot', '-redir',
199 'tcp:8022::22', qemu_img] + qemu_args, stdin=subprocess.PIPE)
200 n = tempfile.NamedTemporaryFile()
201 ssh_opts = [
202 # We want to login with the specified ssh identity (key)
203 '-i', ssh_key,
204 # We don't want strict host checking because it's a new VM with a
205 # random key each time.
206 '-oStrictHostKeyChecking=no',
207 # We don't want to pollute the known_hosts file with new entries
208 # all the time so let's use a temporary file for that
209 '-oUserKnownHostsFile={}'.format(n.name),
210 # Don't try keyboard interactive authentication, we're expecting to
211 # login via the key and if that doesn't work then everything else
212 # will fail anyway.
213 '-oKbdInteractiveAuthentication=no',
214 ]
215 while True:
216 ret_code = _call(
217 ['ssh'] + ssh_opts +
218 ['ubuntu@localhost', '-p', '8022', 'true'])
219 if ret_code == 0:
220 break
221 print('Waiting for device')
222 time.sleep(1)
223 # copy the most recent snap into the test VM
224 _check_call(
225 ['scp'] + ssh_opts + [
226 '-P', '8022', snaps[-1], 'ubuntu@localhost:~/'])
227 # install the snap
228 _check_call(
229 ['ssh'] + ssh_opts +
230 ['ubuntu@localhost', '-p', '8022', 'sudo snappy install *.snap'])
231 # "login"
232 _check_call(
233 ['ssh'] + ssh_opts + ['-p', '8022', 'ubuntu@localhost'],
234 preexec_fn=os.setsid)
235 finally:
236 if qemu:
237 qemu.kill()
238
239
240 def list_plugins(args=None):
241 import pkgutil
242 import snapcraft.plugins
243
244 for importer, modname, is_package in pkgutil.iter_modules(
245 snapcraft.plugins.__path__):
246 if not is_package:
247 print(modname.replace('_', '-'))
248
249
250 def clean(args):
251 config = _load_config()
252
253 for part in config.all_parts:
254 logger.info('Cleaning up for part %r', part.name)
255 if os.path.exists(part.partdir):
256 shutil.rmtree(part.partdir)
257
258 # parts dir does not contain only generated code.
259 if (os.path.exists(common.get_partsdir()) and
260 not os.listdir(common.get_partsdir())):
261 os.rmdir(common.get_partsdir())
262
263 logger.info('Cleaning up staging area')
264 if os.path.exists(common.get_stagedir()):
265 shutil.rmtree(common.get_stagedir())
266
267 logger.info('Cleaning up snapping area')
268 if os.path.exists(common.get_snapdir()):
269 shutil.rmtree(common.get_snapdir())
270
271
272 def _check_for_collisions(parts):
273 parts_files = {}
274 for part in parts:
275 # Gather our own files up
276 fileset = getattr(part.code.options, 'stage', ['*']) or ['*']
277 part_files, _ = lifecycle.migratable_filesets(
278 fileset,
279 part.installdir)
280
281 # Scan previous parts for collisions
282 for other_part_name in parts_files:
283 common = part_files & parts_files[other_part_name]['files']
284 conflict_files = []
285 for f in common:
286 this = os.path.join(part.installdir, f)
287 other = os.path.join(
288 parts_files[other_part_name]['installdir'],
289 f)
290 if os.path.islink(this) and os.path.islink(other):
291 continue
292 if not filecmp.cmp(this, other, shallow=False):
293 conflict_files.append(f)
294
295 if conflict_files:
296 logger.error('Error: parts %s and %s have the following file '
297 'paths in common which have different '
298 'contents:\n %s',
299 other_part_name,
300 part.name,
301 '\n '.join(sorted(conflict_files)))
302
303 return False
304
305 # And add our files to the list
306 parts_files[part.name] = {'files': part_files,
307 'installdir': part.installdir}
308
309 return True
310
311
312 def cmd(args):
313 forceAll = args.force
314 forceCommand = None
315
316 cmds = [args.cmd]
317
318 if cmds[0] in common.COMMAND_ORDER:
319 forceCommand = cmds[0]
320 cmds = common.COMMAND_ORDER[0:common.COMMAND_ORDER.index(cmds[0]) + 1]
321
322 config = _load_config()
323 _install_build_packages(config.build_tools)
324
325 # clean the snap dir before Snapping
326 snap_clean = False
327
328 for part in config.all_parts:
329 for cmd in cmds:
330 if cmd is 'stage':
331 # This ends up running multiple times, as each part gets to its
332 # staging cmd. That's inefficient, but largely OK.
333 # FIXME: fix the above by iterating over cmds before iterating
334 # all_parts. But then we need to make sure we continue to
335 # handle cases like go, where you want go built before trying
336 # to pull a go project.
337 if not _check_for_collisions(config.all_parts):
338 sys.exit(1)
339
340 # We want to make sure we have a clean snap dir
341 if cmd is 'snap' and not snap_clean:
342 shutil.rmtree(common.get_snapdir())
343 snap_clean = True
344
345 common.env = config.build_env_for_part(part)
346 force = forceAll or cmd == forceCommand
347
348 try:
349 getattr(part, cmd)(force=force)
350 except Exception as e:
351 logger.error('Failed doing %s for %s: %s', cmd, part.name, e)
352 sys.exit(1)
353
354
355 def _call(args, **kwargs):
356 logger.info('Running: %s', ' '.join(shlex.quote(arg) for arg in args))
357 return subprocess.call(args, **kwargs)
358
359
360 def _check_call(args, **kwargs):
361 logger.info('Running: %s', ' '.join(shlex.quote(arg) for arg in args))
362 return subprocess.check_call(args, **kwargs)
363
364
365 def _install_build_packages(packages):
366 new_packages = []
367 for pkg in packages:
368 try:
369 if not apt.Cache()[pkg].installed:
370 new_packages.append(pkg)
371 except KeyError:
372 logger.error('Could not find all the "build-packages" required '
373 'in snapcraft.yaml')
374 sys.exit(1)
375 if new_packages:
376 logger.info('Installing required packages on the host system')
377 _check_call(['sudo', 'apt-get', '-o', 'Dpkg::Progress-Fancy=1',
378 '--no-install-recommends',
379 '-y', 'install'] + new_packages)
380
381
382 def _load_config():
383 global _config
384 if _config:
385 return _config
386
387 try:
388 _config = snapcraft.yaml.Config()
389 return _config
390 except snapcraft.yaml.SnapcraftYamlFileError as e:
391 logger.error(
392 'Could not find {}. Are you sure you are in the right '
393 'directory?\nTo start a new project, use \'snapcraft '
394 'init\''.format(e.file))
395 sys.exit(1)
396 except snapcraft.yaml.SnapcraftSchemaError as e:
397 msg = 'Issues while validating snapcraft.yaml: {}'.format(e.message)
398 logger.error(msg)
399 sys.exit(1)
400 except snapcraft.yaml.PluginNotDefinedError as e:
401 logger.error(
402 'Issues while validating snapcraft.yaml: the "plugin" keyword is '
403 'missing for the "{}" part.'.format(e.part))
404 sys.exit(1)
405 except snapcraft.yaml.SnapcraftLogicError as e:
406 logger.error('Issue detected while analyzing '
407 'snapcraft.yaml: {}'.format(e.message))
408 sys.exit(1)
409 except lifecycle.PluginError as e:
410 logger.error('Issue while loading plugin: {}'.format(e))
411
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snapcraft/cmds.py b/snapcraft/cmds.py
--- a/snapcraft/cmds.py
+++ b/snapcraft/cmds.py
@@ -229,7 +229,7 @@
['ssh'] + ssh_opts +
['ubuntu@localhost', '-p', '8022', 'sudo snappy install *.snap'])
# "login"
- _check_call(
+ _call(
['ssh'] + ssh_opts + ['-p', '8022', 'ubuntu@localhost'],
preexec_fn=os.setsid)
finally:
| {"golden_diff": "diff --git a/snapcraft/cmds.py b/snapcraft/cmds.py\n--- a/snapcraft/cmds.py\n+++ b/snapcraft/cmds.py\n@@ -229,7 +229,7 @@\n ['ssh'] + ssh_opts +\n ['ubuntu@localhost', '-p', '8022', 'sudo snappy install *.snap'])\n # \"login\"\n- _check_call(\n+ _call(\n ['ssh'] + ssh_opts + ['-p', '8022', 'ubuntu@localhost'],\n preexec_fn=os.setsid)\n finally:\n", "issue": "Logging out from the run environment produces a traceback\n(amd64)ubuntu@localhost:~$ logout\nConnection to localhost closed.\nTraceback (most recent call last):\n File \"/usr/bin/snapcraft\", line 33, in <module>\n snapcraft.main.main()\n File \"/usr/lib/python3/dist-packages/snapcraft/main.py\", line 80, in main\n args.func(args)\n File \"/usr/lib/python3/dist-packages/snapcraft/cmds.py\", line 228, in run\n preexec_fn=os.setsid)\n File \"/usr/lib/python3/dist-packages/snapcraft/cmds.py\", line 343, in _check_call\n return subprocess.check_call(args, **kwargs)\n File \"/usr/lib/python3.4/subprocess.py\", line 561, in check_call\n raise CalledProcessError(retcode, cmd)\nsubprocess.CalledProcessError: Command '['ssh', '-i', '/home/daniel/.ssh/ubuntudevice_0149BDCB0C009017_id_rsa', '-oStrictHostKeyChecking=no', '-oUserKnownHostsFile=/tmp/tmpcaocvoj7', '-oKbdInteractiveAuthentication=no', '-p', '8022', 'ubuntu@localhost']' returned non-zero exit status 1\ndaniel@daydream:~/dev/apps/bwm-ng.snap$\n\nLaunchpad Details: [#LP1499242](https://bugs.launchpad.net/bugs/1499242) Daniel Holbach - 2015-09-24 06:05:27 -0300\n\n", "before_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2015 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport apt\nimport filecmp\nimport glob\nimport logging\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nimport snapcraft.yaml\nfrom snapcraft import common\nfrom snapcraft import lifecycle\nfrom snapcraft import meta\n\nlogger = logging.getLogger(__name__)\n\n\n_TEMPLATE_YAML = r'''name: # the name of the snap\nversion: # the version of the snap\n# The vendor for the snap (replace 'Vendor <[email protected]>')\nvendor: Vendor <[email protected]>\nsummary: # 79 char long summary\ndescription: # A longer description for the snap\nicon: # A path to an icon for the package\n'''\n\n\n_config = None\n\n\ndef init(args):\n if os.path.exists('snapcraft.yaml'):\n logger.error('snapcraft.yaml already exists!')\n sys.exit(1)\n yaml = _TEMPLATE_YAML\n if args.part:\n yaml += 'parts:\\n'\n for part_name in args.part:\n part = lifecycle.load_plugin(part_name, part_name)\n yaml += ' ' + part.name + ':\\n'\n for opt in part.config.get('options', []):\n if part.config['options'][opt].get('required', False):\n yaml += ' ' + opt + ':\\n'\n yaml = yaml.strip()\n with open('snapcraft.yaml', mode='w+') as f:\n f.write(yaml)\n logger.info('Wrote the following as snapcraft.yaml.')\n print()\n print(yaml)\n sys.exit(0)\n\n\ndef shell(args):\n config = _load_config()\n common.env = config.stage_env()\n userCommand = args.userCommand\n if not userCommand:\n userCommand = ['/usr/bin/env',\n 'PS1=\\[\\e[1;32m\\]snapcraft:\\w\\$\\[\\e[0m\\] ',\n '/bin/bash',\n '--norc']\n common.run(userCommand)\n\n\ndef snap(args):\n cmd(args)\n\n # This check is to support manual assembly.\n if not os.path.exists(os.path.join(common.get_snapdir(), 'meta')):\n arches = [snapcraft.common.get_arch(), ]\n\n config = _load_config()\n\n # FIXME this should be done in a more contained manner\n common.env = config.snap_env()\n\n meta.create(config.data, arches)\n\n\ndef assemble(args):\n args.cmd = 'snap'\n # With all the data in snapcraft.yaml, maybe it's not a good idea to call\n # snap(args) and just do a snappy build if assemble was explicitly called.\n snap(args)\n common.run(['snappy', 'build', common.get_snapdir()])\n\n\ndef _find_latest_private_key():\n \"\"\"\n Find the latest private key in ~/.ssh.\n\n :returns:\n Path of the most-recently-modified private SSH key\n :raises LookupError:\n If no such key was found.\n\n This function tries to mimic the logic found in ``ubuntu-device-flash``. It\n will look for the most recently modified private key in the users' SSH\n configuration directory.\n \"\"\"\n candidates = []\n ssh_dir = os.path.expanduser('~/.ssh/')\n for filename in os.listdir(ssh_dir):\n # Skip public keys, we want the private key\n if filename.endswith('.pub'):\n continue\n ssh_key = os.path.join(ssh_dir, filename)\n # Skip non-files\n if not os.path.isfile(ssh_key):\n continue\n # Ensure that it is a real ssh key\n with open(ssh_key, 'rb') as stream:\n if stream.readline() != b'-----BEGIN RSA PRIVATE KEY-----\\n':\n continue\n candidates.append(ssh_key)\n # Sort the keys by modification time, pick the most recent key\n candidates.sort(key=lambda f: os.stat(f).st_mtime, reverse=True)\n logger.debug('Available ssh public keys: %r', candidates)\n if not candidates:\n raise LookupError('Unable to find any private ssh key')\n return candidates[0]\n\n\ndef run(args):\n # We are mostly making sure we are operating from the correct location. 
In\n # the future this could do more by using target attribute in snapcraft.yaml\n # to create the correct target image.\n _load_config()\n # Find the ssh key that ubuntu-device-flash would use so that we can use it\n # ourselves as well. This may not be the default key that the user has\n # configured.\n # See: https://bugs.launchpad.net/snapcraft/+bug/1486659\n try:\n ssh_key = _find_latest_private_key()\n except LookupError:\n logger.error('You need to have an SSH key to use this command')\n logger.error('Please generate one with ssh-keygen(1)')\n return 1\n else:\n logger.info('Using the following ssh key: %s', ssh_key)\n\n # Find available *.snap files to copy into the test VM\n snap_dir = os.path.join(os.getcwd())\n # copy the snap with the largest version number into the test VM\n snaps = glob.glob(snap_dir + '/*.snap')\n snaps.sort()\n if not snaps:\n logger.error('There are no .snap files ready')\n logger.error('Perhaps you forgot to run \"snapcraft assemble\"')\n return 1\n\n qemudir = os.path.join(os.getcwd(), 'image')\n qemu_img = os.path.join(qemudir, '15.04.img')\n if not os.path.exists(qemu_img):\n os.makedirs(qemudir, exist_ok=True)\n logger.info(\n 'Setting up virtual snappy environment, root access required')\n common.run([\n 'sudo', 'ubuntu-device-flash', 'core', '15.04', '--developer-mode',\n '--enable-ssh', '-o', os.path.relpath(qemu_img, qemudir)],\n cwd=qemudir)\n qemu = None\n try:\n # Allow the developer to provide additional arguments to qemu. This\n # can be used, for example, to pass through USB devices from the host.\n # This can enable a lot of hardware-specific use cases directly inside\n # the snapcraft run workflow.\n #\n # For example:\n # $ export SNAPCRAFT_RUN_QEMU_ARGS=\\\n # \"-usb -device usb-host,hostbus=1,hostaddr=10\"\n # $ snapcraft run\n qemu_args = os.getenv('SNAPCRAFT_RUN_QEMU_ARGS')\n if qemu_args is not None:\n qemu_args = shlex.split(qemu_args)\n else:\n qemu_args = []\n qemu = subprocess.Popen(\n ['kvm', '-m', '768', '-nographic', '-snapshot', '-redir',\n 'tcp:8022::22', qemu_img] + qemu_args, stdin=subprocess.PIPE)\n n = tempfile.NamedTemporaryFile()\n ssh_opts = [\n # We want to login with the specified ssh identity (key)\n '-i', ssh_key,\n # We don't want strict host checking because it's a new VM with a\n # random key each time.\n '-oStrictHostKeyChecking=no',\n # We don't want to pollute the known_hosts file with new entries\n # all the time so let's use a temporary file for that\n '-oUserKnownHostsFile={}'.format(n.name),\n # Don't try keyboard interactive authentication, we're expecting to\n # login via the key and if that doesn't work then everything else\n # will fail anyway.\n '-oKbdInteractiveAuthentication=no',\n ]\n while True:\n ret_code = _call(\n ['ssh'] + ssh_opts +\n ['ubuntu@localhost', '-p', '8022', 'true'])\n if ret_code == 0:\n break\n print('Waiting for device')\n time.sleep(1)\n # copy the most recent snap into the test VM\n _check_call(\n ['scp'] + ssh_opts + [\n '-P', '8022', snaps[-1], 'ubuntu@localhost:~/'])\n # install the snap\n _check_call(\n ['ssh'] + ssh_opts +\n ['ubuntu@localhost', '-p', '8022', 'sudo snappy install *.snap'])\n # \"login\"\n _check_call(\n ['ssh'] + ssh_opts + ['-p', '8022', 'ubuntu@localhost'],\n preexec_fn=os.setsid)\n finally:\n if qemu:\n qemu.kill()\n\n\ndef list_plugins(args=None):\n import pkgutil\n import snapcraft.plugins\n\n for importer, modname, is_package in pkgutil.iter_modules(\n snapcraft.plugins.__path__):\n if not is_package:\n print(modname.replace('_', '-'))\n\n\ndef 
clean(args):\n config = _load_config()\n\n for part in config.all_parts:\n logger.info('Cleaning up for part %r', part.name)\n if os.path.exists(part.partdir):\n shutil.rmtree(part.partdir)\n\n # parts dir does not contain only generated code.\n if (os.path.exists(common.get_partsdir()) and\n not os.listdir(common.get_partsdir())):\n os.rmdir(common.get_partsdir())\n\n logger.info('Cleaning up staging area')\n if os.path.exists(common.get_stagedir()):\n shutil.rmtree(common.get_stagedir())\n\n logger.info('Cleaning up snapping area')\n if os.path.exists(common.get_snapdir()):\n shutil.rmtree(common.get_snapdir())\n\n\ndef _check_for_collisions(parts):\n parts_files = {}\n for part in parts:\n # Gather our own files up\n fileset = getattr(part.code.options, 'stage', ['*']) or ['*']\n part_files, _ = lifecycle.migratable_filesets(\n fileset,\n part.installdir)\n\n # Scan previous parts for collisions\n for other_part_name in parts_files:\n common = part_files & parts_files[other_part_name]['files']\n conflict_files = []\n for f in common:\n this = os.path.join(part.installdir, f)\n other = os.path.join(\n parts_files[other_part_name]['installdir'],\n f)\n if os.path.islink(this) and os.path.islink(other):\n continue\n if not filecmp.cmp(this, other, shallow=False):\n conflict_files.append(f)\n\n if conflict_files:\n logger.error('Error: parts %s and %s have the following file '\n 'paths in common which have different '\n 'contents:\\n %s',\n other_part_name,\n part.name,\n '\\n '.join(sorted(conflict_files)))\n\n return False\n\n # And add our files to the list\n parts_files[part.name] = {'files': part_files,\n 'installdir': part.installdir}\n\n return True\n\n\ndef cmd(args):\n forceAll = args.force\n forceCommand = None\n\n cmds = [args.cmd]\n\n if cmds[0] in common.COMMAND_ORDER:\n forceCommand = cmds[0]\n cmds = common.COMMAND_ORDER[0:common.COMMAND_ORDER.index(cmds[0]) + 1]\n\n config = _load_config()\n _install_build_packages(config.build_tools)\n\n # clean the snap dir before Snapping\n snap_clean = False\n\n for part in config.all_parts:\n for cmd in cmds:\n if cmd is 'stage':\n # This ends up running multiple times, as each part gets to its\n # staging cmd. That's inefficient, but largely OK.\n # FIXME: fix the above by iterating over cmds before iterating\n # all_parts. 
But then we need to make sure we continue to\n # handle cases like go, where you want go built before trying\n # to pull a go project.\n if not _check_for_collisions(config.all_parts):\n sys.exit(1)\n\n # We want to make sure we have a clean snap dir\n if cmd is 'snap' and not snap_clean:\n shutil.rmtree(common.get_snapdir())\n snap_clean = True\n\n common.env = config.build_env_for_part(part)\n force = forceAll or cmd == forceCommand\n\n try:\n getattr(part, cmd)(force=force)\n except Exception as e:\n logger.error('Failed doing %s for %s: %s', cmd, part.name, e)\n sys.exit(1)\n\n\ndef _call(args, **kwargs):\n logger.info('Running: %s', ' '.join(shlex.quote(arg) for arg in args))\n return subprocess.call(args, **kwargs)\n\n\ndef _check_call(args, **kwargs):\n logger.info('Running: %s', ' '.join(shlex.quote(arg) for arg in args))\n return subprocess.check_call(args, **kwargs)\n\n\ndef _install_build_packages(packages):\n new_packages = []\n for pkg in packages:\n try:\n if not apt.Cache()[pkg].installed:\n new_packages.append(pkg)\n except KeyError:\n logger.error('Could not find all the \"build-packages\" required '\n 'in snapcraft.yaml')\n sys.exit(1)\n if new_packages:\n logger.info('Installing required packages on the host system')\n _check_call(['sudo', 'apt-get', '-o', 'Dpkg::Progress-Fancy=1',\n '--no-install-recommends',\n '-y', 'install'] + new_packages)\n\n\ndef _load_config():\n global _config\n if _config:\n return _config\n\n try:\n _config = snapcraft.yaml.Config()\n return _config\n except snapcraft.yaml.SnapcraftYamlFileError as e:\n logger.error(\n 'Could not find {}. Are you sure you are in the right '\n 'directory?\\nTo start a new project, use \\'snapcraft '\n 'init\\''.format(e.file))\n sys.exit(1)\n except snapcraft.yaml.SnapcraftSchemaError as e:\n msg = 'Issues while validating snapcraft.yaml: {}'.format(e.message)\n logger.error(msg)\n sys.exit(1)\n except snapcraft.yaml.PluginNotDefinedError as e:\n logger.error(\n 'Issues while validating snapcraft.yaml: the \"plugin\" keyword is '\n 'missing for the \"{}\" part.'.format(e.part))\n sys.exit(1)\n except snapcraft.yaml.SnapcraftLogicError as e:\n logger.error('Issue detected while analyzing '\n 'snapcraft.yaml: {}'.format(e.message))\n sys.exit(1)\n except lifecycle.PluginError as e:\n logger.error('Issue while loading plugin: {}'.format(e))\n", "path": "snapcraft/cmds.py"}], "after_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2015 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport apt\nimport filecmp\nimport glob\nimport logging\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nimport snapcraft.yaml\nfrom snapcraft import common\nfrom snapcraft import lifecycle\nfrom snapcraft import meta\n\nlogger = logging.getLogger(__name__)\n\n\n_TEMPLATE_YAML = r'''name: # the name of the snap\nversion: # the version of the snap\n# The vendor for the snap (replace 'Vendor <[email protected]>')\nvendor: Vendor <[email protected]>\nsummary: # 79 char long summary\ndescription: # A longer description for the snap\nicon: # A path to an icon for the package\n'''\n\n\n_config = None\n\n\ndef init(args):\n if os.path.exists('snapcraft.yaml'):\n logger.error('snapcraft.yaml already exists!')\n sys.exit(1)\n yaml = _TEMPLATE_YAML\n if args.part:\n yaml += 'parts:\\n'\n for part_name in args.part:\n part = lifecycle.load_plugin(part_name, part_name)\n yaml += ' ' + part.name + ':\\n'\n for opt in part.config.get('options', []):\n if part.config['options'][opt].get('required', False):\n yaml += ' ' + opt + ':\\n'\n yaml = yaml.strip()\n with open('snapcraft.yaml', mode='w+') as f:\n f.write(yaml)\n logger.info('Wrote the following as snapcraft.yaml.')\n print()\n print(yaml)\n sys.exit(0)\n\n\ndef shell(args):\n config = _load_config()\n common.env = config.stage_env()\n userCommand = args.userCommand\n if not userCommand:\n userCommand = ['/usr/bin/env',\n 'PS1=\\[\\e[1;32m\\]snapcraft:\\w\\$\\[\\e[0m\\] ',\n '/bin/bash',\n '--norc']\n common.run(userCommand)\n\n\ndef snap(args):\n cmd(args)\n\n # This check is to support manual assembly.\n if not os.path.exists(os.path.join(common.get_snapdir(), 'meta')):\n arches = [snapcraft.common.get_arch(), ]\n\n config = _load_config()\n\n # FIXME this should be done in a more contained manner\n common.env = config.snap_env()\n\n meta.create(config.data, arches)\n\n\ndef assemble(args):\n args.cmd = 'snap'\n # With all the data in snapcraft.yaml, maybe it's not a good idea to call\n # snap(args) and just do a snappy build if assemble was explicitly called.\n snap(args)\n common.run(['snappy', 'build', common.get_snapdir()])\n\n\ndef _find_latest_private_key():\n \"\"\"\n Find the latest private key in ~/.ssh.\n\n :returns:\n Path of the most-recently-modified private SSH key\n :raises LookupError:\n If no such key was found.\n\n This function tries to mimic the logic found in ``ubuntu-device-flash``. It\n will look for the most recently modified private key in the users' SSH\n configuration directory.\n \"\"\"\n candidates = []\n ssh_dir = os.path.expanduser('~/.ssh/')\n for filename in os.listdir(ssh_dir):\n # Skip public keys, we want the private key\n if filename.endswith('.pub'):\n continue\n ssh_key = os.path.join(ssh_dir, filename)\n # Skip non-files\n if not os.path.isfile(ssh_key):\n continue\n # Ensure that it is a real ssh key\n with open(ssh_key, 'rb') as stream:\n if stream.readline() != b'-----BEGIN RSA PRIVATE KEY-----\\n':\n continue\n candidates.append(ssh_key)\n # Sort the keys by modification time, pick the most recent key\n candidates.sort(key=lambda f: os.stat(f).st_mtime, reverse=True)\n logger.debug('Available ssh public keys: %r', candidates)\n if not candidates:\n raise LookupError('Unable to find any private ssh key')\n return candidates[0]\n\n\ndef run(args):\n # We are mostly making sure we are operating from the correct location. 
In\n # the future this could do more by using target attribute in snapcraft.yaml\n # to create the correct target image.\n _load_config()\n # Find the ssh key that ubuntu-device-flash would use so that we can use it\n # ourselves as well. This may not be the default key that the user has\n # configured.\n # See: https://bugs.launchpad.net/snapcraft/+bug/1486659\n try:\n ssh_key = _find_latest_private_key()\n except LookupError:\n logger.error('You need to have an SSH key to use this command')\n logger.error('Please generate one with ssh-keygen(1)')\n return 1\n else:\n logger.info('Using the following ssh key: %s', ssh_key)\n\n # Find available *.snap files to copy into the test VM\n snap_dir = os.path.join(os.getcwd())\n # copy the snap with the largest version number into the test VM\n snaps = glob.glob(snap_dir + '/*.snap')\n snaps.sort()\n if not snaps:\n logger.error('There are no .snap files ready')\n logger.error('Perhaps you forgot to run \"snapcraft assemble\"')\n return 1\n\n qemudir = os.path.join(os.getcwd(), 'image')\n qemu_img = os.path.join(qemudir, '15.04.img')\n if not os.path.exists(qemu_img):\n os.makedirs(qemudir, exist_ok=True)\n logger.info(\n 'Setting up virtual snappy environment, root access required')\n common.run([\n 'sudo', 'ubuntu-device-flash', 'core', '15.04', '--developer-mode',\n '--enable-ssh', '-o', os.path.relpath(qemu_img, qemudir)],\n cwd=qemudir)\n qemu = None\n try:\n # Allow the developer to provide additional arguments to qemu. This\n # can be used, for example, to pass through USB devices from the host.\n # This can enable a lot of hardware-specific use cases directly inside\n # the snapcraft run workflow.\n #\n # For example:\n # $ export SNAPCRAFT_RUN_QEMU_ARGS=\\\n # \"-usb -device usb-host,hostbus=1,hostaddr=10\"\n # $ snapcraft run\n qemu_args = os.getenv('SNAPCRAFT_RUN_QEMU_ARGS')\n if qemu_args is not None:\n qemu_args = shlex.split(qemu_args)\n else:\n qemu_args = []\n qemu = subprocess.Popen(\n ['kvm', '-m', '768', '-nographic', '-snapshot', '-redir',\n 'tcp:8022::22', qemu_img] + qemu_args, stdin=subprocess.PIPE)\n n = tempfile.NamedTemporaryFile()\n ssh_opts = [\n # We want to login with the specified ssh identity (key)\n '-i', ssh_key,\n # We don't want strict host checking because it's a new VM with a\n # random key each time.\n '-oStrictHostKeyChecking=no',\n # We don't want to pollute the known_hosts file with new entries\n # all the time so let's use a temporary file for that\n '-oUserKnownHostsFile={}'.format(n.name),\n # Don't try keyboard interactive authentication, we're expecting to\n # login via the key and if that doesn't work then everything else\n # will fail anyway.\n '-oKbdInteractiveAuthentication=no',\n ]\n while True:\n ret_code = _call(\n ['ssh'] + ssh_opts +\n ['ubuntu@localhost', '-p', '8022', 'true'])\n if ret_code == 0:\n break\n print('Waiting for device')\n time.sleep(1)\n # copy the most recent snap into the test VM\n _check_call(\n ['scp'] + ssh_opts + [\n '-P', '8022', snaps[-1], 'ubuntu@localhost:~/'])\n # install the snap\n _check_call(\n ['ssh'] + ssh_opts +\n ['ubuntu@localhost', '-p', '8022', 'sudo snappy install *.snap'])\n # \"login\"\n _call(\n ['ssh'] + ssh_opts + ['-p', '8022', 'ubuntu@localhost'],\n preexec_fn=os.setsid)\n finally:\n if qemu:\n qemu.kill()\n\n\ndef list_plugins(args=None):\n import pkgutil\n import snapcraft.plugins\n\n for importer, modname, is_package in pkgutil.iter_modules(\n snapcraft.plugins.__path__):\n if not is_package:\n print(modname.replace('_', '-'))\n\n\ndef 
clean(args):\n config = _load_config()\n\n for part in config.all_parts:\n logger.info('Cleaning up for part %r', part.name)\n if os.path.exists(part.partdir):\n shutil.rmtree(part.partdir)\n\n # parts dir does not contain only generated code.\n if (os.path.exists(common.get_partsdir()) and\n not os.listdir(common.get_partsdir())):\n os.rmdir(common.get_partsdir())\n\n logger.info('Cleaning up staging area')\n if os.path.exists(common.get_stagedir()):\n shutil.rmtree(common.get_stagedir())\n\n logger.info('Cleaning up snapping area')\n if os.path.exists(common.get_snapdir()):\n shutil.rmtree(common.get_snapdir())\n\n\ndef _check_for_collisions(parts):\n parts_files = {}\n for part in parts:\n # Gather our own files up\n fileset = getattr(part.code.options, 'stage', ['*']) or ['*']\n part_files, _ = lifecycle.migratable_filesets(\n fileset,\n part.installdir)\n\n # Scan previous parts for collisions\n for other_part_name in parts_files:\n common = part_files & parts_files[other_part_name]['files']\n conflict_files = []\n for f in common:\n this = os.path.join(part.installdir, f)\n other = os.path.join(\n parts_files[other_part_name]['installdir'],\n f)\n if os.path.islink(this) and os.path.islink(other):\n continue\n if not filecmp.cmp(this, other, shallow=False):\n conflict_files.append(f)\n\n if conflict_files:\n logger.error('Error: parts %s and %s have the following file '\n 'paths in common which have different '\n 'contents:\\n %s',\n other_part_name,\n part.name,\n '\\n '.join(sorted(conflict_files)))\n\n return False\n\n # And add our files to the list\n parts_files[part.name] = {'files': part_files,\n 'installdir': part.installdir}\n\n return True\n\n\ndef cmd(args):\n forceAll = args.force\n forceCommand = None\n\n cmds = [args.cmd]\n\n if cmds[0] in common.COMMAND_ORDER:\n forceCommand = cmds[0]\n cmds = common.COMMAND_ORDER[0:common.COMMAND_ORDER.index(cmds[0]) + 1]\n\n config = _load_config()\n _install_build_packages(config.build_tools)\n\n # clean the snap dir before Snapping\n snap_clean = False\n\n for part in config.all_parts:\n for cmd in cmds:\n if cmd is 'stage':\n # This ends up running multiple times, as each part gets to its\n # staging cmd. That's inefficient, but largely OK.\n # FIXME: fix the above by iterating over cmds before iterating\n # all_parts. 
But then we need to make sure we continue to\n # handle cases like go, where you want go built before trying\n # to pull a go project.\n if not _check_for_collisions(config.all_parts):\n sys.exit(1)\n\n # We want to make sure we have a clean snap dir\n if cmd is 'snap' and not snap_clean:\n shutil.rmtree(common.get_snapdir())\n snap_clean = True\n\n common.env = config.build_env_for_part(part)\n force = forceAll or cmd == forceCommand\n\n try:\n getattr(part, cmd)(force=force)\n except Exception as e:\n logger.error('Failed doing %s for %s: %s', cmd, part.name, e)\n sys.exit(1)\n\n\ndef _call(args, **kwargs):\n logger.info('Running: %s', ' '.join(shlex.quote(arg) for arg in args))\n return subprocess.call(args, **kwargs)\n\n\ndef _check_call(args, **kwargs):\n logger.info('Running: %s', ' '.join(shlex.quote(arg) for arg in args))\n return subprocess.check_call(args, **kwargs)\n\n\ndef _install_build_packages(packages):\n new_packages = []\n for pkg in packages:\n try:\n if not apt.Cache()[pkg].installed:\n new_packages.append(pkg)\n except KeyError:\n logger.error('Could not find all the \"build-packages\" required '\n 'in snapcraft.yaml')\n sys.exit(1)\n if new_packages:\n logger.info('Installing required packages on the host system')\n _check_call(['sudo', 'apt-get', '-o', 'Dpkg::Progress-Fancy=1',\n '--no-install-recommends',\n '-y', 'install'] + new_packages)\n\n\ndef _load_config():\n global _config\n if _config:\n return _config\n\n try:\n _config = snapcraft.yaml.Config()\n return _config\n except snapcraft.yaml.SnapcraftYamlFileError as e:\n logger.error(\n 'Could not find {}. Are you sure you are in the right '\n 'directory?\\nTo start a new project, use \\'snapcraft '\n 'init\\''.format(e.file))\n sys.exit(1)\n except snapcraft.yaml.SnapcraftSchemaError as e:\n msg = 'Issues while validating snapcraft.yaml: {}'.format(e.message)\n logger.error(msg)\n sys.exit(1)\n except snapcraft.yaml.PluginNotDefinedError as e:\n logger.error(\n 'Issues while validating snapcraft.yaml: the \"plugin\" keyword is '\n 'missing for the \"{}\" part.'.format(e.part))\n sys.exit(1)\n except snapcraft.yaml.SnapcraftLogicError as e:\n logger.error('Issue detected while analyzing '\n 'snapcraft.yaml: {}'.format(e.message))\n sys.exit(1)\n except lifecycle.PluginError as e:\n logger.error('Issue while loading plugin: {}'.format(e))\n", "path": "snapcraft/cmds.py"}]} |
gh_patches_debug_1478 | rasdani/github-patches | git_diff | ManimCommunity__manim-3510 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
keyword argument 'line_join'
## Description of bug / unexpected behavior
When I render the `LineJoints` scene from `basic.py` in the example scenes, it fails with `Mobject.__init__() got an unexpected keyword argument 'line_join'`.
## How to reproduce the issue
<!-- Provide a piece of code illustrating the undesired behavior. -->
<details><summary>Code for reproducing the problem</summary>
```py
class LineJoints(Scene):
def construct(self):
t1 = Triangle()
t2 = Triangle(line_join=LineJointType.ROUND)
t3 = Triangle(line_join=LineJointType.BEVEL)
grp = VGroup(t1, t2, t3).arrange(RIGHT)
grp.set(width=config.frame_width - 1)
self.add(grp)
```
</details>
## Logs
<details><summary>Visual Studio Code output</summary>
<!-- Add "-v DEBUG" when calling manim to generate more detailed logs -->
```
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ C:\tools\Manim\Lib\site-packages\manim\cli\render\commands.py:115 in render │
│ │
│ 112 │ │ │ try: │
│ 113 │ │ │ │ with tempconfig({}): │
│ 114 │ │ │ │ │ scene = SceneClass() │
│ ❱ 115 │ │ │ │ │ scene.render() │
│ 116 │ │ │ except Exception: │
│ 117 │ │ │ │ error_console.print_exception() │
│ 118 │ │ │ │ sys.exit(1) │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\scene\scene.py:223 in render │
│ │
│ 220 │ │ """ │
│ 221 │ │ self.setup() │
│ 222 │ │ try: │
│ ❱ 223 │ │ │ self.construct() │
│ 224 │ │ except EndSceneEarlyException: │
│ 225 │ │ │ pass │
│ 226 │ │ except RerunSceneException as e: │
│ │
│ C:\Users\HP\Documents\ManimCE\basic.py:170 in construct │
│ │
│ 167 class LineJoints(Scene): │
│ 168 │ def construct(self): │
│ 169 │ │ t1 = Triangle() │
│ ❱ 170 │ │ t2 = Triangle(line_join=LineJointType.ROUND) │
│ 171 │ │ t3 = Triangle(line_join=LineJointType.BEVEL) │
│ 172 │ │ │
│ 173 │ │ grp = VGroup(t1, t2, t3).arrange(RIGHT) │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\mobject\geometry\polygram.py:559 in __init__ │
│ │
│ 556 │ """ │
│ 557 │ │
│ 558 │ def __init__(self, **kwargs): │
│ ❱ 559 │ │ super().__init__(n=3, **kwargs) │
│ 560 │
│ 561 │
│ 562 class Rectangle(Polygon): │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\mobject\geometry\polygram.py:428 in __init__ │
│ │
│ 425 │ """ │
│ 426 │ │
│ 427 │ def __init__(self, n: int = 6, **kwargs): │
│ ❱ 428 │ │ super().__init__(n, density=1, **kwargs) │
│ 429 │
│ 430 │
│ 431 class Star(Polygon): │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\mobject\geometry\polygram.py:399 in __init__ │
│ │
│ 396 │ │ │ │
│ 397 │ │ │ vertex_groups.append(group) │
│ 398 │ │ │
│ ❱ 399 │ │ super().__init__(*vertex_groups, **kwargs) │
│ 400 │
│ 401 │
│ 402 class RegularPolygon(RegularPolygram): │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\mobject\geometry\polygram.py:69 in __init__ │
│ │
│ 66 │ """ │
│ 67 │ │
│ 68 │ def __init__(self, *vertex_groups: Iterable[Sequence[float]], color=BLUE, **kwargs): │
│ ❱ 69 │ │ super().__init__(color=color, **kwargs) │
│ 70 │ │ │
│ 71 │ │ for vertices in vertex_groups: │
│ 72 │ │ │ first_vertex, *vertices = vertices │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\mobject\types\vectorized_mobject.py:125 in __init__ │
│ │
│ 122 │ │ self.shade_in_3d = shade_in_3d │
│ 123 │ │ self.tolerance_for_point_equality = tolerance_for_point_equality │
│ 124 │ │ self.n_points_per_cubic_curve = n_points_per_cubic_curve │
│ ❱ 125 │ │ super().__init__(**kwargs) │
│ 126 │ │ │
│ 127 │ │ if fill_color: │
│ 128 │ │ │ self.fill_color = fill_color │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
TypeError: Mobject.__init__() got an unexpected keyword argument 'line_join'
```
</details>
<details><summary>CMD output</summary>
Traceback (most recent call last):
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\core\__init__.py", line 24, in <module>
from . import multiarray
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\core\multiarray.py", line 10, in <module>
from . import overrides
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\core\overrides.py", line 8, in <module>
from numpy.core._multiarray_umath import (
ModuleNotFoundError: No module named 'numpy.core._multiarray_umath'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\__init__.py", line 158, in <module>
from numpy.__config__ import show as show_config
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\__config__.py", line 4, in <module>
from numpy.core._multiarray_umath import (
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\core\__init__.py", line 50, in <module>
raise ImportError(msg)
ImportError:
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
Importing the numpy C-extensions failed. This error can happen for
many reasons, often due to issues with your setup or how NumPy was
installed.
We have compiled some common reasons and troubleshooting tips at:
https://numpy.org/devdocs/user/troubleshooting-importerror.html
Please note and check the following:
* The Python version is: Python3.11 from "C:\Users\HP\Documents\ManimCE\mce\Scripts\python.exe"
* The NumPy version is: "1.26.0"
and make sure that they are the versions you expect.
Please carefully study the documentation linked above for further help.
Original error was: No module named 'numpy.core._multiarray_umath'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<frozen runpy>", line 198, in _run_module_as_main
File "<frozen runpy>", line 88, in _run_code
File "C:\Users\HP\Documents\ManimCE\mce\Scripts\manim.exe\__main__.py", line 4, in <module>
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\manim\__init__.py", line 17, in <module>
from ._config import *
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\manim\_config\__init__.py", line 10, in <module>
from .utils import ManimConfig, ManimFrame, make_config_parser
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\manim\_config\utils.py", line 27, in <module>
import numpy as np
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\__init__.py", line 163, in <module>
raise ImportError(msg) from e
ImportError: Error importing numpy: you should not try to import numpy from
its source directory; please exit the numpy source tree, and relaunch
your python interpreter from there.
</details>
## System specifications
<details><summary>System Details</summary>
- OS Windows 10
- Python version (3.11.5)
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `example_scenes/basic.py`
Content:
```
1 #!/usr/bin/env python
2
3
4 from manim import *
5
6 # To watch one of these scenes, run the following:
7 # python --quality m manim -p example_scenes.py SquareToCircle
8 #
9 # Use the flag --quality l for a faster rendering at a lower quality.
10 # Use -s to skip to the end and just save the final frame
11 # Use the -p to have preview of the animation (or image, if -s was
12 # used) pop up once done.
13 # Use -n <number> to skip ahead to the nth animation of a scene.
14 # Use -r <number> to specify a resolution (for example, -r 1920,1080
15 # for a 1920x1080 video)
16
17
18 class OpeningManim(Scene):
19 def construct(self):
20 title = Tex(r"This is some \LaTeX")
21 basel = MathTex(r"\sum_{n=1}^\infty \frac{1}{n^2} = \frac{\pi^2}{6}")
22 VGroup(title, basel).arrange(DOWN)
23 self.play(
24 Write(title),
25 FadeIn(basel, shift=DOWN),
26 )
27 self.wait()
28
29 transform_title = Tex("That was a transform")
30 transform_title.to_corner(UP + LEFT)
31 self.play(
32 Transform(title, transform_title),
33 LaggedStart(*(FadeOut(obj, shift=DOWN) for obj in basel)),
34 )
35 self.wait()
36
37 grid = NumberPlane()
38 grid_title = Tex("This is a grid", font_size=72)
39 grid_title.move_to(transform_title)
40
41 self.add(grid, grid_title) # Make sure title is on top of grid
42 self.play(
43 FadeOut(title),
44 FadeIn(grid_title, shift=UP),
45 Create(grid, run_time=3, lag_ratio=0.1),
46 )
47 self.wait()
48
49 grid_transform_title = Tex(
50 r"That was a non-linear function \\ applied to the grid",
51 )
52 grid_transform_title.move_to(grid_title, UL)
53 grid.prepare_for_nonlinear_transform()
54 self.play(
55 grid.animate.apply_function(
56 lambda p: p
57 + np.array(
58 [
59 np.sin(p[1]),
60 np.sin(p[0]),
61 0,
62 ],
63 ),
64 ),
65 run_time=3,
66 )
67 self.wait()
68 self.play(Transform(grid_title, grid_transform_title))
69 self.wait()
70
71
72 class SquareToCircle(Scene):
73 def construct(self):
74 circle = Circle()
75 square = Square()
76 square.flip(RIGHT)
77 square.rotate(-3 * TAU / 8)
78 circle.set_fill(PINK, opacity=0.5)
79
80 self.play(Create(square))
81 self.play(Transform(square, circle))
82 self.play(FadeOut(square))
83
84
85 class WarpSquare(Scene):
86 def construct(self):
87 square = Square()
88 self.play(
89 ApplyPointwiseFunction(
90 lambda point: complex_to_R3(np.exp(R3_to_complex(point))),
91 square,
92 ),
93 )
94 self.wait()
95
96
97 class WriteStuff(Scene):
98 def construct(self):
99 example_text = Tex("This is a some text", tex_to_color_map={"text": YELLOW})
100 example_tex = MathTex(
101 "\\sum_{k=1}^\\infty {1 \\over k^2} = {\\pi^2 \\over 6}",
102 )
103 group = VGroup(example_text, example_tex)
104 group.arrange(DOWN)
105 group.width = config["frame_width"] - 2 * LARGE_BUFF
106
107 self.play(Write(example_text))
108 self.play(Write(example_tex))
109 self.wait()
110
111
112 class UpdatersExample(Scene):
113 def construct(self):
114 decimal = DecimalNumber(
115 0,
116 show_ellipsis=True,
117 num_decimal_places=3,
118 include_sign=True,
119 )
120 square = Square().to_edge(UP)
121
122 decimal.add_updater(lambda d: d.next_to(square, RIGHT))
123 decimal.add_updater(lambda d: d.set_value(square.get_center()[1]))
124 self.add(square, decimal)
125 self.play(
126 square.animate.to_edge(DOWN),
127 rate_func=there_and_back,
128 run_time=5,
129 )
130 self.wait()
131
132
133 class SpiralInExample(Scene):
134 def construct(self):
135 logo_green = "#81b29a"
136 logo_blue = "#454866"
137 logo_red = "#e07a5f"
138
139 font_color = "#ece6e2"
140
141 pi = MathTex(r"\pi").scale(7).set_color(font_color)
142 pi.shift(2.25 * LEFT + 1.5 * UP)
143
144 circle = Circle(color=logo_green, fill_opacity=0.7, stroke_width=0).shift(LEFT)
145 square = Square(color=logo_blue, fill_opacity=0.8, stroke_width=0).shift(UP)
146 triangle = Triangle(color=logo_red, fill_opacity=0.9, stroke_width=0).shift(
147 RIGHT
148 )
149 pentagon = Polygon(
150 *[
151 [np.cos(2 * np.pi / 5 * i), np.sin(2 * np.pi / 5 * i), 0]
152 for i in range(5)
153 ],
154 color=PURPLE_B,
155 fill_opacity=1,
156 stroke_width=0
157 ).shift(UP + 2 * RIGHT)
158 shapes = VGroup(triangle, square, circle, pentagon, pi)
159 self.play(SpiralIn(shapes, fade_in_fraction=0.9))
160 self.wait()
161 self.play(FadeOut(shapes))
162
163
164 Triangle.set_default(stroke_width=20)
165
166
167 class LineJoints(Scene):
168 def construct(self):
169 t1 = Triangle()
170 t2 = Triangle(line_join=LineJointType.ROUND)
171 t3 = Triangle(line_join=LineJointType.BEVEL)
172
173 grp = VGroup(t1, t2, t3).arrange(RIGHT)
174 grp.set(width=config.frame_width - 1)
175
176 self.add(grp)
177
178
179 # See many more examples at https://docs.manim.community/en/stable/examples.html
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/example_scenes/basic.py b/example_scenes/basic.py
--- a/example_scenes/basic.py
+++ b/example_scenes/basic.py
@@ -167,8 +167,8 @@
class LineJoints(Scene):
def construct(self):
t1 = Triangle()
- t2 = Triangle(line_join=LineJointType.ROUND)
- t3 = Triangle(line_join=LineJointType.BEVEL)
+ t2 = Triangle(joint_type=LineJointType.ROUND)
+ t3 = Triangle(joint_type=LineJointType.BEVEL)
grp = VGroup(t1, t2, t3).arrange(RIGHT)
grp.set(width=config.frame_width - 1)
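For quick reference, the corrected scene that the patch above yields looks like the sketch below. It assumes a working Manim Community install; the `joint_type` keyword and the `LineJointType` enum are taken directly from the diff, and the `Triangle.set_default(stroke_width=20)` call mirrors the example file so the joint styles are actually visible.

```python
from manim import *  # assumes Manim Community is installed

Triangle.set_default(stroke_width=20)  # thick strokes make the joint styles visible


class LineJoints(Scene):
    def construct(self):
        t1 = Triangle()
        # `joint_type` is the keyword the VMobject API accepts, not `line_join`
        t2 = Triangle(joint_type=LineJointType.ROUND)
        t3 = Triangle(joint_type=LineJointType.BEVEL)

        grp = VGroup(t1, t2, t3).arrange(RIGHT)
        grp.set(width=config.frame_width - 1)

        self.add(grp)
```

Rendering it with `manim -pql basic.py LineJoints` should show the default, round, and bevel joints side by side.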
| {"golden_diff": "diff --git a/example_scenes/basic.py b/example_scenes/basic.py\n--- a/example_scenes/basic.py\n+++ b/example_scenes/basic.py\n@@ -167,8 +167,8 @@\n class LineJoints(Scene):\n def construct(self):\n t1 = Triangle()\n- t2 = Triangle(line_join=LineJointType.ROUND)\n- t3 = Triangle(line_join=LineJointType.BEVEL)\n+ t2 = Triangle(joint_type=LineJointType.ROUND)\n+ t3 = Triangle(joint_type=LineJointType.BEVEL)\n \n grp = VGroup(t1, t2, t3).arrange(RIGHT)\n grp.set(width=config.frame_width - 1)\n", "issue": "keyword argument 'line_join'\n## Description of bug / unexpected behavior\r\nWhen I rendering the line joint scene from basic.py from the example scene it shows Mobject.__init__() got an unexpected keyword argument 'line_join'\r\n\r\n## How to reproduce the issue\r\n<!-- Provide a piece of code illustrating the undesired behavior. -->\r\n\r\n<details><summary>Code for reproducing the problem</summary>\r\n\r\n```py\r\nclass LineJoints(Scene):\r\n def construct(self):\r\n t1 = Triangle()\r\n t2 = Triangle(line_join=LineJointType.ROUND)\r\n t3 = Triangle(line_join=LineJointType.BEVEL)\r\n\r\n grp = VGroup(t1, t2, t3).arrange(RIGHT)\r\n grp.set(width=config.frame_width - 1)\r\n\r\n self.add(grp)\r\n```\r\n\r\n</details>\r\n\r\n## Logs\r\n<details><summary>Virtual Code Studio output</summary>\r\n<!-- Add \"-v DEBUG\" when calling manim to generate more detailed logs -->\r\n\r\n```\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 Traceback (most recent call last) \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 C:\\tools\\Manim\\Lib\\site-packages\\manim\\cli\\render\\commands.py:115 in render \u2502\r\n\u2502 \u2502\r\n\u2502 112 \u2502 \u2502 \u2502 try: \u2502\r\n\u2502 113 \u2502 \u2502 \u2502 \u2502 with tempconfig({}): \u2502\r\n\u2502 114 \u2502 \u2502 \u2502 \u2502 \u2502 scene = SceneClass() \u2502\r\n\u2502 \u2771 115 \u2502 \u2502 \u2502 \u2502 \u2502 scene.render() \u2502\r\n\u2502 116 \u2502 \u2502 \u2502 except Exception: \u2502\r\n\u2502 117 \u2502 \u2502 \u2502 \u2502 error_console.print_exception() \u2502\r\n\u2502 118 \u2502 \u2502 \u2502 \u2502 sys.exit(1) \u2502\r\n\u2502 \u2502\r\n\u2502 C:\\tools\\Manim\\Lib\\site-packages\\manim\\scene\\scene.py:223 in render \u2502\r\n\u2502 \u2502\r\n\u2502 220 \u2502 \u2502 \"\"\" \u2502\r\n\u2502 221 \u2502 \u2502 self.setup() \u2502\r\n\u2502 222 \u2502 \u2502 try: \u2502\r\n\u2502 \u2771 223 \u2502 \u2502 \u2502 self.construct() \u2502\r\n\u2502 224 \u2502 \u2502 except EndSceneEarlyException: \u2502\r\n\u2502 225 \u2502 \u2502 \u2502 pass \u2502\r\n\u2502 226 \u2502 \u2502 except RerunSceneException as e: \u2502\r\n\u2502 \u2502\r\n\u2502 C:\\Users\\HP\\Documents\\ManimCE\\basic.py:170 in construct \u2502\r\n\u2502 \u2502\r\n\u2502 167 class LineJoints(Scene): \u2502\r\n\u2502 168 \u2502 def construct(self): \u2502\r\n\u2502 169 \u2502 \u2502 t1 = Triangle() \u2502\r\n\u2502 \u2771 170 \u2502 \u2502 t2 = Triangle(line_join=LineJointType.ROUND) \u2502\r\n\u2502 171 \u2502 \u2502 t3 = Triangle(line_join=LineJointType.BEVEL) \u2502\r\n\u2502 172 \u2502 \u2502 \u2502\r\n\u2502 173 \u2502 \u2502 grp = VGroup(t1, t2, t3).arrange(RIGHT) \u2502\r\n\u2502 \u2502\r\n\u2502 
C:\\tools\\Manim\\Lib\\site-packages\\manim\\mobject\\geometry\\polygram.py:559 in __init__ \u2502\r\n\u2502 \u2502\r\n\u2502 556 \u2502 \"\"\" \u2502\r\n\u2502 557 \u2502 \u2502\r\n\u2502 558 \u2502 def __init__(self, **kwargs): \u2502\r\n\u2502 \u2771 559 \u2502 \u2502 super().__init__(n=3, **kwargs) \u2502\r\n\u2502 560 \u2502\r\n\u2502 561 \u2502\r\n\u2502 562 class Rectangle(Polygon): \u2502\r\n\u2502 \u2502\r\n\u2502 C:\\tools\\Manim\\Lib\\site-packages\\manim\\mobject\\geometry\\polygram.py:428 in __init__ \u2502\r\n\u2502 \u2502\r\n\u2502 425 \u2502 \"\"\" \u2502\r\n\u2502 426 \u2502 \u2502\r\n\u2502 427 \u2502 def __init__(self, n: int = 6, **kwargs): \u2502\r\n\u2502 \u2771 428 \u2502 \u2502 super().__init__(n, density=1, **kwargs) \u2502\r\n\u2502 429 \u2502\r\n\u2502 430 \u2502\r\n\u2502 431 class Star(Polygon): \u2502\r\n\u2502 \u2502\r\n\u2502 C:\\tools\\Manim\\Lib\\site-packages\\manim\\mobject\\geometry\\polygram.py:399 in __init__ \u2502\r\n\u2502 \u2502\r\n\u2502 396 \u2502 \u2502 \u2502 \u2502\r\n\u2502 397 \u2502 \u2502 \u2502 vertex_groups.append(group) \u2502\r\n\u2502 398 \u2502 \u2502 \u2502\r\n\u2502 \u2771 399 \u2502 \u2502 super().__init__(*vertex_groups, **kwargs) \u2502\r\n\u2502 400 \u2502\r\n\u2502 401 \u2502\r\n\u2502 402 class RegularPolygon(RegularPolygram): \u2502\r\n\u2502 \u2502\r\n\u2502 C:\\tools\\Manim\\Lib\\site-packages\\manim\\mobject\\geometry\\polygram.py:69 in __init__ \u2502\r\n\u2502 \u2502\r\n\u2502 66 \u2502 \"\"\" \u2502\r\n\u2502 67 \u2502 \u2502\r\n\u2502 68 \u2502 def __init__(self, *vertex_groups: Iterable[Sequence[float]], color=BLUE, **kwargs): \u2502\r\n\u2502 \u2771 69 \u2502 \u2502 super().__init__(color=color, **kwargs) \u2502\r\n\u2502 70 \u2502 \u2502 \u2502\r\n\u2502 71 \u2502 \u2502 for vertices in vertex_groups: \u2502\r\n\u2502 72 \u2502 \u2502 \u2502 first_vertex, *vertices = vertices \u2502\r\n\u2502 \u2502\r\n\u2502 C:\\tools\\Manim\\Lib\\site-packages\\manim\\mobject\\types\\vectorized_mobject.py:125 in __init__ \u2502\r\n\u2502 \u2502\r\n\u2502 122 \u2502 \u2502 self.shade_in_3d = shade_in_3d \u2502\r\n\u2502 123 \u2502 \u2502 self.tolerance_for_point_equality = tolerance_for_point_equality \u2502\r\n\u2502 124 \u2502 \u2502 self.n_points_per_cubic_curve = n_points_per_cubic_curve \u2502\r\n\u2502 \u2771 125 \u2502 \u2502 super().__init__(**kwargs) \u2502\r\n\u2502 126 \u2502 \u2502 \u2502\r\n\u2502 127 \u2502 \u2502 if fill_color: \u2502\r\n\u2502 128 \u2502 \u2502 \u2502 self.fill_color = fill_color \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\nTypeError: Mobject.__init__() got an unexpected keyword argument 'line_join'\r\n```\r\n</details>\r\n\r\n<details><summary>CMD output</summary>\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Lib\\site-packages\\numpy\\core\\__init__.py\", line 24, in <module>\r\n from . 
import multiarray\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Lib\\site-packages\\numpy\\core\\multiarray.py\", line 10, in <module>\r\n from . import overrides\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Lib\\site-packages\\numpy\\core\\overrides.py\", line 8, in <module>\r\n from numpy.core._multiarray_umath import (\r\nModuleNotFoundError: No module named 'numpy.core._multiarray_umath'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Lib\\site-packages\\numpy\\__init__.py\", line 158, in <module>\r\n from numpy.__config__ import show as show_config\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Lib\\site-packages\\numpy\\__config__.py\", line 4, in <module>\r\n from numpy.core._multiarray_umath import (\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Lib\\site-packages\\numpy\\core\\__init__.py\", line 50, in <module>\r\n raise ImportError(msg)\r\nImportError:\r\n\r\nIMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!\r\n\r\nImporting the numpy C-extensions failed. This error can happen for\r\nmany reasons, often due to issues with your setup or how NumPy was\r\ninstalled.\r\n\r\nWe have compiled some common reasons and troubleshooting tips at:\r\n\r\n https://numpy.org/devdocs/user/troubleshooting-importerror.html\r\n\r\nPlease note and check the following:\r\n\r\n * The Python version is: Python3.11 from \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Scripts\\python.exe\"\r\n * The NumPy version is: \"1.26.0\"\r\n\r\nand make sure that they are the versions you expect.\r\nPlease carefully study the documentation linked above for further help.\r\n\r\nOriginal error was: No module named 'numpy.core._multiarray_umath'\r\n\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"<frozen runpy>\", line 198, in _run_module_as_main\r\n File \"<frozen runpy>\", line 88, in _run_code\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Scripts\\manim.exe\\__main__.py\", line 4, in <module>\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Lib\\site-packages\\manim\\__init__.py\", line 17, in <module>\r\n from ._config import *\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Lib\\site-packages\\manim\\_config\\__init__.py\", line 10, in <module>\r\n from .utils import ManimConfig, ManimFrame, make_config_parser\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Lib\\site-packages\\manim\\_config\\utils.py\", line 27, in <module>\r\n import numpy as np\r\n File \"C:\\Users\\HP\\Documents\\ManimCE\\mce\\Lib\\site-packages\\numpy\\__init__.py\", line 163, in <module>\r\n raise ImportError(msg) from e\r\nImportError: Error importing numpy: you should not try to import numpy from\r\n its source directory; please exit the numpy source tree, and relaunch\r\n your python interpreter from there.\r\n</details>\r\n\r\n## System specifications\r\n\r\n<details><summary>System Details</summary>\r\n\r\n- OS Windows 10\r\n- Python version (3.11.5)\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\nfrom manim import *\n\n# To watch one of these scenes, run the following:\n# python --quality m manim -p example_scenes.py SquareToCircle\n#\n# Use the flag --quality l for a faster rendering at a lower quality.\n# Use -s to skip to the end and just save the final frame\n# Use the -p to have preview of the animation (or image, if -s was\n# used) pop up once done.\n# Use -n 
<number> to skip ahead to the nth animation of a scene.\n# Use -r <number> to specify a resolution (for example, -r 1920,1080\n# for a 1920x1080 video)\n\n\nclass OpeningManim(Scene):\n def construct(self):\n title = Tex(r\"This is some \\LaTeX\")\n basel = MathTex(r\"\\sum_{n=1}^\\infty \\frac{1}{n^2} = \\frac{\\pi^2}{6}\")\n VGroup(title, basel).arrange(DOWN)\n self.play(\n Write(title),\n FadeIn(basel, shift=DOWN),\n )\n self.wait()\n\n transform_title = Tex(\"That was a transform\")\n transform_title.to_corner(UP + LEFT)\n self.play(\n Transform(title, transform_title),\n LaggedStart(*(FadeOut(obj, shift=DOWN) for obj in basel)),\n )\n self.wait()\n\n grid = NumberPlane()\n grid_title = Tex(\"This is a grid\", font_size=72)\n grid_title.move_to(transform_title)\n\n self.add(grid, grid_title) # Make sure title is on top of grid\n self.play(\n FadeOut(title),\n FadeIn(grid_title, shift=UP),\n Create(grid, run_time=3, lag_ratio=0.1),\n )\n self.wait()\n\n grid_transform_title = Tex(\n r\"That was a non-linear function \\\\ applied to the grid\",\n )\n grid_transform_title.move_to(grid_title, UL)\n grid.prepare_for_nonlinear_transform()\n self.play(\n grid.animate.apply_function(\n lambda p: p\n + np.array(\n [\n np.sin(p[1]),\n np.sin(p[0]),\n 0,\n ],\n ),\n ),\n run_time=3,\n )\n self.wait()\n self.play(Transform(grid_title, grid_transform_title))\n self.wait()\n\n\nclass SquareToCircle(Scene):\n def construct(self):\n circle = Circle()\n square = Square()\n square.flip(RIGHT)\n square.rotate(-3 * TAU / 8)\n circle.set_fill(PINK, opacity=0.5)\n\n self.play(Create(square))\n self.play(Transform(square, circle))\n self.play(FadeOut(square))\n\n\nclass WarpSquare(Scene):\n def construct(self):\n square = Square()\n self.play(\n ApplyPointwiseFunction(\n lambda point: complex_to_R3(np.exp(R3_to_complex(point))),\n square,\n ),\n )\n self.wait()\n\n\nclass WriteStuff(Scene):\n def construct(self):\n example_text = Tex(\"This is a some text\", tex_to_color_map={\"text\": YELLOW})\n example_tex = MathTex(\n \"\\\\sum_{k=1}^\\\\infty {1 \\\\over k^2} = {\\\\pi^2 \\\\over 6}\",\n )\n group = VGroup(example_text, example_tex)\n group.arrange(DOWN)\n group.width = config[\"frame_width\"] - 2 * LARGE_BUFF\n\n self.play(Write(example_text))\n self.play(Write(example_tex))\n self.wait()\n\n\nclass UpdatersExample(Scene):\n def construct(self):\n decimal = DecimalNumber(\n 0,\n show_ellipsis=True,\n num_decimal_places=3,\n include_sign=True,\n )\n square = Square().to_edge(UP)\n\n decimal.add_updater(lambda d: d.next_to(square, RIGHT))\n decimal.add_updater(lambda d: d.set_value(square.get_center()[1]))\n self.add(square, decimal)\n self.play(\n square.animate.to_edge(DOWN),\n rate_func=there_and_back,\n run_time=5,\n )\n self.wait()\n\n\nclass SpiralInExample(Scene):\n def construct(self):\n logo_green = \"#81b29a\"\n logo_blue = \"#454866\"\n logo_red = \"#e07a5f\"\n\n font_color = \"#ece6e2\"\n\n pi = MathTex(r\"\\pi\").scale(7).set_color(font_color)\n pi.shift(2.25 * LEFT + 1.5 * UP)\n\n circle = Circle(color=logo_green, fill_opacity=0.7, stroke_width=0).shift(LEFT)\n square = Square(color=logo_blue, fill_opacity=0.8, stroke_width=0).shift(UP)\n triangle = Triangle(color=logo_red, fill_opacity=0.9, stroke_width=0).shift(\n RIGHT\n )\n pentagon = Polygon(\n *[\n [np.cos(2 * np.pi / 5 * i), np.sin(2 * np.pi / 5 * i), 0]\n for i in range(5)\n ],\n color=PURPLE_B,\n fill_opacity=1,\n stroke_width=0\n ).shift(UP + 2 * RIGHT)\n shapes = VGroup(triangle, square, circle, pentagon, pi)\n 
self.play(SpiralIn(shapes, fade_in_fraction=0.9))\n self.wait()\n self.play(FadeOut(shapes))\n\n\nTriangle.set_default(stroke_width=20)\n\n\nclass LineJoints(Scene):\n def construct(self):\n t1 = Triangle()\n t2 = Triangle(line_join=LineJointType.ROUND)\n t3 = Triangle(line_join=LineJointType.BEVEL)\n\n grp = VGroup(t1, t2, t3).arrange(RIGHT)\n grp.set(width=config.frame_width - 1)\n\n self.add(grp)\n\n\n# See many more examples at https://docs.manim.community/en/stable/examples.html\n", "path": "example_scenes/basic.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\nfrom manim import *\n\n# To watch one of these scenes, run the following:\n# python --quality m manim -p example_scenes.py SquareToCircle\n#\n# Use the flag --quality l for a faster rendering at a lower quality.\n# Use -s to skip to the end and just save the final frame\n# Use the -p to have preview of the animation (or image, if -s was\n# used) pop up once done.\n# Use -n <number> to skip ahead to the nth animation of a scene.\n# Use -r <number> to specify a resolution (for example, -r 1920,1080\n# for a 1920x1080 video)\n\n\nclass OpeningManim(Scene):\n def construct(self):\n title = Tex(r\"This is some \\LaTeX\")\n basel = MathTex(r\"\\sum_{n=1}^\\infty \\frac{1}{n^2} = \\frac{\\pi^2}{6}\")\n VGroup(title, basel).arrange(DOWN)\n self.play(\n Write(title),\n FadeIn(basel, shift=DOWN),\n )\n self.wait()\n\n transform_title = Tex(\"That was a transform\")\n transform_title.to_corner(UP + LEFT)\n self.play(\n Transform(title, transform_title),\n LaggedStart(*(FadeOut(obj, shift=DOWN) for obj in basel)),\n )\n self.wait()\n\n grid = NumberPlane()\n grid_title = Tex(\"This is a grid\", font_size=72)\n grid_title.move_to(transform_title)\n\n self.add(grid, grid_title) # Make sure title is on top of grid\n self.play(\n FadeOut(title),\n FadeIn(grid_title, shift=UP),\n Create(grid, run_time=3, lag_ratio=0.1),\n )\n self.wait()\n\n grid_transform_title = Tex(\n r\"That was a non-linear function \\\\ applied to the grid\",\n )\n grid_transform_title.move_to(grid_title, UL)\n grid.prepare_for_nonlinear_transform()\n self.play(\n grid.animate.apply_function(\n lambda p: p\n + np.array(\n [\n np.sin(p[1]),\n np.sin(p[0]),\n 0,\n ],\n ),\n ),\n run_time=3,\n )\n self.wait()\n self.play(Transform(grid_title, grid_transform_title))\n self.wait()\n\n\nclass SquareToCircle(Scene):\n def construct(self):\n circle = Circle()\n square = Square()\n square.flip(RIGHT)\n square.rotate(-3 * TAU / 8)\n circle.set_fill(PINK, opacity=0.5)\n\n self.play(Create(square))\n self.play(Transform(square, circle))\n self.play(FadeOut(square))\n\n\nclass WarpSquare(Scene):\n def construct(self):\n square = Square()\n self.play(\n ApplyPointwiseFunction(\n lambda point: complex_to_R3(np.exp(R3_to_complex(point))),\n square,\n ),\n )\n self.wait()\n\n\nclass WriteStuff(Scene):\n def construct(self):\n example_text = Tex(\"This is a some text\", tex_to_color_map={\"text\": YELLOW})\n example_tex = MathTex(\n \"\\\\sum_{k=1}^\\\\infty {1 \\\\over k^2} = {\\\\pi^2 \\\\over 6}\",\n )\n group = VGroup(example_text, example_tex)\n group.arrange(DOWN)\n group.width = config[\"frame_width\"] - 2 * LARGE_BUFF\n\n self.play(Write(example_text))\n self.play(Write(example_tex))\n self.wait()\n\n\nclass UpdatersExample(Scene):\n def construct(self):\n decimal = DecimalNumber(\n 0,\n show_ellipsis=True,\n num_decimal_places=3,\n include_sign=True,\n )\n square = Square().to_edge(UP)\n\n decimal.add_updater(lambda d: d.next_to(square, RIGHT))\n 
decimal.add_updater(lambda d: d.set_value(square.get_center()[1]))\n self.add(square, decimal)\n self.play(\n square.animate.to_edge(DOWN),\n rate_func=there_and_back,\n run_time=5,\n )\n self.wait()\n\n\nclass SpiralInExample(Scene):\n def construct(self):\n logo_green = \"#81b29a\"\n logo_blue = \"#454866\"\n logo_red = \"#e07a5f\"\n\n font_color = \"#ece6e2\"\n\n pi = MathTex(r\"\\pi\").scale(7).set_color(font_color)\n pi.shift(2.25 * LEFT + 1.5 * UP)\n\n circle = Circle(color=logo_green, fill_opacity=0.7, stroke_width=0).shift(LEFT)\n square = Square(color=logo_blue, fill_opacity=0.8, stroke_width=0).shift(UP)\n triangle = Triangle(color=logo_red, fill_opacity=0.9, stroke_width=0).shift(\n RIGHT\n )\n pentagon = Polygon(\n *[\n [np.cos(2 * np.pi / 5 * i), np.sin(2 * np.pi / 5 * i), 0]\n for i in range(5)\n ],\n color=PURPLE_B,\n fill_opacity=1,\n stroke_width=0\n ).shift(UP + 2 * RIGHT)\n shapes = VGroup(triangle, square, circle, pentagon, pi)\n self.play(SpiralIn(shapes, fade_in_fraction=0.9))\n self.wait()\n self.play(FadeOut(shapes))\n\n\nTriangle.set_default(stroke_width=20)\n\n\nclass LineJoints(Scene):\n def construct(self):\n t1 = Triangle()\n t2 = Triangle(joint_type=LineJointType.ROUND)\n t3 = Triangle(joint_type=LineJointType.BEVEL)\n\n grp = VGroup(t1, t2, t3).arrange(RIGHT)\n grp.set(width=config.frame_width - 1)\n\n self.add(grp)\n\n\n# See many more examples at https://docs.manim.community/en/stable/examples.html\n", "path": "example_scenes/basic.py"}]} |
gh_patches_debug_1479 | rasdani/github-patches | git_diff | deepchecks__deepchecks-728 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] resources/suite_output.html file is missing when installing not via git
**Describe the bug**
can't use `save_as_html()` because the `suite_output.html` resource file is missing from the installed package
**To Reproduce**
pip install deepchecks
suite_result.save_as_html()
**Expected behavior**
The suite result should be saved as an HTML file.
**Environment (please complete the following information):**
- OS: linux
- Python Version: 3.7
- Deepchecks Version: 0.3.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """
12
13 |build| |Documentation Status| |pkgVersion| |pyVersions|
14 |Maintainability| |Coverage Status|
15
16 .. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png
17 :target: https://github.com/deepchecks/deepchecks
18
19 Deepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.
20 This includes checks related to various types of issues, such as model performance, data integrity,
21 distribution mismatches, and more.
22
23 What Do You Need in Order to Start Validating?
24 ----------------------------------------------
25
26 Depending on your phase and what you wise to validate, you'll need a
27 subset of the following:
28
29 - Raw data (before pre-processing such as OHE, string processing,
30 etc.), with optional labels
31
32 - The model's training data with labels
33
34 - Test data (which the model isn't exposed to) with labels
35
36 - A model compatible with scikit-learn API that you wish to validate
37 (e.g. RandomForest, XGBoost)
38
39 Deepchecks validation accompanies you from the initial phase when you
40 have only raw data, through the data splits, and to the final stage of
41 having a trained model that you wish to evaluate. Accordingly, each
42 phase requires different assets for the validation. See more about
43 typical usage scenarios and the built-in suites in the
44 `docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.
45
46 Installation
47 ------------
48
49 Using pip
50 ~~~~~~~~~
51
52 .. code:: bash
53
54 pip install deepchecks #--upgrade --user
55
56 Using conda
57 ~~~~~~~~~~~
58
59 .. code:: bash
60
61 conda install -c deepchecks deepchecks
62
63 .. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg
64 .. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest
65 :target: https://docs.deepchecks.com/en/latest/?badge=latest
66 .. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks
67 .. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks
68 .. |Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability
69 :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability
70 .. |Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main
71 :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main
72
73 """
74
75 import setuptools
76 from setuptools import setup
77 from distutils.util import convert_path
78 import os
79
80 main_ns = {}
81 DOCLINES = (__doc__ or '').split("\n")
82
83 with open(os.path.join('./', 'VERSION')) as version_file:
84 VER = version_file.read().strip()
85
86 requirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'
87 install_requires = []
88 if os.path.isfile(requirementPath):
89 with open(requirementPath) as f:
90 install_requires = f.read().splitlines()
91
92
93
94
95 setup(
96 name='deepchecks',
97 version=VER,
98 packages=setuptools.find_packages(),
99 install_requires=install_requires,
100 license_files = ('LICENSE', ),
101 description = DOCLINES[0],
102 long_description="\n".join(DOCLINES[2:]),
103 author = 'deepchecks',
104 author_email = '[email protected]',
105 url = 'https://github.com/deepchecks/deepchecks',
106 download_url = "https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz".format(VER),
107 keywords = ['Software Development', 'Machine Learning'],
108 include_package_data=True,
109 classifiers = [
110 'Intended Audience :: Developers',
111 'Intended Audience :: Science/Research',
112 'Topic :: Software Development',
113 'Topic :: Scientific/Engineering',
114 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
115 'Programming Language :: Python :: 3',
116 'Programming Language :: Python :: 3.6',
117 'Programming Language :: Python :: 3.7',
118 'Programming Language :: Python :: 3.8',
119 'Programming Language :: Python :: 3.9',
120 'Programming Language :: Python :: 3.10',
121 ],
122 )
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -74,7 +74,6 @@
import setuptools
from setuptools import setup
-from distutils.util import convert_path
import os
main_ns = {}
@@ -89,9 +88,6 @@
with open(requirementPath) as f:
install_requires = f.read().splitlines()
-
-
-
setup(
name='deepchecks',
version=VER,
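For background on the underlying problem (a non-Python template that does not end up in the pip-installed package), the sketch below shows one common way such resources are declared with setuptools. The path pattern is an assumption for illustration only; it is not taken from the repository, and this is not necessarily the change the project shipped.

```python
# Sketch only: the resource path is assumed, not taken from the deepchecks repo.
import setuptools
from setuptools import setup

setup(
    name='deepchecks',
    packages=setuptools.find_packages(),
    # Files matched here are bundled into sdists and wheels, so templates such
    # as resources/suite_output.html still exist after `pip install deepchecks`.
    include_package_data=True,
    package_data={'deepchecks': ['*/resources/*.html']},
)
```

With `include_package_data=True` alone, the template also has to be listed in `MANIFEST.in` (or tracked by an SCM plugin) to be picked up, which is why an explicit `package_data` entry is a common belt-and-braces choice.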
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -74,7 +74,6 @@\n \n import setuptools\n from setuptools import setup\n-from distutils.util import convert_path\n import os\n \n main_ns = {}\n@@ -89,9 +88,6 @@\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n \n-\n-\n-\n setup(\n name='deepchecks',\n version=VER,\n", "issue": "[BUG] resources/suite_output.html file is missing when installing not via git\n**Describe the bug**\r\ncan't use save_as_html because suite_output.html file is missing\r\n\r\n**To Reproduce**\r\npip install deepchecks\r\nsuite_result.save_as_html()\r\n\r\n**Expected behavior**\r\nsave as html\r\n\r\n**Environment (please complete the following information):**\r\n - OS: linux\r\n - Python Version: 3.7\r\n - Deepchecks Version: 0.3.1\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"\n\n|build| |Documentation Status| |pkgVersion| |pyVersions|\n|Maintainability| |Coverage Status|\n\n.. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png\n :target: https://github.com/deepchecks/deepchecks\n\nDeepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.\nThis includes checks related to various types of issues, such as model performance, data integrity,\ndistribution mismatches, and more.\n\nWhat Do You Need in Order to Start Validating?\n----------------------------------------------\n\nDepending on your phase and what you wise to validate, you'll need a\nsubset of the following:\n\n- Raw data (before pre-processing such as OHE, string processing,\n etc.), with optional labels\n\n- The model's training data with labels\n\n- Test data (which the model isn't exposed to) with labels\n\n- A model compatible with scikit-learn API that you wish to validate\n (e.g. RandomForest, XGBoost)\n\nDeepchecks validation accompanies you from the initial phase when you\nhave only raw data, through the data splits, and to the final stage of\nhaving a trained model that you wish to evaluate. Accordingly, each\nphase requires different assets for the validation. See more about\ntypical usage scenarios and the built-in suites in the\n`docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.\n\nInstallation\n------------\n\nUsing pip\n~~~~~~~~~\n\n.. code:: bash\n\n pip install deepchecks #--upgrade --user\n\nUsing conda\n~~~~~~~~~~~\n\n.. code:: bash\n\n conda install -c deepchecks deepchecks\n\n.. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg\n.. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest\n :target: https://docs.deepchecks.com/en/latest/?badge=latest\n.. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks\n.. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks\n.. 
|Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability\n :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability\n.. |Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main\n :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main\n\n\"\"\"\n\nimport setuptools\nfrom setuptools import setup\nfrom distutils.util import convert_path\nimport os\n\nmain_ns = {}\nDOCLINES = (__doc__ or '').split(\"\\n\")\n\nwith open(os.path.join('./', 'VERSION')) as version_file:\n VER = version_file.read().strip()\n\nrequirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'\ninstall_requires = []\nif os.path.isfile(requirementPath):\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n\n\n\n\nsetup(\n name='deepchecks',\n version=VER,\n packages=setuptools.find_packages(),\n install_requires=install_requires,\n license_files = ('LICENSE', ),\n description = DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author = 'deepchecks', \n author_email = '[email protected]', \n url = 'https://github.com/deepchecks/deepchecks',\n download_url = \"https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz\".format(VER),\n keywords = ['Software Development', 'Machine Learning'],\n include_package_data=True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"\n\n|build| |Documentation Status| |pkgVersion| |pyVersions|\n|Maintainability| |Coverage Status|\n\n.. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png\n :target: https://github.com/deepchecks/deepchecks\n\nDeepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.\nThis includes checks related to various types of issues, such as model performance, data integrity,\ndistribution mismatches, and more.\n\nWhat Do You Need in Order to Start Validating?\n----------------------------------------------\n\nDepending on your phase and what you wise to validate, you'll need a\nsubset of the following:\n\n- Raw data (before pre-processing such as OHE, string processing,\n etc.), with optional labels\n\n- The model's training data with labels\n\n- Test data (which the model isn't exposed to) with labels\n\n- A model compatible with scikit-learn API that you wish to validate\n (e.g. 
RandomForest, XGBoost)\n\nDeepchecks validation accompanies you from the initial phase when you\nhave only raw data, through the data splits, and to the final stage of\nhaving a trained model that you wish to evaluate. Accordingly, each\nphase requires different assets for the validation. See more about\ntypical usage scenarios and the built-in suites in the\n`docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.\n\nInstallation\n------------\n\nUsing pip\n~~~~~~~~~\n\n.. code:: bash\n\n pip install deepchecks #--upgrade --user\n\nUsing conda\n~~~~~~~~~~~\n\n.. code:: bash\n\n conda install -c deepchecks deepchecks\n\n.. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg\n.. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest\n :target: https://docs.deepchecks.com/en/latest/?badge=latest\n.. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks\n.. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks\n.. |Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability\n :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability\n.. |Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main\n :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main\n\n\"\"\"\n\nimport setuptools\nfrom setuptools import setup\nimport os\n\nmain_ns = {}\nDOCLINES = (__doc__ or '').split(\"\\n\")\n\nwith open(os.path.join('./', 'VERSION')) as version_file:\n VER = version_file.read().strip()\n\nrequirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'\ninstall_requires = []\nif os.path.isfile(requirementPath):\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n\nsetup(\n name='deepchecks',\n version=VER,\n packages=setuptools.find_packages(),\n install_requires=install_requires,\n license_files = ('LICENSE', ),\n description = DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author = 'deepchecks', \n author_email = '[email protected]', \n url = 'https://github.com/deepchecks/deepchecks',\n download_url = \"https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz\".format(VER),\n keywords = ['Software Development', 'Machine Learning'],\n include_package_data=True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1480 | rasdani/github-patches | git_diff | zostera__django-bootstrap3-90 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Special display of required fields
It would be nice if there was some way to render required fields differently, like with boldface, or with an asterisk.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bootstrap3/forms.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.contrib.admin.widgets import AdminFileWidget
4 from django.forms import HiddenInput, FileInput, CheckboxSelectMultiple, Textarea, TextInput, RadioSelect, \
5 CheckboxInput, ClearableFileInput
6 from django.forms.extras import SelectDateWidget
7 from django.forms.forms import BaseForm, BoundField
8 from django.forms.formsets import BaseFormSet
9 from django.utils.encoding import force_text
10 from django.utils.html import conditional_escape, strip_tags
11
12 from .bootstrap import get_bootstrap_setting
13 from .text import text_concat
14 from .exceptions import BootstrapError
15 from .html import add_css_class, render_tag
16 from .icons import render_icon
17
18
19 FORM_GROUP_CLASS = 'form-group'
20
21
22 def render_formset(formset, **kwargs):
23 """
24 Render a formset to a Bootstrap layout
25 """
26 if not isinstance(formset, BaseFormSet):
27 raise BootstrapError('Parameter "formset" should contain a valid Django FormSet.')
28 forms = [render_form(f, **kwargs) for f in formset]
29 return force_text(formset.management_form) + '\n' + '\n'.join(forms)
30
31
32 def render_form(form, layout='', form_group_class=FORM_GROUP_CLASS, field_class='', label_class='', show_help=True,
33 exclude='', set_required=True):
34 """
35 Render a formset to a Bootstrap layout
36 """
37 if not isinstance(form, BaseForm):
38 raise BootstrapError('Parameter "form" should contain a valid Django Form.')
39 html = ''
40 errors = []
41 fields = []
42 for field in form:
43 fields.append(render_field(
44 field,
45 layout=layout,
46 form_group_class=form_group_class,
47 field_class=field_class,
48 label_class=label_class,
49 show_help=show_help,
50 exclude=exclude,
51 set_required=set_required,
52 ))
53 if field.is_hidden and field.errors:
54 errors += field.errors
55 errors += form.non_field_errors()
56 if errors:
57 html += '''<div class="alert alert-danger alert-dismissable alert-link">
58 <button class=close data-dismiss=alert aria-hidden=true>
59 ×</button>{errors}</div>\n
60 '''.format(errors='\n'.join(['<p>{e}</p>'.format(e=e) for e in errors]))
61 return html + '\n'.join(fields)
62
63
64 def render_field(field, layout='', form_group_class=FORM_GROUP_CLASS,
65 field_class=None, label_class=None, show_label=True,
66 show_help=True, exclude='', set_required=True):
67 """
68 Render a formset to a Bootstrap layout
69 """
70 if not isinstance(field, BoundField):
71 raise BootstrapError('Parameter "field" should contain a valid Django BoundField.')
72 # See if we're not excluded
73 if field.name in exclude.replace(' ', '').split(','):
74 return ''
75 # Hidden input requires no special treatment
76 if field.is_hidden:
77 return force_text(field)
78 # Shortcut to widget
79 widget = field.field.widget
80 # Read widgets attributes
81 widget_attrs = {
82 'class': widget.attrs.get('class', ''),
83 'placeholder': widget.attrs.get('placeholder', ''),
84 'title': widget.attrs.get('title', ''),
85 }
86 # Class to add to field element
87 if isinstance(widget, FileInput):
88 form_control_class = ''
89 else:
90 form_control_class = 'form-control'
91 # Optional extra rendering
92 after_render = None
93 # Wrap rendered field in its own label?
94 put_inside_label = False
95 # Wrapper for the final result (should contain {content} if not empty)
96 wrapper = ''
97
98 # Adjust workings for various widget types
99 if isinstance(field.field.widget, CheckboxInput):
100 form_control_class = ''
101 put_inside_label = True
102 wrapper = '<div class="checkbox">{content}</div>'
103 elif isinstance(widget, RadioSelect):
104 form_control_class = ''
105 after_render = list_to_class('radio')
106 elif isinstance(widget, CheckboxSelectMultiple):
107 form_control_class = ''
108 after_render = list_to_class('checkbox')
109 elif isinstance(widget, SelectDateWidget):
110 after_render = fix_date_select_input
111 elif isinstance(widget, ClearableFileInput):
112 after_render = fix_clearable_file_input
113
114 # Get help text
115 field_help = force_text(field.help_text) if show_help and field.help_text else ''
116 # Get errors
117 field_errors = [conditional_escape(force_text(error)) for error in field.errors]
118 # Temporarily adjust widget attributes if necessary
119 if form_control_class:
120 widget.attrs['class'] = add_css_class(widget_attrs['class'], form_control_class)
121 if is_widget_with_placeholder(widget) and field.label and not put_inside_label and not widget_attrs['placeholder']:
122 widget.attrs['placeholder'] = field.label
123 if field_help and not put_inside_label and not widget_attrs['title']:
124 widget.attrs['title'] = strip_tags(field_help)
125 if layout == 'inline' and field_errors:
126 field_title = widget.attrs.get('title', '')
127 field_title += ' ' + ' '.join([strip_tags(e) for e in field_errors])
128 widget.attrs['title'] = field_title.strip()
129 # Set required attribute
130 if set_required and is_widget_required_attribute(widget):
131 widget.attrs['required'] = 'required'
132 # Render the field
133 rendered_field = field.as_widget(attrs=widget.attrs)
134 # Apply the post_processor
135 if after_render:
136 rendered_field = after_render(rendered_field)
137 # Return changed attributes to original settings
138 for attr in widget_attrs:
139 widget.attrs[attr] = widget_attrs[attr]
140 # Wrap the rendered field in its label if necessary
141 if put_inside_label:
142 rendered_field = render_label(
143 content='{field} {label}'.format(field=rendered_field, label=field.label),
144 label_title=field.help_text
145 )
146 # Add any help text and/or errors
147 if layout != 'inline':
148 help_text_and_errors = [field_help] + field_errors
149 if help_text_and_errors:
150 help_html = ' '.join([h for h in help_text_and_errors if h])
151 rendered_field += '<span class=help-block>{help}</span>'.format(help=help_html)
152 # Wrap the rendered field
153 if wrapper:
154 rendered_field = wrapper.format(content=rendered_field)
155 # Prepare label
156 label = field.label
157 if put_inside_label:
158 label = None
159 if layout == 'inline' or not show_label:
160 label_class = add_css_class(label_class, 'sr-only')
161 # Render label and field
162 content = render_field_and_label(
163 field=rendered_field,
164 label=label,
165 field_class=field_class,
166 label_class=label_class,
167 layout=layout,
168 )
169 # Return combined content, wrapped in form control
170 if field.errors:
171 form_group_class = add_css_class(form_group_class, 'has-error')
172 elif field.form.is_bound:
173 form_group_class = add_css_class(form_group_class, 'has-success')
174
175 return render_form_group(content, form_group_class)
176
177
178 def render_label(content, label_for=None, label_class=None, label_title=''):
179 """
180 Render a label with content
181 """
182 attrs = {}
183 if label_for:
184 attrs['for'] = label_for
185 if label_class:
186 attrs['class'] = label_class
187 if label_title:
188 attrs['title'] = label_title
189 return render_tag('label', attrs=attrs, content=content)
190
191
192 def render_button(content, button_type=None, icon=None):
193 attrs = {'class': 'btn'}
194 if button_type:
195 if button_type == 'submit':
196 attrs['class'] += ' btn-primary'
197 elif button_type != 'reset' and button_type != 'button':
198 raise BootstrapError('Parameter "button_type" should be "submit", "reset", "button" or empty.')
199 attrs['type'] = button_type
200 icon_content = render_icon(icon) if icon else ''
201 return render_tag('button', attrs=attrs, content=text_concat(icon_content, content, separator=' '))
202
203
204 def render_field_and_label(field, label, field_class='', label_class='', layout='', **kwargs):
205 # Default settings for horizontal form
206 if layout == 'horizontal':
207 if not label_class:
208 label_class = get_bootstrap_setting('horizontal_label_class')
209 if not field_class:
210 field_class = get_bootstrap_setting('horizontal_field_class')
211 if not label:
212 label = ' '
213 label_class = add_css_class(label_class, 'control-label')
214 html = field
215 if field_class:
216 html = '<div class="{klass}">{html}</div>'.format(klass=field_class, html=html)
217 if label:
218 html = render_label(label, label_class=label_class) + html
219 return html
220
221
222 def render_form_group(content, css_class=FORM_GROUP_CLASS):
223 """
224 Render a Bootstrap form group
225 """
226 return '<div class="{klass}">{content}</div>'.format(
227 klass=css_class,
228 content=content,
229 )
230
231
232 def is_widget_required_attribute(widget):
233 """
234 Is this widget required?
235 """
236 if not get_bootstrap_setting('set_required'):
237 return False
238 if not widget.is_required:
239 return False
240 if isinstance(widget, (AdminFileWidget, HiddenInput, FileInput, CheckboxSelectMultiple)):
241 return False
242 return True
243
244
245 def is_widget_with_placeholder(widget):
246 """
247 Is this a widget that should have a placeholder?
248 Only text, search, url, tel, e-mail, password, number have placeholders
249 These are all derived form TextInput, except for Textarea
250 """
251 return isinstance(widget, (TextInput, Textarea))
252
253
254 def list_to_class(klass):
255 def fixer(html):
256 mapping = [
257 ('<ul', '<div'),
258 ('</ul>', '</div>'),
259 ('<li', '<div class="{klass}"'.format(klass=klass)),
260 ('</li>', '</div>'),
261 ]
262 for k, v in mapping:
263 html = html.replace(k, v)
264 return html
265
266 return fixer
267
268
269 def surround_with(html_with_content):
270 def wrapper(html):
271 return html_with_content.format(content=html)
272
273 return wrapper
274
275
276 def fix_date_select_input(html):
277 div1 = '<div class="col-xs-4">'
278 div2 = '</div>'
279 html = html.replace('<select', div1 + '<select')
280 html = html.replace('</select>', '</select>' + div2)
281 return '<div class="row bootstrap3-multi-input">' + html + '</div>'
282
283
284 def fix_clearable_file_input(html):
285 """
286 Fix a clearable file input
287 TODO: This needs improvement
288
289 Currently Django returns
290 Currently: <a href="dummy.txt">dummy.txt</a> <input id="file4-clear_id" name="file4-clear" type="checkbox" /> <label for="file4-clear_id">Clear</label><br />Change: <input id="id_file4" name="file4" type="file" /><span class=help-block></span></div>
291
292 """
293 # TODO This needs improvement
294 return '<div class="row bootstrap3-multi-input"><div class="col-xs-12">' + html + '</div></div>'
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bootstrap3/forms.py b/bootstrap3/forms.py
--- a/bootstrap3/forms.py
+++ b/bootstrap3/forms.py
@@ -172,6 +172,12 @@
elif field.form.is_bound:
form_group_class = add_css_class(form_group_class, 'has-success')
+ # Required and optional classes to the form group
+ if field.field.required:
+ form_group_class = add_css_class(form_group_class, 'required')
+ else:
+ form_group_class = add_css_class(form_group_class, 'optional')
+
return render_form_group(content, form_group_class)
| {"golden_diff": "diff --git a/bootstrap3/forms.py b/bootstrap3/forms.py\n--- a/bootstrap3/forms.py\n+++ b/bootstrap3/forms.py\n@@ -172,6 +172,12 @@\n elif field.form.is_bound:\n form_group_class = add_css_class(form_group_class, 'has-success')\n \n+ # Required and optional classes to the form group\n+ if field.field.required:\n+ form_group_class = add_css_class(form_group_class, 'required')\n+ else:\n+ form_group_class = add_css_class(form_group_class, 'optional')\n+\n return render_form_group(content, form_group_class)\n", "issue": "Special display of required fields\nIt would be nice if there was some way to render differently required fields, like with boldface, or with and asterisk.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib.admin.widgets import AdminFileWidget\nfrom django.forms import HiddenInput, FileInput, CheckboxSelectMultiple, Textarea, TextInput, RadioSelect, \\\n CheckboxInput, ClearableFileInput\nfrom django.forms.extras import SelectDateWidget\nfrom django.forms.forms import BaseForm, BoundField\nfrom django.forms.formsets import BaseFormSet\nfrom django.utils.encoding import force_text\nfrom django.utils.html import conditional_escape, strip_tags\n\nfrom .bootstrap import get_bootstrap_setting\nfrom .text import text_concat\nfrom .exceptions import BootstrapError\nfrom .html import add_css_class, render_tag\nfrom .icons import render_icon\n\n\nFORM_GROUP_CLASS = 'form-group'\n\n\ndef render_formset(formset, **kwargs):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(formset, BaseFormSet):\n raise BootstrapError('Parameter \"formset\" should contain a valid Django FormSet.')\n forms = [render_form(f, **kwargs) for f in formset]\n return force_text(formset.management_form) + '\\n' + '\\n'.join(forms)\n\n\ndef render_form(form, layout='', form_group_class=FORM_GROUP_CLASS, field_class='', label_class='', show_help=True,\n exclude='', set_required=True):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(form, BaseForm):\n raise BootstrapError('Parameter \"form\" should contain a valid Django Form.')\n html = ''\n errors = []\n fields = []\n for field in form:\n fields.append(render_field(\n field,\n layout=layout,\n form_group_class=form_group_class,\n field_class=field_class,\n label_class=label_class,\n show_help=show_help,\n exclude=exclude,\n set_required=set_required,\n ))\n if field.is_hidden and field.errors:\n errors += field.errors\n errors += form.non_field_errors()\n if errors:\n html += '''<div class=\"alert alert-danger alert-dismissable alert-link\">\n <button class=close data-dismiss=alert aria-hidden=true>\n ×</button>{errors}</div>\\n\n '''.format(errors='\\n'.join(['<p>{e}</p>'.format(e=e) for e in errors]))\n return html + '\\n'.join(fields)\n\n\ndef render_field(field, layout='', form_group_class=FORM_GROUP_CLASS,\n field_class=None, label_class=None, show_label=True,\n show_help=True, exclude='', set_required=True):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(field, BoundField):\n raise BootstrapError('Parameter \"field\" should contain a valid Django BoundField.')\n # See if we're not excluded\n if field.name in exclude.replace(' ', '').split(','):\n return ''\n # Hidden input requires no special treatment\n if field.is_hidden:\n return force_text(field)\n # Shortcut to widget\n widget = field.field.widget\n # Read widgets attributes\n widget_attrs = {\n 'class': widget.attrs.get('class', ''),\n 
'placeholder': widget.attrs.get('placeholder', ''),\n 'title': widget.attrs.get('title', ''),\n }\n # Class to add to field element\n if isinstance(widget, FileInput):\n form_control_class = ''\n else:\n form_control_class = 'form-control'\n # Optional extra rendering\n after_render = None\n # Wrap rendered field in its own label?\n put_inside_label = False\n # Wrapper for the final result (should contain {content} if not empty)\n wrapper = ''\n\n # Adjust workings for various widget types\n if isinstance(field.field.widget, CheckboxInput):\n form_control_class = ''\n put_inside_label = True\n wrapper = '<div class=\"checkbox\">{content}</div>'\n elif isinstance(widget, RadioSelect):\n form_control_class = ''\n after_render = list_to_class('radio')\n elif isinstance(widget, CheckboxSelectMultiple):\n form_control_class = ''\n after_render = list_to_class('checkbox')\n elif isinstance(widget, SelectDateWidget):\n after_render = fix_date_select_input\n elif isinstance(widget, ClearableFileInput):\n after_render = fix_clearable_file_input\n\n # Get help text\n field_help = force_text(field.help_text) if show_help and field.help_text else ''\n # Get errors\n field_errors = [conditional_escape(force_text(error)) for error in field.errors]\n # Temporarily adjust widget attributes if necessary\n if form_control_class:\n widget.attrs['class'] = add_css_class(widget_attrs['class'], form_control_class)\n if is_widget_with_placeholder(widget) and field.label and not put_inside_label and not widget_attrs['placeholder']:\n widget.attrs['placeholder'] = field.label\n if field_help and not put_inside_label and not widget_attrs['title']:\n widget.attrs['title'] = strip_tags(field_help)\n if layout == 'inline' and field_errors:\n field_title = widget.attrs.get('title', '')\n field_title += ' ' + ' '.join([strip_tags(e) for e in field_errors])\n widget.attrs['title'] = field_title.strip()\n # Set required attribute\n if set_required and is_widget_required_attribute(widget):\n widget.attrs['required'] = 'required'\n # Render the field\n rendered_field = field.as_widget(attrs=widget.attrs)\n # Apply the post_processor\n if after_render:\n rendered_field = after_render(rendered_field)\n # Return changed attributes to original settings\n for attr in widget_attrs:\n widget.attrs[attr] = widget_attrs[attr]\n # Wrap the rendered field in its label if necessary\n if put_inside_label:\n rendered_field = render_label(\n content='{field} {label}'.format(field=rendered_field, label=field.label),\n label_title=field.help_text\n )\n # Add any help text and/or errors\n if layout != 'inline':\n help_text_and_errors = [field_help] + field_errors\n if help_text_and_errors:\n help_html = ' '.join([h for h in help_text_and_errors if h])\n rendered_field += '<span class=help-block>{help}</span>'.format(help=help_html)\n # Wrap the rendered field\n if wrapper:\n rendered_field = wrapper.format(content=rendered_field)\n # Prepare label\n label = field.label\n if put_inside_label:\n label = None\n if layout == 'inline' or not show_label:\n label_class = add_css_class(label_class, 'sr-only')\n # Render label and field\n content = render_field_and_label(\n field=rendered_field,\n label=label,\n field_class=field_class,\n label_class=label_class,\n layout=layout,\n )\n # Return combined content, wrapped in form control\n if field.errors:\n form_group_class = add_css_class(form_group_class, 'has-error')\n elif field.form.is_bound:\n form_group_class = add_css_class(form_group_class, 'has-success')\n\n return render_form_group(content, 
form_group_class)\n\n\ndef render_label(content, label_for=None, label_class=None, label_title=''):\n \"\"\"\n Render a label with content\n \"\"\"\n attrs = {}\n if label_for:\n attrs['for'] = label_for\n if label_class:\n attrs['class'] = label_class\n if label_title:\n attrs['title'] = label_title\n return render_tag('label', attrs=attrs, content=content)\n\n\ndef render_button(content, button_type=None, icon=None):\n attrs = {'class': 'btn'}\n if button_type:\n if button_type == 'submit':\n attrs['class'] += ' btn-primary'\n elif button_type != 'reset' and button_type != 'button':\n raise BootstrapError('Parameter \"button_type\" should be \"submit\", \"reset\", \"button\" or empty.')\n attrs['type'] = button_type\n icon_content = render_icon(icon) if icon else ''\n return render_tag('button', attrs=attrs, content=text_concat(icon_content, content, separator=' '))\n\n\ndef render_field_and_label(field, label, field_class='', label_class='', layout='', **kwargs):\n # Default settings for horizontal form\n if layout == 'horizontal':\n if not label_class:\n label_class = get_bootstrap_setting('horizontal_label_class')\n if not field_class:\n field_class = get_bootstrap_setting('horizontal_field_class')\n if not label:\n label = ' '\n label_class = add_css_class(label_class, 'control-label')\n html = field\n if field_class:\n html = '<div class=\"{klass}\">{html}</div>'.format(klass=field_class, html=html)\n if label:\n html = render_label(label, label_class=label_class) + html\n return html\n\n\ndef render_form_group(content, css_class=FORM_GROUP_CLASS):\n \"\"\"\n Render a Bootstrap form group\n \"\"\"\n return '<div class=\"{klass}\">{content}</div>'.format(\n klass=css_class,\n content=content,\n )\n\n\ndef is_widget_required_attribute(widget):\n \"\"\"\n Is this widget required?\n \"\"\"\n if not get_bootstrap_setting('set_required'):\n return False\n if not widget.is_required:\n return False\n if isinstance(widget, (AdminFileWidget, HiddenInput, FileInput, CheckboxSelectMultiple)):\n return False\n return True\n\n\ndef is_widget_with_placeholder(widget):\n \"\"\"\n Is this a widget that should have a placeholder?\n Only text, search, url, tel, e-mail, password, number have placeholders\n These are all derived form TextInput, except for Textarea\n \"\"\"\n return isinstance(widget, (TextInput, Textarea))\n\n\ndef list_to_class(klass):\n def fixer(html):\n mapping = [\n ('<ul', '<div'),\n ('</ul>', '</div>'),\n ('<li', '<div class=\"{klass}\"'.format(klass=klass)),\n ('</li>', '</div>'),\n ]\n for k, v in mapping:\n html = html.replace(k, v)\n return html\n\n return fixer\n\n\ndef surround_with(html_with_content):\n def wrapper(html):\n return html_with_content.format(content=html)\n\n return wrapper\n\n\ndef fix_date_select_input(html):\n div1 = '<div class=\"col-xs-4\">'\n div2 = '</div>'\n html = html.replace('<select', div1 + '<select')\n html = html.replace('</select>', '</select>' + div2)\n return '<div class=\"row bootstrap3-multi-input\">' + html + '</div>'\n\n\ndef fix_clearable_file_input(html):\n \"\"\"\n Fix a clearable file input\n TODO: This needs improvement\n\n Currently Django returns\n Currently: <a href=\"dummy.txt\">dummy.txt</a> <input id=\"file4-clear_id\" name=\"file4-clear\" type=\"checkbox\" /> <label for=\"file4-clear_id\">Clear</label><br />Change: <input id=\"id_file4\" name=\"file4\" type=\"file\" /><span class=help-block></span></div>\n\n \"\"\"\n # TODO This needs improvement\n return '<div class=\"row bootstrap3-multi-input\"><div class=\"col-xs-12\">' + 
html + '</div></div>'\n", "path": "bootstrap3/forms.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib.admin.widgets import AdminFileWidget\nfrom django.forms import HiddenInput, FileInput, CheckboxSelectMultiple, Textarea, TextInput, RadioSelect, \\\n CheckboxInput, ClearableFileInput\nfrom django.forms.extras import SelectDateWidget\nfrom django.forms.forms import BaseForm, BoundField\nfrom django.forms.formsets import BaseFormSet\nfrom django.utils.encoding import force_text\nfrom django.utils.html import conditional_escape, strip_tags\n\nfrom .bootstrap import get_bootstrap_setting\nfrom .text import text_concat\nfrom .exceptions import BootstrapError\nfrom .html import add_css_class, render_tag\nfrom .icons import render_icon\n\n\nFORM_GROUP_CLASS = 'form-group'\n\n\ndef render_formset(formset, **kwargs):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(formset, BaseFormSet):\n raise BootstrapError('Parameter \"formset\" should contain a valid Django FormSet.')\n forms = [render_form(f, **kwargs) for f in formset]\n return force_text(formset.management_form) + '\\n' + '\\n'.join(forms)\n\n\ndef render_form(form, layout='', form_group_class=FORM_GROUP_CLASS, field_class='', label_class='', show_help=True,\n exclude='', set_required=True):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(form, BaseForm):\n raise BootstrapError('Parameter \"form\" should contain a valid Django Form.')\n html = ''\n errors = []\n fields = []\n for field in form:\n fields.append(render_field(\n field,\n layout=layout,\n form_group_class=form_group_class,\n field_class=field_class,\n label_class=label_class,\n show_help=show_help,\n exclude=exclude,\n set_required=set_required,\n ))\n if field.is_hidden and field.errors:\n errors += field.errors\n errors += form.non_field_errors()\n if errors:\n html += '''<div class=\"alert alert-danger alert-dismissable alert-link\">\n <button class=close data-dismiss=alert aria-hidden=true>\n ×</button>{errors}</div>\\n\n '''.format(errors='\\n'.join(['<p>{e}</p>'.format(e=e) for e in errors]))\n return html + '\\n'.join(fields)\n\n\ndef render_field(field, layout='', form_group_class=FORM_GROUP_CLASS,\n field_class=None, label_class=None, show_label=True,\n show_help=True, exclude='', set_required=True):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(field, BoundField):\n raise BootstrapError('Parameter \"field\" should contain a valid Django BoundField.')\n # See if we're not excluded\n if field.name in exclude.replace(' ', '').split(','):\n return ''\n # Hidden input requires no special treatment\n if field.is_hidden:\n return force_text(field)\n # Shortcut to widget\n widget = field.field.widget\n # Read widgets attributes\n widget_attrs = {\n 'class': widget.attrs.get('class', ''),\n 'placeholder': widget.attrs.get('placeholder', ''),\n 'title': widget.attrs.get('title', ''),\n }\n # Class to add to field element\n if isinstance(widget, FileInput):\n form_control_class = ''\n else:\n form_control_class = 'form-control'\n # Optional extra rendering\n after_render = None\n # Wrap rendered field in its own label?\n put_inside_label = False\n # Wrapper for the final result (should contain {content} if not empty)\n wrapper = ''\n\n # Adjust workings for various widget types\n if isinstance(field.field.widget, CheckboxInput):\n form_control_class = ''\n put_inside_label = True\n wrapper = '<div class=\"checkbox\">{content}</div>'\n 
elif isinstance(widget, RadioSelect):\n form_control_class = ''\n after_render = list_to_class('radio')\n elif isinstance(widget, CheckboxSelectMultiple):\n form_control_class = ''\n after_render = list_to_class('checkbox')\n elif isinstance(widget, SelectDateWidget):\n after_render = fix_date_select_input\n elif isinstance(widget, ClearableFileInput):\n after_render = fix_clearable_file_input\n\n # Get help text\n field_help = force_text(field.help_text) if show_help and field.help_text else ''\n # Get errors\n field_errors = [conditional_escape(force_text(error)) for error in field.errors]\n # Temporarily adjust widget attributes if necessary\n if form_control_class:\n widget.attrs['class'] = add_css_class(widget_attrs['class'], form_control_class)\n if is_widget_with_placeholder(widget) and field.label and not put_inside_label and not widget_attrs['placeholder']:\n widget.attrs['placeholder'] = field.label\n if field_help and not put_inside_label and not widget_attrs['title']:\n widget.attrs['title'] = strip_tags(field_help)\n if layout == 'inline' and field_errors:\n field_title = widget.attrs.get('title', '')\n field_title += ' ' + ' '.join([strip_tags(e) for e in field_errors])\n widget.attrs['title'] = field_title.strip()\n # Set required attribute\n if set_required and is_widget_required_attribute(widget):\n widget.attrs['required'] = 'required'\n # Render the field\n rendered_field = field.as_widget(attrs=widget.attrs)\n # Apply the post_processor\n if after_render:\n rendered_field = after_render(rendered_field)\n # Return changed attributes to original settings\n for attr in widget_attrs:\n widget.attrs[attr] = widget_attrs[attr]\n # Wrap the rendered field in its label if necessary\n if put_inside_label:\n rendered_field = render_label(\n content='{field} {label}'.format(field=rendered_field, label=field.label),\n label_title=field.help_text\n )\n # Add any help text and/or errors\n if layout != 'inline':\n help_text_and_errors = [field_help] + field_errors\n if help_text_and_errors:\n help_html = ' '.join([h for h in help_text_and_errors if h])\n rendered_field += '<span class=help-block>{help}</span>'.format(help=help_html)\n # Wrap the rendered field\n if wrapper:\n rendered_field = wrapper.format(content=rendered_field)\n # Prepare label\n label = field.label\n if put_inside_label:\n label = None\n if layout == 'inline' or not show_label:\n label_class = add_css_class(label_class, 'sr-only')\n # Render label and field\n content = render_field_and_label(\n field=rendered_field,\n label=label,\n field_class=field_class,\n label_class=label_class,\n layout=layout,\n )\n # Return combined content, wrapped in form control\n if field.errors:\n form_group_class = add_css_class(form_group_class, 'has-error')\n elif field.form.is_bound:\n form_group_class = add_css_class(form_group_class, 'has-success')\n\n # Required and optional classes to the form group\n if field.field.required:\n form_group_class = add_css_class(form_group_class, 'required')\n else:\n form_group_class = add_css_class(form_group_class, 'optional')\n\n return render_form_group(content, form_group_class)\n\n\ndef render_label(content, label_for=None, label_class=None, label_title=''):\n \"\"\"\n Render a label with content\n \"\"\"\n attrs = {}\n if label_for:\n attrs['for'] = label_for\n if label_class:\n attrs['class'] = label_class\n if label_title:\n attrs['title'] = label_title\n return render_tag('label', attrs=attrs, content=content)\n\n\ndef render_button(content, button_type=None, icon=None):\n attrs = 
{'class': 'btn'}\n if button_type:\n if button_type == 'submit':\n attrs['class'] += ' btn-primary'\n elif button_type != 'reset' and button_type != 'button':\n raise BootstrapError('Parameter \"button_type\" should be \"submit\", \"reset\", \"button\" or empty.')\n attrs['type'] = button_type\n icon_content = render_icon(icon) if icon else ''\n return render_tag('button', attrs=attrs, content=text_concat(icon_content, content, separator=' '))\n\n\ndef render_field_and_label(field, label, field_class='', label_class='', layout='', **kwargs):\n # Default settings for horizontal form\n if layout == 'horizontal':\n if not label_class:\n label_class = get_bootstrap_setting('horizontal_label_class')\n if not field_class:\n field_class = get_bootstrap_setting('horizontal_field_class')\n if not label:\n label = ' '\n label_class = add_css_class(label_class, 'control-label')\n html = field\n if field_class:\n html = '<div class=\"{klass}\">{html}</div>'.format(klass=field_class, html=html)\n if label:\n html = render_label(label, label_class=label_class) + html\n return html\n\n\ndef render_form_group(content, css_class=FORM_GROUP_CLASS):\n \"\"\"\n Render a Bootstrap form group\n \"\"\"\n return '<div class=\"{klass}\">{content}</div>'.format(\n klass=css_class,\n content=content,\n )\n\n\ndef is_widget_required_attribute(widget):\n \"\"\"\n Is this widget required?\n \"\"\"\n if not get_bootstrap_setting('set_required'):\n return False\n if not widget.is_required:\n return False\n if isinstance(widget, (AdminFileWidget, HiddenInput, FileInput, CheckboxSelectMultiple)):\n return False\n return True\n\n\ndef is_widget_with_placeholder(widget):\n \"\"\"\n Is this a widget that should have a placeholder?\n Only text, search, url, tel, e-mail, password, number have placeholders\n These are all derived form TextInput, except for Textarea\n \"\"\"\n return isinstance(widget, (TextInput, Textarea))\n\n\ndef list_to_class(klass):\n def fixer(html):\n mapping = [\n ('<ul', '<div'),\n ('</ul>', '</div>'),\n ('<li', '<div class=\"{klass}\"'.format(klass=klass)),\n ('</li>', '</div>'),\n ]\n for k, v in mapping:\n html = html.replace(k, v)\n return html\n\n return fixer\n\n\ndef surround_with(html_with_content):\n def wrapper(html):\n return html_with_content.format(content=html)\n\n return wrapper\n\n\ndef fix_date_select_input(html):\n div1 = '<div class=\"col-xs-4\">'\n div2 = '</div>'\n html = html.replace('<select', div1 + '<select')\n html = html.replace('</select>', '</select>' + div2)\n return '<div class=\"row bootstrap3-multi-input\">' + html + '</div>'\n\n\ndef fix_clearable_file_input(html):\n \"\"\"\n Fix a clearable file input\n TODO: This needs improvement\n\n Currently Django returns\n Currently: <a href=\"dummy.txt\">dummy.txt</a> <input id=\"file4-clear_id\" name=\"file4-clear\" type=\"checkbox\" /> <label for=\"file4-clear_id\">Clear</label><br />Change: <input id=\"id_file4\" name=\"file4\" type=\"file\" /><span class=help-block></span></div>\n\n \"\"\"\n # TODO This needs improvement\n return '<div class=\"row bootstrap3-multi-input\"><div class=\"col-xs-12\">' + html + '</div></div>'\n", "path": "bootstrap3/forms.py"}]} |
gh_patches_debug_1481 | rasdani/github-patches | git_diff | fossasia__open-event-server-6046 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error logs generated in Celery while sending Mails
```
Traceback (most recent call last):
File "/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/celery/worker/consumer/consumer.py", line 551, in on_task_received
payload = message.decode()
File "/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/kombu/message.py", line 193, in decode
self._decoded_cache = self._decode()
File "/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/kombu/message.py", line 198, in _decode
self.content_encoding, accept=self.accept)
File "/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/kombu/serialization.py", line 253, in loads
raise self._for_untrusted_content(content_type, 'untrusted')
kombu.exceptions.ContentDisallowed: Refusing to deserialize untrusted content of type pickle (application/x-python-serialize)
```
Similar logs appear for the JSON format of mail objects.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/__init__.py`
Content:
```
1 from celery.signals import after_task_publish
2 import logging
3 import os.path
4 from envparse import env
5
6 import sys
7 from flask import Flask, json, make_response
8 from flask_celeryext import FlaskCeleryExt
9 from app.settings import get_settings, get_setts
10 from flask_migrate import Migrate, MigrateCommand
11 from flask_script import Manager
12 from flask_login import current_user
13 from flask_jwt import JWT
14 from datetime import timedelta
15 from flask_cors import CORS
16 from flask_rest_jsonapi.errors import jsonapi_errors
17 from flask_rest_jsonapi.exceptions import JsonApiException
18 from healthcheck import HealthCheck
19 from apscheduler.schedulers.background import BackgroundScheduler
20 from elasticsearch_dsl.connections import connections
21 from pytz import utc
22
23 import sqlalchemy as sa
24
25 import stripe
26 from app.settings import get_settings
27 from app.models import db
28 from app.api.helpers.jwt import jwt_authenticate, jwt_identity
29 from app.api.helpers.cache import cache
30 from werkzeug.middleware.profiler import ProfilerMiddleware
31 from app.views import BlueprintsManager
32 from app.api.helpers.auth import AuthManager
33 from app.api.helpers.scheduled_jobs import send_after_event_mail, send_event_fee_notification, \
34 send_event_fee_notification_followup, change_session_state_on_event_completion, \
35 expire_pending_tickets_after_three_days
36 from app.models.event import Event
37 from app.models.role_invite import RoleInvite
38 from app.views.healthcheck import health_check_celery, health_check_db, health_check_migrations, check_migrations
39 from app.views.elastic_search import client
40 from app.views.elastic_cron_helpers import sync_events_elasticsearch, cron_rebuild_events_elasticsearch
41 from app.views.redis_store import redis_store
42 from app.views.celery_ import celery
43 from app.templates.flask_ext.jinja.filters import init_filters
44 import sentry_sdk
45 from sentry_sdk.integrations.flask import FlaskIntegration
46
47
48 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
49
50 static_dir = os.path.dirname(os.path.dirname(__file__)) + "/static"
51 template_dir = os.path.dirname(__file__) + "/templates"
52 app = Flask(__name__, static_folder=static_dir, template_folder=template_dir)
53 env.read_envfile()
54
55
56 class ReverseProxied(object):
57 """
58 ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel
59 """
60
61 def __init__(self, app):
62 self.app = app
63
64 def __call__(self, environ, start_response):
65 scheme = environ.get('HTTP_X_FORWARDED_PROTO')
66 if scheme:
67 environ['wsgi.url_scheme'] = scheme
68 if os.getenv('FORCE_SSL', 'no') == 'yes':
69 environ['wsgi.url_scheme'] = 'https'
70 return self.app(environ, start_response)
71
72
73 app.wsgi_app = ReverseProxied(app.wsgi_app)
74
75 app_created = False
76
77
78 def create_app():
79 global app_created
80 if not app_created:
81 BlueprintsManager.register(app)
82 Migrate(app, db)
83
84 app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))
85 db.init_app(app)
86 _manager = Manager(app)
87 _manager.add_command('db', MigrateCommand)
88
89 if app.config['CACHING']:
90 cache.init_app(app, config={'CACHE_TYPE': 'simple'})
91 else:
92 cache.init_app(app, config={'CACHE_TYPE': 'null'})
93
94 stripe.api_key = 'SomeStripeKey'
95 app.secret_key = 'super secret key'
96 app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
97 app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'
98
99 app.logger.addHandler(logging.StreamHandler(sys.stdout))
100 app.logger.setLevel(logging.ERROR)
101
102 # set up jwt
103 app.config['JWT_AUTH_USERNAME_KEY'] = 'email'
104 app.config['JWT_EXPIRATION_DELTA'] = timedelta(seconds=24 * 60 * 60)
105 app.config['JWT_AUTH_URL_RULE'] = '/auth/session'
106 _jwt = JWT(app, jwt_authenticate, jwt_identity)
107
108 # setup celery
109 app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']
110 app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']
111
112 CORS(app, resources={r"/*": {"origins": "*"}})
113 AuthManager.init_login(app)
114
115 if app.config['TESTING'] and app.config['PROFILE']:
116 # Profiling
117 app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
118
119 # development api
120 with app.app_context():
121 from app.api.admin_statistics_api.events import event_statistics
122 from app.api.auth import auth_routes
123 from app.api.attendees import attendee_misc_routes
124 from app.api.bootstrap import api_v1
125 from app.api.celery_tasks import celery_routes
126 from app.api.event_copy import event_copy
127 from app.api.exports import export_routes
128 from app.api.imports import import_routes
129 from app.api.uploads import upload_routes
130 from app.api.users import user_misc_routes
131 from app.api.orders import order_misc_routes
132 from app.api.role_invites import role_invites_misc_routes
133 from app.api.auth import ticket_blueprint, authorised_blueprint
134 from app.api.admin_translations import admin_blueprint
135 from app.api.orders import alipay_blueprint
136
137 app.register_blueprint(api_v1)
138 app.register_blueprint(event_copy)
139 app.register_blueprint(upload_routes)
140 app.register_blueprint(export_routes)
141 app.register_blueprint(import_routes)
142 app.register_blueprint(celery_routes)
143 app.register_blueprint(auth_routes)
144 app.register_blueprint(event_statistics)
145 app.register_blueprint(user_misc_routes)
146 app.register_blueprint(attendee_misc_routes)
147 app.register_blueprint(order_misc_routes)
148 app.register_blueprint(role_invites_misc_routes)
149 app.register_blueprint(ticket_blueprint)
150 app.register_blueprint(authorised_blueprint)
151 app.register_blueprint(admin_blueprint)
152 app.register_blueprint(alipay_blueprint)
153
154 sa.orm.configure_mappers()
155
156 if app.config['SERVE_STATIC']:
157 app.add_url_rule('/static/<path:filename>',
158 endpoint='static',
159 view_func=app.send_static_file)
160
161 # sentry
162 if not app_created and 'SENTRY_DSN' in app.config:
163 sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration()])
164
165 # redis
166 redis_store.init_app(app)
167
168 # elasticsearch
169 if app.config['ENABLE_ELASTICSEARCH']:
170 client.init_app(app)
171 connections.add_connection('default', client.elasticsearch)
172 with app.app_context():
173 try:
174 cron_rebuild_events_elasticsearch.delay()
175 except Exception:
176 pass
177
178 app_created = True
179 return app, _manager, db, _jwt
180
181
182 current_app, manager, database, jwt = create_app()
183 init_filters(app)
184
185
186 # http://stackoverflow.com/questions/26724623/
187 @app.before_request
188 def track_user():
189 if current_user.is_authenticated:
190 current_user.update_lat()
191
192
193 def make_celery(app=None):
194 app = app or create_app()[0]
195 celery.conf.update(app.config)
196 ext = FlaskCeleryExt(app)
197 return ext.celery
198
199
200 # Health-check
201 health = HealthCheck(current_app, "/health-check")
202 health.add_check(health_check_celery)
203 health.add_check(health_check_db)
204 with current_app.app_context():
205 current_app.config['MIGRATION_STATUS'] = check_migrations()
206 health.add_check(health_check_migrations)
207
208
209 # http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists
210 @after_task_publish.connect
211 def update_sent_state(sender=None, headers=None, **kwargs):
212 # the task may not exist if sent using `send_task` which
213 # sends tasks by name, so fall back to the default result backend
214 # if that is the case.
215 task = celery.tasks.get(sender)
216 backend = task.backend if task else celery.backend
217 backend.store_result(headers['id'], None, 'WAITING')
218
219
220 # register celery tasks. removing them will cause the tasks to not function. so don't remove them
221 # it is important to register them after celery is defined to resolve circular imports
222
223 from .api.helpers import tasks
224
225 # import helpers.tasks
226
227
228 scheduler = BackgroundScheduler(timezone=utc)
229 # scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)
230 # scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)
231 if app.config['ENABLE_ELASTICSEARCH']:
232 scheduler.add_job(sync_events_elasticsearch, 'interval', minutes=60)
233 scheduler.add_job(cron_rebuild_events_elasticsearch, 'cron', day=7)
234
235 scheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)
236 scheduler.add_job(send_event_fee_notification, 'cron', day=1)
237 scheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)
238 scheduler.add_job(change_session_state_on_event_completion, 'cron', hour=5, minute=30)
239 scheduler.add_job(expire_pending_tickets_after_three_days, 'cron', hour=5)
240 scheduler.start()
241
242
243 @app.errorhandler(500)
244 def internal_server_error(error):
245 if current_app.config['PROPOGATE_ERROR'] is True:
246 exc = JsonApiException({'pointer': ''}, str(error))
247 else:
248 exc = JsonApiException({'pointer': ''}, 'Unknown error')
249 return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,
250 {'Content-Type': 'application/vnd.api+json'})
251
252
253 if __name__ == '__main__':
254 current_app.run()
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/__init__.py b/app/__init__.py
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -108,6 +108,7 @@
# setup celery
app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']
app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']
+ app.config['CELERY_ACCEPT_CONTENT'] = ['json', 'application/text']
CORS(app, resources={r"/*": {"origins": "*"}})
AuthManager.init_login(app)
| {"golden_diff": "diff --git a/app/__init__.py b/app/__init__.py\n--- a/app/__init__.py\n+++ b/app/__init__.py\n@@ -108,6 +108,7 @@\n # setup celery\n app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']\n app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']\n+ app.config['CELERY_ACCEPT_CONTENT'] = ['json', 'application/text']\n \n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n AuthManager.init_login(app)\n", "issue": "Error logs generated in Celery while sending Mails\n```\r\nTraceback (most recent call last):\r\n File \"/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/celery/worker/consumer/consumer.py\", line 551, in on_task_received\r\n payload = message.decode()\r\n File \"/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/kombu/message.py\", line 193, in decode\r\n self._decoded_cache = self._decode()\r\n File \"/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/kombu/message.py\", line 198, in _decode\r\n self.content_encoding, accept=self.accept)\r\n File \"/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/kombu/serialization.py\", line 253, in loads\r\n raise self._for_untrusted_content(content_type, 'untrusted')\r\nkombu.exceptions.ContentDisallowed: Refusing to deserialize untrusted content of type pickle (application/x-python-serialize)\r\n```\r\n\r\nSimilar logs are there for JSON format of mail objects.\n", "before_files": [{"content": "from celery.signals import after_task_publish\nimport logging\nimport os.path\nfrom envparse import env\n\nimport sys\nfrom flask import Flask, json, make_response\nfrom flask_celeryext import FlaskCeleryExt\nfrom app.settings import get_settings, get_setts\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom flask_login import current_user\nfrom flask_jwt import JWT\nfrom datetime import timedelta\nfrom flask_cors import CORS\nfrom flask_rest_jsonapi.errors import jsonapi_errors\nfrom flask_rest_jsonapi.exceptions import JsonApiException\nfrom healthcheck import HealthCheck\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom elasticsearch_dsl.connections import connections\nfrom pytz import utc\n\nimport sqlalchemy as sa\n\nimport stripe\nfrom app.settings import get_settings\nfrom app.models import db\nfrom app.api.helpers.jwt import jwt_authenticate, jwt_identity\nfrom app.api.helpers.cache import cache\nfrom werkzeug.middleware.profiler import ProfilerMiddleware\nfrom app.views import BlueprintsManager\nfrom app.api.helpers.auth import AuthManager\nfrom app.api.helpers.scheduled_jobs import send_after_event_mail, send_event_fee_notification, \\\n send_event_fee_notification_followup, change_session_state_on_event_completion, \\\n expire_pending_tickets_after_three_days\nfrom app.models.event import Event\nfrom app.models.role_invite import RoleInvite\nfrom app.views.healthcheck import health_check_celery, health_check_db, health_check_migrations, check_migrations\nfrom app.views.elastic_search import client\nfrom app.views.elastic_cron_helpers import sync_events_elasticsearch, cron_rebuild_events_elasticsearch\nfrom app.views.redis_store import redis_store\nfrom app.views.celery_ import celery\nfrom app.templates.flask_ext.jinja.filters import init_filters\nimport sentry_sdk\nfrom sentry_sdk.integrations.flask import FlaskIntegration\n\n\nBASE_DIR = 
os.path.dirname(os.path.abspath(__file__))\n\nstatic_dir = os.path.dirname(os.path.dirname(__file__)) + \"/static\"\ntemplate_dir = os.path.dirname(__file__) + \"/templates\"\napp = Flask(__name__, static_folder=static_dir, template_folder=template_dir)\nenv.read_envfile()\n\n\nclass ReverseProxied(object):\n \"\"\"\n ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n scheme = environ.get('HTTP_X_FORWARDED_PROTO')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n if os.getenv('FORCE_SSL', 'no') == 'yes':\n environ['wsgi.url_scheme'] = 'https'\n return self.app(environ, start_response)\n\n\napp.wsgi_app = ReverseProxied(app.wsgi_app)\n\napp_created = False\n\n\ndef create_app():\n global app_created\n if not app_created:\n BlueprintsManager.register(app)\n Migrate(app, db)\n\n app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))\n db.init_app(app)\n _manager = Manager(app)\n _manager.add_command('db', MigrateCommand)\n\n if app.config['CACHING']:\n cache.init_app(app, config={'CACHE_TYPE': 'simple'})\n else:\n cache.init_app(app, config={'CACHE_TYPE': 'null'})\n\n stripe.api_key = 'SomeStripeKey'\n app.secret_key = 'super secret key'\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'\n\n app.logger.addHandler(logging.StreamHandler(sys.stdout))\n app.logger.setLevel(logging.ERROR)\n\n # set up jwt\n app.config['JWT_AUTH_USERNAME_KEY'] = 'email'\n app.config['JWT_EXPIRATION_DELTA'] = timedelta(seconds=24 * 60 * 60)\n app.config['JWT_AUTH_URL_RULE'] = '/auth/session'\n _jwt = JWT(app, jwt_authenticate, jwt_identity)\n\n # setup celery\n app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']\n app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']\n\n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n AuthManager.init_login(app)\n\n if app.config['TESTING'] and app.config['PROFILE']:\n # Profiling\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n # development api\n with app.app_context():\n from app.api.admin_statistics_api.events import event_statistics\n from app.api.auth import auth_routes\n from app.api.attendees import attendee_misc_routes\n from app.api.bootstrap import api_v1\n from app.api.celery_tasks import celery_routes\n from app.api.event_copy import event_copy\n from app.api.exports import export_routes\n from app.api.imports import import_routes\n from app.api.uploads import upload_routes\n from app.api.users import user_misc_routes\n from app.api.orders import order_misc_routes\n from app.api.role_invites import role_invites_misc_routes\n from app.api.auth import ticket_blueprint, authorised_blueprint\n from app.api.admin_translations import admin_blueprint\n from app.api.orders import alipay_blueprint\n\n app.register_blueprint(api_v1)\n app.register_blueprint(event_copy)\n app.register_blueprint(upload_routes)\n app.register_blueprint(export_routes)\n app.register_blueprint(import_routes)\n app.register_blueprint(celery_routes)\n app.register_blueprint(auth_routes)\n app.register_blueprint(event_statistics)\n app.register_blueprint(user_misc_routes)\n app.register_blueprint(attendee_misc_routes)\n app.register_blueprint(order_misc_routes)\n app.register_blueprint(role_invites_misc_routes)\n app.register_blueprint(ticket_blueprint)\n app.register_blueprint(authorised_blueprint)\n 
app.register_blueprint(admin_blueprint)\n app.register_blueprint(alipay_blueprint)\n\n sa.orm.configure_mappers()\n\n if app.config['SERVE_STATIC']:\n app.add_url_rule('/static/<path:filename>',\n endpoint='static',\n view_func=app.send_static_file)\n\n # sentry\n if not app_created and 'SENTRY_DSN' in app.config:\n sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration()])\n\n # redis\n redis_store.init_app(app)\n\n # elasticsearch\n if app.config['ENABLE_ELASTICSEARCH']:\n client.init_app(app)\n connections.add_connection('default', client.elasticsearch)\n with app.app_context():\n try:\n cron_rebuild_events_elasticsearch.delay()\n except Exception:\n pass\n\n app_created = True\n return app, _manager, db, _jwt\n\n\ncurrent_app, manager, database, jwt = create_app()\ninit_filters(app)\n\n\n# http://stackoverflow.com/questions/26724623/\[email protected]_request\ndef track_user():\n if current_user.is_authenticated:\n current_user.update_lat()\n\n\ndef make_celery(app=None):\n app = app or create_app()[0]\n celery.conf.update(app.config)\n ext = FlaskCeleryExt(app)\n return ext.celery\n\n\n# Health-check\nhealth = HealthCheck(current_app, \"/health-check\")\nhealth.add_check(health_check_celery)\nhealth.add_check(health_check_db)\nwith current_app.app_context():\n current_app.config['MIGRATION_STATUS'] = check_migrations()\nhealth.add_check(health_check_migrations)\n\n\n# http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists\n@after_task_publish.connect\ndef update_sent_state(sender=None, headers=None, **kwargs):\n # the task may not exist if sent using `send_task` which\n # sends tasks by name, so fall back to the default result backend\n # if that is the case.\n task = celery.tasks.get(sender)\n backend = task.backend if task else celery.backend\n backend.store_result(headers['id'], None, 'WAITING')\n\n\n# register celery tasks. removing them will cause the tasks to not function. 
so don't remove them\n# it is important to register them after celery is defined to resolve circular imports\n\nfrom .api.helpers import tasks\n\n# import helpers.tasks\n\n\nscheduler = BackgroundScheduler(timezone=utc)\n# scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)\n# scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)\nif app.config['ENABLE_ELASTICSEARCH']:\n scheduler.add_job(sync_events_elasticsearch, 'interval', minutes=60)\n scheduler.add_job(cron_rebuild_events_elasticsearch, 'cron', day=7)\n\nscheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)\nscheduler.add_job(send_event_fee_notification, 'cron', day=1)\nscheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)\nscheduler.add_job(change_session_state_on_event_completion, 'cron', hour=5, minute=30)\nscheduler.add_job(expire_pending_tickets_after_three_days, 'cron', hour=5)\nscheduler.start()\n\n\[email protected](500)\ndef internal_server_error(error):\n if current_app.config['PROPOGATE_ERROR'] is True:\n exc = JsonApiException({'pointer': ''}, str(error))\n else:\n exc = JsonApiException({'pointer': ''}, 'Unknown error')\n return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,\n {'Content-Type': 'application/vnd.api+json'})\n\n\nif __name__ == '__main__':\n current_app.run()\n", "path": "app/__init__.py"}], "after_files": [{"content": "from celery.signals import after_task_publish\nimport logging\nimport os.path\nfrom envparse import env\n\nimport sys\nfrom flask import Flask, json, make_response\nfrom flask_celeryext import FlaskCeleryExt\nfrom app.settings import get_settings, get_setts\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom flask_login import current_user\nfrom flask_jwt import JWT\nfrom datetime import timedelta\nfrom flask_cors import CORS\nfrom flask_rest_jsonapi.errors import jsonapi_errors\nfrom flask_rest_jsonapi.exceptions import JsonApiException\nfrom healthcheck import HealthCheck\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom elasticsearch_dsl.connections import connections\nfrom pytz import utc\n\nimport sqlalchemy as sa\n\nimport stripe\nfrom app.settings import get_settings\nfrom app.models import db\nfrom app.api.helpers.jwt import jwt_authenticate, jwt_identity\nfrom app.api.helpers.cache import cache\nfrom werkzeug.middleware.profiler import ProfilerMiddleware\nfrom app.views import BlueprintsManager\nfrom app.api.helpers.auth import AuthManager\nfrom app.api.helpers.scheduled_jobs import send_after_event_mail, send_event_fee_notification, \\\n send_event_fee_notification_followup, change_session_state_on_event_completion, \\\n expire_pending_tickets_after_three_days\nfrom app.models.event import Event\nfrom app.models.role_invite import RoleInvite\nfrom app.views.healthcheck import health_check_celery, health_check_db, health_check_migrations, check_migrations\nfrom app.views.elastic_search import client\nfrom app.views.elastic_cron_helpers import sync_events_elasticsearch, cron_rebuild_events_elasticsearch\nfrom app.views.redis_store import redis_store\nfrom app.views.celery_ import celery\nfrom app.templates.flask_ext.jinja.filters import init_filters\nimport sentry_sdk\nfrom sentry_sdk.integrations.flask import FlaskIntegration\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nstatic_dir = os.path.dirname(os.path.dirname(__file__)) + \"/static\"\ntemplate_dir = os.path.dirname(__file__) + \"/templates\"\napp = Flask(__name__, 
static_folder=static_dir, template_folder=template_dir)\nenv.read_envfile()\n\n\nclass ReverseProxied(object):\n \"\"\"\n ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n scheme = environ.get('HTTP_X_FORWARDED_PROTO')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n if os.getenv('FORCE_SSL', 'no') == 'yes':\n environ['wsgi.url_scheme'] = 'https'\n return self.app(environ, start_response)\n\n\napp.wsgi_app = ReverseProxied(app.wsgi_app)\n\napp_created = False\n\n\ndef create_app():\n global app_created\n if not app_created:\n BlueprintsManager.register(app)\n Migrate(app, db)\n\n app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))\n db.init_app(app)\n _manager = Manager(app)\n _manager.add_command('db', MigrateCommand)\n\n if app.config['CACHING']:\n cache.init_app(app, config={'CACHE_TYPE': 'simple'})\n else:\n cache.init_app(app, config={'CACHE_TYPE': 'null'})\n\n stripe.api_key = 'SomeStripeKey'\n app.secret_key = 'super secret key'\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'\n\n app.logger.addHandler(logging.StreamHandler(sys.stdout))\n app.logger.setLevel(logging.ERROR)\n\n # set up jwt\n app.config['JWT_AUTH_USERNAME_KEY'] = 'email'\n app.config['JWT_EXPIRATION_DELTA'] = timedelta(seconds=24 * 60 * 60)\n app.config['JWT_AUTH_URL_RULE'] = '/auth/session'\n _jwt = JWT(app, jwt_authenticate, jwt_identity)\n\n # setup celery\n app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']\n app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']\n app.config['CELERY_ACCEPT_CONTENT'] = ['json', 'application/text']\n\n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n AuthManager.init_login(app)\n\n if app.config['TESTING'] and app.config['PROFILE']:\n # Profiling\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n # development api\n with app.app_context():\n from app.api.admin_statistics_api.events import event_statistics\n from app.api.auth import auth_routes\n from app.api.attendees import attendee_misc_routes\n from app.api.bootstrap import api_v1\n from app.api.celery_tasks import celery_routes\n from app.api.event_copy import event_copy\n from app.api.exports import export_routes\n from app.api.imports import import_routes\n from app.api.uploads import upload_routes\n from app.api.users import user_misc_routes\n from app.api.orders import order_misc_routes\n from app.api.role_invites import role_invites_misc_routes\n from app.api.auth import ticket_blueprint, authorised_blueprint\n from app.api.admin_translations import admin_blueprint\n from app.api.orders import alipay_blueprint\n\n app.register_blueprint(api_v1)\n app.register_blueprint(event_copy)\n app.register_blueprint(upload_routes)\n app.register_blueprint(export_routes)\n app.register_blueprint(import_routes)\n app.register_blueprint(celery_routes)\n app.register_blueprint(auth_routes)\n app.register_blueprint(event_statistics)\n app.register_blueprint(user_misc_routes)\n app.register_blueprint(attendee_misc_routes)\n app.register_blueprint(order_misc_routes)\n app.register_blueprint(role_invites_misc_routes)\n app.register_blueprint(ticket_blueprint)\n app.register_blueprint(authorised_blueprint)\n app.register_blueprint(admin_blueprint)\n app.register_blueprint(alipay_blueprint)\n\n sa.orm.configure_mappers()\n\n if app.config['SERVE_STATIC']:\n 
app.add_url_rule('/static/<path:filename>',\n endpoint='static',\n view_func=app.send_static_file)\n\n # sentry\n if not app_created and 'SENTRY_DSN' in app.config:\n sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration()])\n\n # redis\n redis_store.init_app(app)\n\n # elasticsearch\n if app.config['ENABLE_ELASTICSEARCH']:\n client.init_app(app)\n connections.add_connection('default', client.elasticsearch)\n with app.app_context():\n try:\n cron_rebuild_events_elasticsearch.delay()\n except Exception:\n pass\n\n app_created = True\n return app, _manager, db, _jwt\n\n\ncurrent_app, manager, database, jwt = create_app()\ninit_filters(app)\n\n\n# http://stackoverflow.com/questions/26724623/\[email protected]_request\ndef track_user():\n if current_user.is_authenticated:\n current_user.update_lat()\n\n\ndef make_celery(app=None):\n app = app or create_app()[0]\n celery.conf.update(app.config)\n ext = FlaskCeleryExt(app)\n return ext.celery\n\n\n# Health-check\nhealth = HealthCheck(current_app, \"/health-check\")\nhealth.add_check(health_check_celery)\nhealth.add_check(health_check_db)\nwith current_app.app_context():\n current_app.config['MIGRATION_STATUS'] = check_migrations()\nhealth.add_check(health_check_migrations)\n\n\n# http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists\n@after_task_publish.connect\ndef update_sent_state(sender=None, headers=None, **kwargs):\n # the task may not exist if sent using `send_task` which\n # sends tasks by name, so fall back to the default result backend\n # if that is the case.\n task = celery.tasks.get(sender)\n backend = task.backend if task else celery.backend\n backend.store_result(headers['id'], None, 'WAITING')\n\n\n# register celery tasks. removing them will cause the tasks to not function. so don't remove them\n# it is important to register them after celery is defined to resolve circular imports\n\nfrom .api.helpers import tasks\n\n# import helpers.tasks\n\n\nscheduler = BackgroundScheduler(timezone=utc)\n# scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)\n# scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)\nif app.config['ENABLE_ELASTICSEARCH']:\n scheduler.add_job(sync_events_elasticsearch, 'interval', minutes=60)\n scheduler.add_job(cron_rebuild_events_elasticsearch, 'cron', day=7)\n\nscheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)\nscheduler.add_job(send_event_fee_notification, 'cron', day=1)\nscheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)\nscheduler.add_job(change_session_state_on_event_completion, 'cron', hour=5, minute=30)\nscheduler.add_job(expire_pending_tickets_after_three_days, 'cron', hour=5)\nscheduler.start()\n\n\[email protected](500)\ndef internal_server_error(error):\n if current_app.config['PROPOGATE_ERROR'] is True:\n exc = JsonApiException({'pointer': ''}, str(error))\n else:\n exc = JsonApiException({'pointer': ''}, 'Unknown error')\n return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,\n {'Content-Type': 'application/vnd.api+json'})\n\n\nif __name__ == '__main__':\n current_app.run()\n", "path": "app/__init__.py"}]} |
gh_patches_debug_1482 | rasdani/github-patches | git_diff | translate__pootle-3380 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Core: drop MySQL dependence on MyISAM
Core depends on MyISAM at the moment because of low level features used for changeid tracking. We need to migrate that to a more general approach that works on InnoDB and other supported DB engines.
- [x] Make resources list work in all DB backends (#3539)
- [x] Switch revision counter to Redis (#3364)
- [x] Ensure tests run on InnoDB (#3777)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2008-2013 Zuza Software Foundation
5 # Copyright 2014 Evernote Corporation
6 #
7 # This file is part of Pootle.
8 #
9 # This program is free software; you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation; either version 2 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program; if not, see <http://www.gnu.org/licenses/>.
21
22 import glob
23 import os
24 import re
25 import sys
26
27 from distutils import log
28 from distutils.command.build import build as DistutilsBuild
29 from distutils.errors import DistutilsOptionError
30
31 from setuptools import find_packages, setup
32 from setuptools.command.test import test as TestCommand
33
34 from pootle.__version__ import sver as pootle_version
35
36
37 def parse_requirements(file_name):
38 """Parses a pip requirements file and returns a list of packages.
39
40 Use the result of this function in the ``install_requires`` field.
41 Copied from cburgmer/pdfserver.
42 """
43 requirements = []
44 for line in open(file_name, 'r').read().split('\n'):
45 # Ignore comments, blank lines and included requirements files
46 if re.match(r'(\s*#)|(\s*$)|(-r .*$)', line):
47 continue
48
49 if re.match(r'\s*-e\s+', line):
50 requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', line))
51 elif re.match(r'\s*-f\s+', line):
52 pass
53 else:
54 requirements.append(line)
55
56 return requirements
57
58
59 class PyTest(TestCommand):
60
61 def finalize_options(self):
62 TestCommand.finalize_options(self)
63 self.test_args = ['--tb=short', 'tests/']
64 self.test_suite = True
65
66 def run_tests(self):
67 #import here, cause outside the eggs aren't loaded
68 import pytest
69 errno = pytest.main(self.test_args)
70 sys.exit(errno)
71
72
73 class PootleBuildMo(DistutilsBuild):
74
75 description = "compile Gettext PO files into MO"
76 user_options = [
77 ('all', None,
78 "compile all language (don't use LINGUAS file)"),
79 ('lang=', 'l',
80 "specify a language to compile"),
81 ]
82 boolean_options = ['all']
83
84 po_path_base = os.path.join('pootle', 'locale')
85 _langs = []
86
87 def initialize_options(self):
88 self.all = False
89 self.lang = None
90
91 def finalize_options(self):
92 if self.all and self.lang is not None:
93 raise DistutilsOptionError(
94 "Can't use --all and --lang together"
95 )
96 if self.lang is not None:
97 self._langs = [self.lang]
98 elif self.all:
99 for lang in os.listdir(self.po_path_base):
100 if (os.path.isdir(os.path.join(self.po_path_base, lang)) and
101 lang != "templates"):
102 self._langs.append(lang)
103 else:
104 for lang in open(os.path.join('pootle', 'locale', 'LINGUAS')):
105 self._langs.append(lang.rstrip())
106
107 def build_mo(self):
108 """Compile .mo files from available .po files"""
109 import subprocess
110 import gettext
111 from translate.storage import factory
112
113 for lang in self._langs:
114 lang = lang.rstrip()
115
116 po_path = os.path.join('pootle', 'locale', lang)
117 mo_path = os.path.join('pootle', 'locale', lang, 'LC_MESSAGES')
118
119 if not os.path.exists(mo_path):
120 os.makedirs(mo_path)
121
122 for po, mo in (('pootle.po', 'django.mo'),
123 ('pootle_js.po', 'djangojs.mo')):
124 po_filename = os.path.join(po_path, po)
125 mo_filename = os.path.join(mo_path, mo)
126
127 if not os.path.exists(po_filename):
128 log.warn("%s: missing file %s", lang, po_filename)
129 continue
130
131 if not os.path.exists(mo_path):
132 os.makedirs(mo_path)
133
134 log.info("compiling %s", lang)
135 try:
136 subprocess.call([
137 'msgfmt', '--strict', '-o', mo_filename, po_filename],
138 stderr=subprocess.STDOUT)
139 except Exception as e:
140 log.warn("%s: skipping, running msgfmt failed: %s",
141 lang, e)
142
143 try:
144 store = factory.getobject(po_filename)
145 gettext.c2py(store.getheaderplural()[1])
146 except Exception:
147 log.warn("%s: invalid plural header in %s",
148 lang, po_filename)
149
150 def run(self):
151 self.build_mo()
152
153
154 setup(
155 name="Pootle",
156 version=pootle_version,
157
158 description="An online collaborative localization tool.",
159 long_description=open(
160 os.path.join(os.path.dirname(__file__), 'README.rst')
161 ).read(),
162
163 author="Translate",
164 author_email="[email protected]",
165 license="GNU General Public License (GPL)",
166 url="http://pootle.translatehouse.org",
167 download_url="http://sourceforge.net/projects/translate/files/Pootle/" + pootle_version,
168
169 install_requires=parse_requirements('requirements/base.txt'),
170 tests_require=parse_requirements('requirements/tests.txt'),
171
172 platforms=["any"],
173 classifiers=[
174 "Development Status :: 5 - Production/Stable",
175 "Environment :: Web Environment",
176 "Framework :: Django",
177 "Intended Audience :: Developers",
178 "Intended Audience :: End Users/Desktop",
179 "Intended Audience :: Information Technology",
180 "License :: OSI Approved :: GNU General Public License (GPL)",
181 "Operating System :: OS Independent",
182 "Operating System :: Microsoft :: Windows",
183 "Operating System :: Unix",
184 "Programming Language :: JavaScript",
185 "Programming Language :: Python",
186 "Topic :: Software Development :: Localization",
187 "Topic :: Text Processing :: Linguistic"
188 ],
189 zip_safe=False,
190 packages=find_packages(exclude=['deploy*']),
191 include_package_data=True,
192
193 entry_points={
194 'console_scripts': [
195 'pootle = pootle.runner:main',
196 ],
197 },
198 cmdclass={
199 'build_mo': PootleBuildMo,
200 'test': PyTest,
201 },
202 )
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -43,7 +43,7 @@
requirements = []
for line in open(file_name, 'r').read().split('\n'):
# Ignore comments, blank lines and included requirements files
- if re.match(r'(\s*#)|(\s*$)|(-r .*$)', line):
+ if re.match(r'(\s*#)|(\s*$)|((-r|--allow-external|--allow-unverified) .*$)', line):
continue
if re.match(r'\s*-e\s+', line):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -43,7 +43,7 @@\n requirements = []\n for line in open(file_name, 'r').read().split('\\n'):\n # Ignore comments, blank lines and included requirements files\n- if re.match(r'(\\s*#)|(\\s*$)|(-r .*$)', line):\n+ if re.match(r'(\\s*#)|(\\s*$)|((-r|--allow-external|--allow-unverified) .*$)', line):\n continue\n \n if re.match(r'\\s*-e\\s+', line):\n", "issue": "Core: drop MySQL dependence on MyISAM\nCore depends on MyISAM at the moment because of low level features used for changeid tracking. We need to migrate that to a more general approach that works on InnoDB and other supported DB engines.\n- [x] Make resources list work in all DB backends (#3539)\n- [x] Switch revision counter to Redis (#3364)\n- [x] Ensure tests run on InnoDB (#3777)\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2008-2013 Zuza Software Foundation\n# Copyright 2014 Evernote Corporation\n#\n# This file is part of Pootle.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\nimport glob\nimport os\nimport re\nimport sys\n\nfrom distutils import log\nfrom distutils.command.build import build as DistutilsBuild\nfrom distutils.errors import DistutilsOptionError\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\nfrom pootle.__version__ import sver as pootle_version\n\n\ndef parse_requirements(file_name):\n \"\"\"Parses a pip requirements file and returns a list of packages.\n\n Use the result of this function in the ``install_requires`` field.\n Copied from cburgmer/pdfserver.\n \"\"\"\n requirements = []\n for line in open(file_name, 'r').read().split('\\n'):\n # Ignore comments, blank lines and included requirements files\n if re.match(r'(\\s*#)|(\\s*$)|(-r .*$)', line):\n continue\n\n if re.match(r'\\s*-e\\s+', line):\n requirements.append(re.sub(r'\\s*-e\\s+.*#egg=(.*)$', r'\\1', line))\n elif re.match(r'\\s*-f\\s+', line):\n pass\n else:\n requirements.append(line)\n\n return requirements\n\n\nclass PyTest(TestCommand):\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['--tb=short', 'tests/']\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nclass PootleBuildMo(DistutilsBuild):\n\n description = \"compile Gettext PO files into MO\"\n user_options = [\n ('all', None,\n \"compile all language (don't use LINGUAS file)\"),\n ('lang=', 'l',\n \"specify a language to compile\"),\n ]\n boolean_options = ['all']\n\n po_path_base = os.path.join('pootle', 'locale')\n _langs = []\n\n def initialize_options(self):\n self.all = False\n self.lang = None\n\n def finalize_options(self):\n if self.all and self.lang is not None:\n raise DistutilsOptionError(\n \"Can't use --all and --lang 
together\"\n )\n if self.lang is not None:\n self._langs = [self.lang]\n elif self.all:\n for lang in os.listdir(self.po_path_base):\n if (os.path.isdir(os.path.join(self.po_path_base, lang)) and\n lang != \"templates\"):\n self._langs.append(lang)\n else:\n for lang in open(os.path.join('pootle', 'locale', 'LINGUAS')):\n self._langs.append(lang.rstrip())\n\n def build_mo(self):\n \"\"\"Compile .mo files from available .po files\"\"\"\n import subprocess\n import gettext\n from translate.storage import factory\n\n for lang in self._langs:\n lang = lang.rstrip()\n\n po_path = os.path.join('pootle', 'locale', lang)\n mo_path = os.path.join('pootle', 'locale', lang, 'LC_MESSAGES')\n\n if not os.path.exists(mo_path):\n os.makedirs(mo_path)\n\n for po, mo in (('pootle.po', 'django.mo'),\n ('pootle_js.po', 'djangojs.mo')):\n po_filename = os.path.join(po_path, po)\n mo_filename = os.path.join(mo_path, mo)\n\n if not os.path.exists(po_filename):\n log.warn(\"%s: missing file %s\", lang, po_filename)\n continue\n\n if not os.path.exists(mo_path):\n os.makedirs(mo_path)\n\n log.info(\"compiling %s\", lang)\n try:\n subprocess.call([\n 'msgfmt', '--strict', '-o', mo_filename, po_filename],\n stderr=subprocess.STDOUT)\n except Exception as e:\n log.warn(\"%s: skipping, running msgfmt failed: %s\",\n lang, e)\n\n try:\n store = factory.getobject(po_filename)\n gettext.c2py(store.getheaderplural()[1])\n except Exception:\n log.warn(\"%s: invalid plural header in %s\",\n lang, po_filename)\n\n def run(self):\n self.build_mo()\n\n\nsetup(\n name=\"Pootle\",\n version=pootle_version,\n\n description=\"An online collaborative localization tool.\",\n long_description=open(\n os.path.join(os.path.dirname(__file__), 'README.rst')\n ).read(),\n\n author=\"Translate\",\n author_email=\"[email protected]\",\n license=\"GNU General Public License (GPL)\",\n url=\"http://pootle.translatehouse.org\",\n download_url=\"http://sourceforge.net/projects/translate/files/Pootle/\" + pootle_version,\n\n install_requires=parse_requirements('requirements/base.txt'),\n tests_require=parse_requirements('requirements/tests.txt'),\n\n platforms=[\"any\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: End Users/Desktop\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Operating System :: OS Independent\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: Unix\",\n \"Programming Language :: JavaScript\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Localization\",\n \"Topic :: Text Processing :: Linguistic\"\n ],\n zip_safe=False,\n packages=find_packages(exclude=['deploy*']),\n include_package_data=True,\n\n entry_points={\n 'console_scripts': [\n 'pootle = pootle.runner:main',\n ],\n },\n cmdclass={\n 'build_mo': PootleBuildMo,\n 'test': PyTest,\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2008-2013 Zuza Software Foundation\n# Copyright 2014 Evernote Corporation\n#\n# This file is part of Pootle.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is 
distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\nimport glob\nimport os\nimport re\nimport sys\n\nfrom distutils import log\nfrom distutils.command.build import build as DistutilsBuild\nfrom distutils.errors import DistutilsOptionError\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\nfrom pootle.__version__ import sver as pootle_version\n\n\ndef parse_requirements(file_name):\n \"\"\"Parses a pip requirements file and returns a list of packages.\n\n Use the result of this function in the ``install_requires`` field.\n Copied from cburgmer/pdfserver.\n \"\"\"\n requirements = []\n for line in open(file_name, 'r').read().split('\\n'):\n # Ignore comments, blank lines and included requirements files\n if re.match(r'(\\s*#)|(\\s*$)|((-r|--allow-external|--allow-unverified) .*$)', line):\n continue\n\n if re.match(r'\\s*-e\\s+', line):\n requirements.append(re.sub(r'\\s*-e\\s+.*#egg=(.*)$', r'\\1', line))\n elif re.match(r'\\s*-f\\s+', line):\n pass\n else:\n requirements.append(line)\n\n return requirements\n\n\nclass PyTest(TestCommand):\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['--tb=short', 'tests/']\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nclass PootleBuildMo(DistutilsBuild):\n\n description = \"compile Gettext PO files into MO\"\n user_options = [\n ('all', None,\n \"compile all language (don't use LINGUAS file)\"),\n ('lang=', 'l',\n \"specify a language to compile\"),\n ]\n boolean_options = ['all']\n\n po_path_base = os.path.join('pootle', 'locale')\n _langs = []\n\n def initialize_options(self):\n self.all = False\n self.lang = None\n\n def finalize_options(self):\n if self.all and self.lang is not None:\n raise DistutilsOptionError(\n \"Can't use --all and --lang together\"\n )\n if self.lang is not None:\n self._langs = [self.lang]\n elif self.all:\n for lang in os.listdir(self.po_path_base):\n if (os.path.isdir(os.path.join(self.po_path_base, lang)) and\n lang != \"templates\"):\n self._langs.append(lang)\n else:\n for lang in open(os.path.join('pootle', 'locale', 'LINGUAS')):\n self._langs.append(lang.rstrip())\n\n def build_mo(self):\n \"\"\"Compile .mo files from available .po files\"\"\"\n import subprocess\n import gettext\n from translate.storage import factory\n\n for lang in self._langs:\n lang = lang.rstrip()\n\n po_path = os.path.join('pootle', 'locale', lang)\n mo_path = os.path.join('pootle', 'locale', lang, 'LC_MESSAGES')\n\n if not os.path.exists(mo_path):\n os.makedirs(mo_path)\n\n for po, mo in (('pootle.po', 'django.mo'),\n ('pootle_js.po', 'djangojs.mo')):\n po_filename = os.path.join(po_path, po)\n mo_filename = os.path.join(mo_path, mo)\n\n if not os.path.exists(po_filename):\n log.warn(\"%s: missing file %s\", lang, po_filename)\n continue\n\n if not os.path.exists(mo_path):\n os.makedirs(mo_path)\n\n log.info(\"compiling %s\", lang)\n try:\n subprocess.call([\n 'msgfmt', '--strict', '-o', mo_filename, po_filename],\n stderr=subprocess.STDOUT)\n except Exception as e:\n log.warn(\"%s: skipping, running 
msgfmt failed: %s\",\n lang, e)\n\n try:\n store = factory.getobject(po_filename)\n gettext.c2py(store.getheaderplural()[1])\n except Exception:\n log.warn(\"%s: invalid plural header in %s\",\n lang, po_filename)\n\n def run(self):\n self.build_mo()\n\n\nsetup(\n name=\"Pootle\",\n version=pootle_version,\n\n description=\"An online collaborative localization tool.\",\n long_description=open(\n os.path.join(os.path.dirname(__file__), 'README.rst')\n ).read(),\n\n author=\"Translate\",\n author_email=\"[email protected]\",\n license=\"GNU General Public License (GPL)\",\n url=\"http://pootle.translatehouse.org\",\n download_url=\"http://sourceforge.net/projects/translate/files/Pootle/\" + pootle_version,\n\n install_requires=parse_requirements('requirements/base.txt'),\n tests_require=parse_requirements('requirements/tests.txt'),\n\n platforms=[\"any\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: End Users/Desktop\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Operating System :: OS Independent\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: Unix\",\n \"Programming Language :: JavaScript\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Localization\",\n \"Topic :: Text Processing :: Linguistic\"\n ],\n zip_safe=False,\n packages=find_packages(exclude=['deploy*']),\n include_package_data=True,\n\n entry_points={\n 'console_scripts': [\n 'pootle = pootle.runner:main',\n ],\n },\n cmdclass={\n 'build_mo': PootleBuildMo,\n 'test': PyTest,\n },\n)\n", "path": "setup.py"}]} |
gh_patches_debug_1483 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1375 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update grammar in contributing guide
### Idea summary
Improve the grammar in our contributing guide with an automated grammar checker.
### Further details
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/core/settings.py`
Content:
```
1 """
2 Django settings for civiwiki project.
3 Darius Calliet May 12, 2016
4
5 Production settings file to select proper environment variables.
6 """
7 import os
8
9 # False if not in os.environ
10 DEBUG = os.getenv("DEBUG", False)
11
12 # defaults to second value if not found in os.environ
13 DJANGO_HOST = os.getenv("DJANGO_HOST", "LOCALHOST")
14
15 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
16 SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "TEST_KEY_FOR_DEVELOPMENT")
17 ALLOWED_HOSTS = [".herokuapp.com", ".civiwiki.org", "127.0.0.1", "localhost", "0.0.0.0"]
18
19 INSTALLED_APPS = (
20 "django.contrib.admin",
21 "django.contrib.auth",
22 "django.contrib.contenttypes",
23 "django.contrib.sessions",
24 "django.contrib.messages",
25 "django.contrib.staticfiles",
26 "django_extensions",
27 "storages",
28 "core",
29 "rest_framework",
30 "accounts.apps.AccountsConfig",
31 "threads",
32 "notifications",
33 "corsheaders",
34 "taggit",
35 "categories",
36 "notification",
37 "debug_toolbar",
38 )
39
40 MIDDLEWARE = [
41 "debug_toolbar.middleware.DebugToolbarMiddleware",
42 "corsheaders.middleware.CorsMiddleware",
43 "django.middleware.security.SecurityMiddleware",
44 "whitenoise.middleware.WhiteNoiseMiddleware",
45 "django.contrib.sessions.middleware.SessionMiddleware",
46 "django.middleware.common.CommonMiddleware",
47 "django.middleware.csrf.CsrfViewMiddleware",
48 "django.contrib.auth.middleware.AuthenticationMiddleware",
49 # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
50 "django.contrib.messages.middleware.MessageMiddleware",
51 "django.middleware.clickjacking.XFrameOptionsMiddleware",
52 ]
53
54 INTERNAL_IPS = [
55 "127.0.0.1",
56 ]
57
58 CSRF_USE_SESSIONS = (
59 True # Store the CSRF token in the users session instead of in a cookie
60 )
61
62 CORS_ORIGIN_ALLOW_ALL = True
63 ROOT_URLCONF = "core.urls"
64
65 # SSL Setup
66 if DJANGO_HOST != "LOCALHOST":
67 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
68 SECURE_SSL_REDIRECT = True
69 SESSION_COOKIE_SECURE = True
70 CSRF_COOKIE_SECURE = True
71
72 # Internationalization & Localization
73 LANGUAGE_CODE = "en-us"
74 TIME_ZONE = "UTC"
75 USE_I18N = True
76 USE_L10N = True
77 USE_TZ = True
78
79 TEMPLATES = [
80 {
81 "BACKEND": "django.template.backends.django.DjangoTemplates",
82 "DIRS": [
83 os.path.join(BASE_DIR, "threads/templates/threads"),
84 os.path.join(BASE_DIR, "accounts/templates/accounts"),
85 ], # TODO: Add non-webapp template directory
86 "APP_DIRS": True,
87 "OPTIONS": {
88 "context_processors": [
89 "django.template.context_processors.debug",
90 "django.template.context_processors.request",
91 "django.contrib.auth.context_processors.auth",
92 "django.contrib.messages.context_processors.messages",
93 ],
94 },
95 },
96 ]
97
98 WSGI_APPLICATION = "core.wsgi.application"
99
100 # Apex Contact for Production Errors
101 ADMINS = [("Development Team", "[email protected]")]
102
103 STATIC_URL = "/static/"
104 STATICFILES_DIRS = (os.path.join(BASE_DIR, "core/templates/static"),)
105 STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
106
107 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
108 MEDIA_URL = "/media/"
109
110 # TODO: re-organize and simplify staticfiles settings
111 if "CIVIWIKI_LOCAL_NAME" not in os.environ:
112 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
113
114 # Use DATABASE_URL in production
115 DATABASE_URL = os.getenv("DATABASE_URL")
116
117 if DATABASE_URL is not None:
118 DATABASES = {"default": DATABASE_URL}
119 else:
120 # Default to sqlite for simplicity in development
121 DATABASES = {
122 "default": {
123 "ENGINE": "django.db.backends.sqlite3",
124 "NAME": BASE_DIR + "/" + "db.sqlite3",
125 }
126 }
127
128 # Email Backend Setup
129 if "EMAIL_HOST" not in os.environ:
130 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
131 EMAIL_HOST_USER = "[email protected]"
132 else:
133 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
134 EMAIL_HOST = os.getenv("EMAIL_HOST")
135 EMAIL_PORT = os.getenv("EMAIL_PORT")
136 EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER")
137 EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD")
138 EMAIL_USE_SSL = True
139 DEFAULT_FROM_EMAIL = EMAIL_HOST
140
141 # Notification API Settings
142 NOTIFICATIONS_SOFT_DELETE = True
143 NOTIFICATIONS_USE_JSONFIELD = True
144
145 # Django REST API Settings
146 DEFAULT_RENDERER_CLASSES = ("rest_framework.renderers.JSONRenderer",)
147
148 if DEBUG:
149 # Browsable HTML - Enabled only in Debug mode (dev)
150 DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (
151 "rest_framework.renderers.BrowsableAPIRenderer",
152 )
153
154 REST_FRAMEWORK = {
155 "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
156 "DEFAULT_RENDERER_CLASSES": DEFAULT_RENDERER_CLASSES,
157 "DEFAULT_AUTHENTICATION_CLASSES": (
158 "rest_framework.authentication.BasicAuthentication",
159 "rest_framework.authentication.SessionAuthentication",
160 ),
161 }
162
163 # CORS Settings
164 CORS_ORIGIN_ALLOW_ALL = True
165
166 # Custom User model
167 AUTH_USER_MODEL = "accounts.User"
168
169 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
170
171 # Login Logout URLS
172 LOGIN_URL = "login/"
173 LOGIN_REDIRECT_URL = "/"
174 LOGOUT_REDIRECT_URL = "/"
175
176 AUTH_PASSWORD_VALIDATORS = [
177 {
178 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa: E501
179 },
180 {
181 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
182 "OPTIONS": {
183 "min_length": 4,
184 },
185 },
186 {
187 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
188 },
189 {
190 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
191 },
192 ]
193
194 LOGGING = {
195 "version": 1,
196 "disable_existing_loggers": False,
197 "formatters": {"rich": {"datefmt": "[%X]"}},
198 "handlers": {
199 "console": {
200 "class": "rich.logging.RichHandler",
201 "formatter": "rich",
202 "level": "WARNING",
203 # "filters": ["require_debug_true"],
204 "rich_tracebacks": True,
205 "tracebacks_show_locals": True,
206 }
207 },
208 "loggers": {"django": {"handlers": ["console"]}},
209 }
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/project/core/settings.py b/project/core/settings.py
--- a/project/core/settings.py
+++ b/project/core/settings.py
@@ -107,9 +107,7 @@
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
-# TODO: re-organize and simplify staticfiles settings
-if "CIVIWIKI_LOCAL_NAME" not in os.environ:
- STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
+STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Use DATABASE_URL in production
DATABASE_URL = os.getenv("DATABASE_URL")
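The patch simply makes the WhiteNoise manifest storage unconditional instead of gating it on `CIVIWIKI_LOCAL_NAME`. A quick way to confirm which backend is active after the change (a sketch, assuming the project's settings load in a Django shell) is:

```python
# Run via `python manage.py shell` with the project's settings module loaded.
from django.conf import settings
from django.core.files.storage import get_storage_class

print(settings.STATICFILES_STORAGE)
print(get_storage_class(settings.STATICFILES_STORAGE))  # the whitenoise storage class
```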
| {"golden_diff": "diff --git a/project/core/settings.py b/project/core/settings.py\n--- a/project/core/settings.py\n+++ b/project/core/settings.py\n@@ -107,9 +107,7 @@\n MEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\n MEDIA_URL = \"/media/\"\n \n-# TODO: re-organize and simplify staticfiles settings\n-if \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n- STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n+STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n \n # Use DATABASE_URL in production\n DATABASE_URL = os.getenv(\"DATABASE_URL\")\n", "issue": "Update grammar in contributing guide\n### Idea summary\n\nImprove the grammar in our contributing guide with an automated grammar checker.\n\n### Further details\n\n_No response_\n", "before_files": [{"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n \"debug_toolbar\",\n)\n\nMIDDLEWARE = [\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n 
\"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\n# TODO: re-organize and simplify staticfiles settings\nif \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"rich\": {\"datefmt\": \"[%X]\"}},\n \"handlers\": {\n \"console\": {\n \"class\": \"rich.logging.RichHandler\",\n \"formatter\": \"rich\",\n \"level\": \"WARNING\",\n # \"filters\": [\"require_debug_true\"],\n \"rich_tracebacks\": True,\n \"tracebacks_show_locals\": True,\n }\n },\n \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n", "path": "project/core/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select 
proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n \"debug_toolbar\",\n)\n\nMIDDLEWARE = [\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n 
EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"rich\": {\"datefmt\": \"[%X]\"}},\n \"handlers\": {\n \"console\": {\n \"class\": \"rich.logging.RichHandler\",\n \"formatter\": \"rich\",\n \"level\": \"WARNING\",\n # \"filters\": [\"require_debug_true\"],\n \"rich_tracebacks\": True,\n \"tracebacks_show_locals\": True,\n }\n },\n \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n", "path": "project/core/settings.py"}]} |
gh_patches_debug_1484 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1363 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
opsdroid slack connector intermittently ends up in an exception
<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->
# Description - opsdroid slack connector intermittently ends up in an exception
This doesn't happen for all users, but I see that line 146 in the file "/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py" is the culprit.
```
INFO opsdroid.connector.slack: Connected successfully.
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
DEBUG slack.rtm.client: The Websocket connection has been opened.
DEBUG opsdroid.parsers.crontab: Running crontab skills at Mon Feb 10 10:21:00 2020.
DEBUG slack.rtm.client: Running 1 callbacks for event: 'message'
DEBUG opsdroid.connector.slack: Looking up sender username.
ERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/local/bin/opsdroid", line 8, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py", line 42, in start
opsdroid.run()
File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 165, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 583, in run_until_complete
return future.result()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 339, in _connect_and_read
await self._read_messages()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 390, in _read_messages
await self._dispatch_event(event, data=payload)
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 437, in _dispatch_event
rtm_client=self, web_client=self._web_client, data=data
File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py", line 146, in process_message
user_info = await self.lookup_username(message["user"])
KeyError: 'user'
ERROR: Unhandled exception in opsdroid, exiting...
```
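The trace shows `process_message()` indexing `message["user"]` unconditionally, while some Slack RTM `message` events (bot messages, `message_changed`/`message_deleted` subtypes and similar) carry no top-level `user` field. A guard of roughly the following shape (an illustrative sketch, not the actual opsdroid patch) would avoid the KeyError:

```python
# Sketch only: a standalone stand-in for a guard inside ConnectorSlack.process_message();
# the names mirror the traceback above.
import logging

_LOGGER = logging.getLogger(__name__)


async def process_message(message, connector):
    user = message.get("user")
    if user is None:
        # Bot messages and edited/deleted-message subtypes have no top-level "user".
        _LOGGER.debug("Ignoring message without a 'user' field (subtype=%s).",
                      message.get("subtype"))
        return
    return await connector.lookup_username(user)
```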
## Steps to Reproduce
Please also include relevant information and steps to reproduce the bug/issue.
I am not sure if this can be reproduced elsewhere; otherwise it would have been reported by other users.
The Slack channel has about 82 users.
The bot is part of 2 channels.
Users also interact with the bot directly.
## Expected Functionality
No exception; looking up the sender username should succeed.
## Experienced Functionality
Explain what happened instead (please include the debug log).
```
INFO opsdroid.connector.slack: Connected successfully.
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
DEBUG slack.rtm.client: The Websocket connection has been opened.
DEBUG opsdroid.parsers.crontab: Running crontab skills at Mon Feb 10 10:21:00 2020.
DEBUG slack.rtm.client: Running 1 callbacks for event: 'message'
DEBUG opsdroid.connector.slack: Looking up sender username.
ERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/local/bin/opsdroid", line 8, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py", line 42, in start
opsdroid.run()
File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 165, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 583, in run_until_complete
return future.result()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 339, in _connect_and_read
await self._read_messages()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 390, in _read_messages
await self._dispatch_event(event, data=payload)
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 437, in _dispatch_event
rtm_client=self, web_client=self._web_client, data=data
File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py", line 146, in process_message
user_info = await self.lookup_username(message["user"])
KeyError: 'user'
ERROR: Unhandled exception in opsdroid, exiting...
```
## Versions
- **Opsdroid version:** latest master code.
- **Python version:** python3.7
- **OS/Docker version:** 18.06.3-ce
## Configuration File
Please include your version of the configuration file below.
configuration file passed in values.yaml helm chart
```yaml
configuration: |
welcome-message: true
connectors:
slack:
token: "xxx"
bot-name: "xxx" # default "opsdroid"
default-room: "#xxx" # default "#general"
#icon-emoji: ":smile:" # default ":robot_face:"
connect-timeout: 10 # default 10 seconds
chat-as-user: false # default false
skills:
- name: skill-yyy-statistics
path: /home/skill/skill-yyy-statistics
db_server: "1.1.1.1"
db_name: "xx"
user: "xxx"
password: "xxx"
- name: skill-yyy-help
path: /home/skill/skill-yyy-help
- name: skill-yyy-cache
path: /home/skill/skill-yyy-cache
db_server: "1.1.1.1"
db_name: "zz"
user: "xxx"
password: "xxxx"
- name: skill-yyy-eee
path: /home/skill/skill-yyy-eee
- name: skill-yyy-ttt
path: /home/skill/skill-yyy-ttt
```
## Additional Details
Any other details you wish to include such as screenshots, console messages, etc.
<!-- Love opsdroid? Please consider supporting our collective:
+👉 https://opencollective.com/opsdroid/donate -->
opsdroid slack connector intermittently ends up in an exception
<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->
# Description - opsdroid slack connector intermittently ends up in an exception
This doesn't happen for all users, but I see that line 146 in the file "/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py" is the culprit.
```
INFO opsdroid.connector.slack: Connected successfully.
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
DEBUG slack.rtm.client: The Websocket connection has been opened.
DEBUG opsdroid.parsers.crontab: Running crontab skills at Mon Feb 10 10:21:00 2020.
DEBUG slack.rtm.client: Running 1 callbacks for event: 'message'
DEBUG opsdroid.connector.slack: Looking up sender username.
ERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/local/bin/opsdroid", line 8, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py", line 42, in start
opsdroid.run()
File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 165, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 583, in run_until_complete
return future.result()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 339, in _connect_and_read
await self._read_messages()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 390, in _read_messages
await self._dispatch_event(event, data=payload)
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 437, in _dispatch_event
rtm_client=self, web_client=self._web_client, data=data
File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py", line 146, in process_message
user_info = await self.lookup_username(message["user"])
KeyError: 'user'
ERROR: Unhandled exception in opsdroid, exiting...
```
## Steps to Reproduce
Please also include relevant information and steps to reproduce the bug/issue.
i am not sure if this can be reproduced elsewhere - otherwise would have been reported by other users.
the slack channel has about 82 users.
the bot is part of 2 channels.
also users interact with the bot directly /
## Expected Functionality
no exception - Looking up sender username should succeed.
## Experienced Functionality
Explain what happened instead(Please include the debug log).
```INFO opsdroid.connector.slack: Connected successfully.
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
DEBUG slack.rtm.client: The Websocket connection has been opened.
DEBUG opsdroid.parsers.crontab: Running crontab skills at Mon Feb 10 10:21:00 2020.
DEBUG slack.rtm.client: Running 1 callbacks for event: 'message'
DEBUG opsdroid.connector.slack: Looking up sender username.
ERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/local/bin/opsdroid", line 8, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py", line 42, in start
opsdroid.run()
File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 165, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 583, in run_until_complete
return future.result()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 339, in _connect_and_read
await self._read_messages()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 390, in _read_messages
await self._dispatch_event(event, data=payload)
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 437, in _dispatch_event
rtm_client=self, web_client=self._web_client, data=data
File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py", line 146, in process_message
user_info = await self.lookup_username(message["user"])
KeyError: 'user'
ERROR: Unhandled exception in opsdroid, exiting...
```
## Versions
- **Opsdroid version:** latest master code.
- **Python version:** python3.7
- **OS/Docker version:** 18.06.3-ce
## Configuration File
Please include your version of the configuration file below.
configuration file passed in values.yaml helm chart
```yaml
configuration: |
welcome-message: true
connectors:
slack:
token: "xxx"
bot-name: "xxx" # default "opsdroid"
default-room: "#xxx" # default "#general"
#icon-emoji: ":smile:" # default ":robot_face:"
connect-timeout: 10 # default 10 seconds
chat-as-user: false # default false
skills:
- name: skill-yyy-statistics
path: /home/skill/skill-yyy-statistics
db_server: "1.1.1.1"
db_name: "xx"
user: "xxx"
password: "xxx"
- name: skill-yyy-help
path: /home/skill/skill-yyy-help
- name: skill-yyy-cache
path: /home/skill/skill-yyy-cache
db_server: "1.1.1.1"
db_name: "zz"
user: "xxx"
password: "xxxx"
- name: skill-yyy-eee
path: /home/skill/skill-yyy-eee
- name: skill-yyy-ttt
path: /home/skill/skill-yyy-ttt
```
## Additional Details
Any other details you wish to include such as screenshots, console messages, etc.
<!-- Love opsdroid? Please consider supporting our collective:
+👉 https://opencollective.com/opsdroid/donate -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/connector/slack/__init__.py`
Content:
```
1 """A connector for Slack."""
2 import logging
3 import re
4 import os
5 import ssl
6 import certifi
7 import json
8
9 import aiohttp
10
11 import slack
12 from emoji import demojize
13 from voluptuous import Required
14
15 from opsdroid.connector import Connector, register_event
16 from opsdroid.events import Message, Reaction
17 from opsdroid.connector.slack.events import (
18 Blocks,
19 BlockActions,
20 MessageAction,
21 ViewSubmission,
22 ViewClosed,
23 )
24
25
26 _LOGGER = logging.getLogger(__name__)
27 CONFIG_SCHEMA = {
28 Required("token"): str,
29 "bot-name": str,
30 "default-room": str,
31 "icon-emoji": str,
32 "connect-timeout": int,
33 "chat-as-user": bool,
34 }
35
36
37 class ConnectorSlack(Connector):
38 """A connector for Slack."""
39
40 def __init__(self, config, opsdroid=None):
41 """Create the connector."""
42 super().__init__(config, opsdroid=opsdroid)
43 _LOGGER.debug(_("Starting Slack connector."))
44 self.name = "slack"
45 self.default_target = config.get("default-room", "#general")
46 self.icon_emoji = config.get("icon-emoji", ":robot_face:")
47 self.token = config["token"]
48 self.timeout = config.get("connect-timeout", 10)
49 self.chat_as_user = config.get("chat-as-user", False)
50 self.ssl_context = ssl.create_default_context(cafile=certifi.where())
51 self.slack = slack.WebClient(
52 token=self.token,
53 run_async=True,
54 ssl=self.ssl_context,
55 proxy=os.environ.get("HTTPS_PROXY"),
56 )
57 self.slack_rtm = slack.RTMClient(
58 token=self.token,
59 run_async=True,
60 ssl=self.ssl_context,
61 proxy=os.environ.get("HTTPS_PROXY"),
62 )
63 self.websocket = None
64 self.bot_name = config.get("bot-name", "opsdroid")
65 self.auth_info = None
66 self.user_info = None
67 self.bot_id = None
68 self.known_users = {}
69 self.keepalive = None
70 self.reconnecting = False
71 self.listening = True
72 self._message_id = 0
73
74 # Register callbacks
75 slack.RTMClient.on(event="message", callback=self.process_message)
76
77 async def connect(self):
78 """Connect to the chat service."""
79 _LOGGER.info(_("Connecting to Slack."))
80
81 try:
82 # The slack library recommends you call `self.slack_rtm.start()`` here but it
83 # seems to mess with the event loop's signal handlers which breaks opsdroid.
84 # Therefore we need to directly call the private `_connect_and_read` method
85 # instead. This method also blocks so we need to dispatch it to the loop as a task.
86 self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())
87
88 self.auth_info = (await self.slack.api_call("auth.test")).data
89 self.user_info = (
90 await self.slack.api_call(
91 "users.info",
92 http_verb="GET",
93 params={"user": self.auth_info["user_id"]},
94 )
95 ).data
96 self.bot_id = self.user_info["user"]["profile"]["bot_id"]
97
98 self.opsdroid.web_server.web_app.router.add_post(
99 "/connector/{}/interactions".format(self.name),
100 self.slack_interactions_handler,
101 )
102
103 _LOGGER.debug(_("Connected as %s."), self.bot_name)
104 _LOGGER.debug(_("Using icon %s."), self.icon_emoji)
105 _LOGGER.debug(_("Default room is %s."), self.default_target)
106 _LOGGER.info(_("Connected successfully."))
107 except slack.errors.SlackApiError as error:
108 _LOGGER.error(
109 _(
110 "Unable to connect to Slack due to %s."
111 "The Slack Connector will not be available."
112 ),
113 error,
114 )
115 except Exception:
116 await self.disconnect()
117 raise
118
119 async def disconnect(self):
120 """Disconnect from Slack."""
121 self.slack_rtm.stop()
122 self.listening = False
123
124 async def listen(self):
125 """Listen for and parse new messages."""
126
127 async def process_message(self, **payload):
128 """Process a raw message and pass it to the parser."""
129 message = payload["data"]
130
131 # Ignore message edits
132 if "subtype" in message and message["subtype"] == "message_changed":
133 return
134
135 # Ignore own messages
136 if (
137 "subtype" in message
138 and message["subtype"] == "bot_message"
139 and message["bot_id"] == self.bot_id
140 ):
141 return
142
143 # Lookup username
144 _LOGGER.debug(_("Looking up sender username."))
145 try:
146 user_info = await self.lookup_username(message["user"])
147 except ValueError:
148 return
149
150 # Replace usernames in the message
151 _LOGGER.debug(_("Replacing userids in message with usernames."))
152 message["text"] = await self.replace_usernames(message["text"])
153
154 await self.opsdroid.parse(
155 Message(
156 text=message["text"],
157 user=user_info["name"],
158 target=message["channel"],
159 connector=self,
160 raw_event=message,
161 )
162 )
163
164 @register_event(Message)
165 async def send_message(self, message):
166 """Respond with a message."""
167 _LOGGER.debug(
168 _("Responding with: '%s' in room %s."), message.text, message.target
169 )
170 await self.slack.api_call(
171 "chat.postMessage",
172 data={
173 "channel": message.target,
174 "text": message.text,
175 "as_user": self.chat_as_user,
176 "username": self.bot_name,
177 "icon_emoji": self.icon_emoji,
178 },
179 )
180
181 @register_event(Blocks)
182 async def send_blocks(self, blocks):
183 """Respond with structured blocks."""
184 _LOGGER.debug(
185 _("Responding with interactive blocks in room %s."), blocks.target
186 )
187 await self.slack.api_call(
188 "chat.postMessage",
189 data={
190 "channel": blocks.target,
191 "as_user": self.chat_as_user,
192 "username": self.bot_name,
193 "blocks": blocks.blocks,
194 "icon_emoji": self.icon_emoji,
195 },
196 )
197
198 @register_event(Reaction)
199 async def send_reaction(self, reaction):
200 """React to a message."""
201 emoji = demojize(reaction.emoji).replace(":", "")
202 _LOGGER.debug(_("Reacting with: %s."), emoji)
203 try:
204 await self.slack.api_call(
205 "reactions.add",
206 data={
207 "name": emoji,
208 "channel": reaction.target,
209 "timestamp": reaction.linked_event.event_id,
210 },
211 )
212 except slack.errors.SlackApiError as error:
213 if "invalid_name" in str(error):
214 _LOGGER.warning(_("Slack does not support the emoji %s."), emoji)
215 else:
216 raise
217
218 async def lookup_username(self, userid):
219 """Lookup a username and cache it."""
220 if userid in self.known_users:
221 user_info = self.known_users[userid]
222 else:
223 response = await self.slack.users_info(user=userid)
224 user_info = response.data["user"]
225 if isinstance(user_info, dict):
226 self.known_users[userid] = user_info
227 else:
228 raise ValueError("Returned user is not a dict.")
229 return user_info
230
231 async def replace_usernames(self, message):
232 """Replace User ID with username in message text."""
233 userids = re.findall(r"\<\@([A-Z0-9]+)(?:\|.+)?\>", message)
234 for userid in userids:
235 user_info = await self.lookup_username(userid)
236 message = message.replace(
237 "<@{userid}>".format(userid=userid), user_info["name"]
238 )
239 return message
240
241 async def slack_interactions_handler(self, request):
242 """Handle interactive events in Slack.
243
244 For each entry in request, it will check if the entry is one of the four main
245 interaction types in slack: block_actions, message_actions, view_submissions
246 and view_closed. Then it will process all the incoming messages.
247
248 Return:
249 A 200 OK response. The Messenger Platform will resend the webhook
250 event every 20 seconds, until a 200 OK response is received.
251 Failing to return a 200 OK may cause your webhook to be
252 unsubscribed by the Messenger Platform.
253
254 """
255
256 req_data = await request.post()
257 payload = json.loads(req_data["payload"])
258
259 if "type" in payload:
260 if payload["type"] == "block_actions":
261 for action in payload["actions"]:
262 block_action = BlockActions(
263 payload,
264 user=payload["user"]["id"],
265 target=payload["channel"]["id"],
266 connector=self,
267 )
268
269 action_value = None
270 if action["type"] == "button":
271 action_value = action["value"]
272 elif action["type"] in ["overflow", "static_select"]:
273 action_value = action["selected_option"]["value"]
274 elif action["type"] == "datepicker":
275 action_value = action["selected_date"]
276 elif action["type"] == "multi_static_select":
277 action_value = [v["value"] for v in action["selected_options"]]
278
279 if action_value:
280 block_action.update_entity("value", action_value)
281 await self.opsdroid.parse(block_action)
282 elif payload["type"] == "message_action":
283 await self.opsdroid.parse(
284 MessageAction(
285 payload,
286 user=payload["user"]["id"],
287 target=payload["channel"]["id"],
288 connector=self,
289 )
290 )
291 elif payload["type"] == "view_submission":
292 await self.opsdroid.parse(
293 ViewSubmission(
294 payload,
295 user=payload["user"]["id"],
296 target=payload["user"]["id"],
297 connector=self,
298 )
299 )
300 elif payload["type"] == "view_closed":
301 await self.opsdroid.parse(
302 ViewClosed(
303 payload,
304 user=payload["user"]["id"],
305 target=payload["user"]["id"],
306 connector=self,
307 )
308 )
309
310 return aiohttp.web.Response(text=json.dumps("Received"), status=200)
311
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py
--- a/opsdroid/connector/slack/__init__.py
+++ b/opsdroid/connector/slack/__init__.py
@@ -144,7 +144,8 @@
_LOGGER.debug(_("Looking up sender username."))
try:
user_info = await self.lookup_username(message["user"])
- except ValueError:
+ except (ValueError, KeyError) as error:
+ _LOGGER.error(_("Username lookup failed for %s."), error)
return
# Replace usernames in the message
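
For context on why this hunk works: some Slack RTM `message` events (for example, certain bot or system messages) carry no `"user"` key at all, so `message["user"]` raises `KeyError` before the username lookup even starts. The sketch below uses hypothetical payloads and is not code from the repository; it only illustrates how the widened `except` clause turns that case into a logged skip:

```python
# Illustration only: hypothetical event payloads, not repository code.
def sender_or_none(message: dict):
    """Mimic the patched guard: skip events whose sender cannot be resolved."""
    try:
        return message["user"]  # raises KeyError when the key is absent
    except (ValueError, KeyError) as error:
        print(f"Username lookup failed for {error}.")
        return None

events = [
    {"user": "U123", "text": "hello", "channel": "C1"},     # normal user message
    {"bot_id": "B999", "text": "status", "channel": "C1"},  # no "user" key
]
for event in events:
    print(sender_or_none(event))  # "U123", then None after the logged failure
```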
| {"golden_diff": "diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py\n--- a/opsdroid/connector/slack/__init__.py\n+++ b/opsdroid/connector/slack/__init__.py\n@@ -144,7 +144,8 @@\n _LOGGER.debug(_(\"Looking up sender username.\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n- except ValueError:\n+ except (ValueError, KeyError) as error:\n+ _LOGGER.error(_(\"Username lookup failed for %s.\"), error)\n return\n \n # Replace usernames in the message\n", "issue": "opsdroid slack connector intermittently ends up in an exception\n<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->\r\n# Description - opsdroid slack connector intermittently ends up in an exception \r\n\r\nthis doesnt happen for all users - but i see that line 146 in File \"/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py\" is the culprit.\r\n\r\n```\r\nINFO opsdroid.connector.slack: Connected successfully.\r\nINFO opsdroid.web: Started web server on http://0.0.0.0:8080\r\nINFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.\r\nDEBUG slack.rtm.client: The Websocket connection has been opened.\r\nDEBUG opsdroid.parsers.crontab: Running crontab skills at Mon Feb 10 10:21:00 2020.\r\nDEBUG slack.rtm.client: Running 1 callbacks for event: 'message'\r\nDEBUG opsdroid.connector.slack: Looking up sender username.\r\nERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'\r\nDEBUG asyncio: Using selector: EpollSelector\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/opsdroid\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 1137, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py\", line 42, in start\r\n opsdroid.run()\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/core.py\", line 165, in run\r\n self.eventloop.run_until_complete(asyncio.gather(*pending))\r\n File \"/usr/local/lib/python3.7/asyncio/base_events.py\", line 583, in run_until_complete\r\n return future.result()\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 339, in _connect_and_read\r\n await self._read_messages()\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 390, in _read_messages\r\n await self._dispatch_event(event, data=payload)\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 437, in _dispatch_event\r\n rtm_client=self, web_client=self._web_client, data=data\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py\", line 146, in process_message\r\n user_info = await self.lookup_username(message[\"user\"])\r\nKeyError: 'user'\r\nERROR: Unhandled 
exception in opsdroid, exiting...\r\n```\r\n\r\n## Steps to Reproduce\r\nPlease also include relevant information and steps to reproduce the bug/issue.\r\n\r\ni am not sure if this can be reproduced elsewhere - otherwise would have been reported by other users.\r\nthe slack channel has about 82 users.\r\nthe bot is part of 2 channels.\r\nalso users interact with the bot directly /\r\n\r\n\r\n\r\n## Expected Functionality\r\nno exception - Looking up sender username should succeed.\r\n\r\n## Experienced Functionality\r\nExplain what happened instead(Please include the debug log).\r\n\r\n```INFO opsdroid.connector.slack: Connected successfully.\r\nINFO opsdroid.web: Started web server on http://0.0.0.0:8080\r\nINFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.\r\nDEBUG slack.rtm.client: The Websocket connection has been opened.\r\nDEBUG opsdroid.parsers.crontab: Running crontab skills at Mon Feb 10 10:21:00 2020.\r\nDEBUG slack.rtm.client: Running 1 callbacks for event: 'message'\r\nDEBUG opsdroid.connector.slack: Looking up sender username.\r\nERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'\r\nDEBUG asyncio: Using selector: EpollSelector\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/opsdroid\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 1137, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py\", line 42, in start\r\n opsdroid.run()\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/core.py\", line 165, in run\r\n self.eventloop.run_until_complete(asyncio.gather(*pending))\r\n File \"/usr/local/lib/python3.7/asyncio/base_events.py\", line 583, in run_until_complete\r\n return future.result()\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 339, in _connect_and_read\r\n await self._read_messages()\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 390, in _read_messages\r\n await self._dispatch_event(event, data=payload)\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 437, in _dispatch_event\r\n rtm_client=self, web_client=self._web_client, data=data\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py\", line 146, in process_message\r\n user_info = await self.lookup_username(message[\"user\"])\r\nKeyError: 'user'\r\nERROR: Unhandled exception in opsdroid, exiting...\r\n```\r\n\r\n## Versions\r\n- **Opsdroid version:** latest master code.\r\n- **Python version:** python3.7\r\n- **OS/Docker version:** 18.06.3-ce\r\n\r\n\r\n\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file below.\r\nconfiguration file passed in values.yaml helm chart\r\n\r\n```yaml\r\n configuration: |\r\n welcome-message: true\r\n connectors:\r\n slack:\r\n token: \"xxx\"\r\n bot-name: \"xxx\" # 
default \"opsdroid\"\r\n default-room: \"#xxx\" # default \"#general\"\r\n #icon-emoji: \":smile:\" # default \":robot_face:\"\r\n connect-timeout: 10 # default 10 seconds\r\n chat-as-user: false # default false\r\n skills:\r\n - name: skill-yyy-statistics\r\n path: /home/skill/skill-yyy-statistics\r\n db_server: \"1.1.1.1\"\r\n db_name: \"xx\"\r\n user: \"xxx\"\r\n password: \"xxx\"\r\n - name: skill-yyy-help\r\n path: /home/skill/skill-yyy-help\r\n - name: skill-yyy-cache\r\n path: /home/skill/skill-yyy-cache\r\n db_server: \"1.1.1.1\"\r\n db_name: \"zz\"\r\n user: \"xxx\"\r\n password: \"xxxx\"\r\n - name: skill-yyy-eee\r\n path: /home/skill/skill-yyy-eee\r\n - name: skill-yyy-ttt\r\n path: /home/skill/skill-yyy-ttt\r\n\r\n\r\n```\r\n\r\n## Additional Details\r\nAny other details you wish to include such as screenshots, console messages, etc.\r\n\r\n\r\n<!-- Love opsdroid? Please consider supporting our collective:\r\n +\ud83d\udc49 https://opencollective.com/opsdroid/donate -->\r\n\nopsdroid slack connector intermittently ends up in an exception\n<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->\r\n# Description - opsdroid slack connector intermittently ends up in an exception \r\n\r\nthis doesnt happen for all users - but i see that line 146 in File \"/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py\" is the culprit.\r\n\r\n```\r\nINFO opsdroid.connector.slack: Connected successfully.\r\nINFO opsdroid.web: Started web server on http://0.0.0.0:8080\r\nINFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.\r\nDEBUG slack.rtm.client: The Websocket connection has been opened.\r\nDEBUG opsdroid.parsers.crontab: Running crontab skills at Mon Feb 10 10:21:00 2020.\r\nDEBUG slack.rtm.client: Running 1 callbacks for event: 'message'\r\nDEBUG opsdroid.connector.slack: Looking up sender username.\r\nERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'\r\nDEBUG asyncio: Using selector: EpollSelector\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/opsdroid\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 1137, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py\", line 42, in start\r\n opsdroid.run()\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/core.py\", line 165, in run\r\n self.eventloop.run_until_complete(asyncio.gather(*pending))\r\n File \"/usr/local/lib/python3.7/asyncio/base_events.py\", line 583, in run_until_complete\r\n return future.result()\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 339, in _connect_and_read\r\n await self._read_messages()\r\n File 
\"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 390, in _read_messages\r\n await self._dispatch_event(event, data=payload)\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 437, in _dispatch_event\r\n rtm_client=self, web_client=self._web_client, data=data\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py\", line 146, in process_message\r\n user_info = await self.lookup_username(message[\"user\"])\r\nKeyError: 'user'\r\nERROR: Unhandled exception in opsdroid, exiting...\r\n```\r\n\r\n## Steps to Reproduce\r\nPlease also include relevant information and steps to reproduce the bug/issue.\r\n\r\ni am not sure if this can be reproduced elsewhere - otherwise would have been reported by other users.\r\nthe slack channel has about 82 users.\r\nthe bot is part of 2 channels.\r\nalso users interact with the bot directly /\r\n\r\n\r\n\r\n## Expected Functionality\r\nno exception - Looking up sender username should succeed.\r\n\r\n## Experienced Functionality\r\nExplain what happened instead(Please include the debug log).\r\n\r\n```INFO opsdroid.connector.slack: Connected successfully.\r\nINFO opsdroid.web: Started web server on http://0.0.0.0:8080\r\nINFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.\r\nDEBUG slack.rtm.client: The Websocket connection has been opened.\r\nDEBUG opsdroid.parsers.crontab: Running crontab skills at Mon Feb 10 10:21:00 2020.\r\nDEBUG slack.rtm.client: Running 1 callbacks for event: 'message'\r\nDEBUG opsdroid.connector.slack: Looking up sender username.\r\nERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'\r\nDEBUG asyncio: Using selector: EpollSelector\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/opsdroid\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 1137, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py\", line 42, in start\r\n opsdroid.run()\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/core.py\", line 165, in run\r\n self.eventloop.run_until_complete(asyncio.gather(*pending))\r\n File \"/usr/local/lib/python3.7/asyncio/base_events.py\", line 583, in run_until_complete\r\n return future.result()\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 339, in _connect_and_read\r\n await self._read_messages()\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 390, in _read_messages\r\n await self._dispatch_event(event, data=payload)\r\n File \"/usr/local/lib/python3.7/site-packages/slack/rtm/client.py\", line 437, in _dispatch_event\r\n rtm_client=self, web_client=self._web_client, data=data\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py\", line 146, in process_message\r\n user_info = await 
self.lookup_username(message[\"user\"])\r\nKeyError: 'user'\r\nERROR: Unhandled exception in opsdroid, exiting...\r\n```\r\n\r\n## Versions\r\n- **Opsdroid version:** latest master code.\r\n- **Python version:** python3.7\r\n- **OS/Docker version:** 18.06.3-ce\r\n\r\n\r\n\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file below.\r\nconfiguration file passed in values.yaml helm chart\r\n\r\n```yaml\r\n configuration: |\r\n welcome-message: true\r\n connectors:\r\n slack:\r\n token: \"xxx\"\r\n bot-name: \"xxx\" # default \"opsdroid\"\r\n default-room: \"#xxx\" # default \"#general\"\r\n #icon-emoji: \":smile:\" # default \":robot_face:\"\r\n connect-timeout: 10 # default 10 seconds\r\n chat-as-user: false # default false\r\n skills:\r\n - name: skill-yyy-statistics\r\n path: /home/skill/skill-yyy-statistics\r\n db_server: \"1.1.1.1\"\r\n db_name: \"xx\"\r\n user: \"xxx\"\r\n password: \"xxx\"\r\n - name: skill-yyy-help\r\n path: /home/skill/skill-yyy-help\r\n - name: skill-yyy-cache\r\n path: /home/skill/skill-yyy-cache\r\n db_server: \"1.1.1.1\"\r\n db_name: \"zz\"\r\n user: \"xxx\"\r\n password: \"xxxx\"\r\n - name: skill-yyy-eee\r\n path: /home/skill/skill-yyy-eee\r\n - name: skill-yyy-ttt\r\n path: /home/skill/skill-yyy-ttt\r\n\r\n\r\n```\r\n\r\n## Additional Details\r\nAny other details you wish to include such as screenshots, console messages, etc.\r\n\r\n\r\n<!-- Love opsdroid? Please consider supporting our collective:\r\n +\ud83d\udc49 https://opencollective.com/opsdroid/donate -->\r\n\n", "before_files": [{"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\nimport os\nimport ssl\nimport certifi\nimport json\n\nimport aiohttp\n\nimport slack\nfrom emoji import demojize\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import (\n Blocks,\n BlockActions,\n MessageAction,\n ViewSubmission,\n ViewClosed,\n)\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n \"bot-name\": str,\n \"default-room\": str,\n \"icon-emoji\": str,\n \"connect-timeout\": int,\n \"chat-as-user\": bool,\n}\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector.\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.chat_as_user = config.get(\"chat-as-user\", False)\n self.ssl_context = ssl.create_default_context(cafile=certifi.where())\n self.slack = slack.WebClient(\n token=self.token,\n run_async=True,\n ssl=self.ssl_context,\n proxy=os.environ.get(\"HTTPS_PROXY\"),\n )\n self.slack_rtm = slack.RTMClient(\n token=self.token,\n run_async=True,\n ssl=self.ssl_context,\n proxy=os.environ.get(\"HTTPS_PROXY\"),\n )\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n 
\"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack.\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n self.opsdroid.web_server.web_app.router.add_post(\n \"/connector/{}/interactions\".format(self.name),\n self.slack_interactions_handler,\n )\n\n _LOGGER.debug(_(\"Connected as %s.\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s.\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s.\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully.\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s.\"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore message edits\n if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n return\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username.\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames.\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n text=message[\"text\"],\n user=user_info[\"name\"],\n target=message[\"channel\"],\n connector=self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s.\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": self.chat_as_user,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s.\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"as_user\": self.chat_as_user,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n 
_LOGGER.debug(_(\"Reacting with: %s.\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.event_id,\n },\n )\n except slack.errors.SlackApiError as error:\n if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the emoji %s.\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n\n async def slack_interactions_handler(self, request):\n \"\"\"Handle interactive events in Slack.\n\n For each entry in request, it will check if the entry is one of the four main\n interaction types in slack: block_actions, message_actions, view_submissions\n and view_closed. Then it will process all the incoming messages.\n\n Return:\n A 200 OK response. The Messenger Platform will resend the webhook\n event every 20 seconds, until a 200 OK response is received.\n Failing to return a 200 OK may cause your webhook to be\n unsubscribed by the Messenger Platform.\n\n \"\"\"\n\n req_data = await request.post()\n payload = json.loads(req_data[\"payload\"])\n\n if \"type\" in payload:\n if payload[\"type\"] == \"block_actions\":\n for action in payload[\"actions\"]:\n block_action = BlockActions(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"channel\"][\"id\"],\n connector=self,\n )\n\n action_value = None\n if action[\"type\"] == \"button\":\n action_value = action[\"value\"]\n elif action[\"type\"] in [\"overflow\", \"static_select\"]:\n action_value = action[\"selected_option\"][\"value\"]\n elif action[\"type\"] == \"datepicker\":\n action_value = action[\"selected_date\"]\n elif action[\"type\"] == \"multi_static_select\":\n action_value = [v[\"value\"] for v in action[\"selected_options\"]]\n\n if action_value:\n block_action.update_entity(\"value\", action_value)\n await self.opsdroid.parse(block_action)\n elif payload[\"type\"] == \"message_action\":\n await self.opsdroid.parse(\n MessageAction(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"channel\"][\"id\"],\n connector=self,\n )\n )\n elif payload[\"type\"] == \"view_submission\":\n await self.opsdroid.parse(\n ViewSubmission(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"user\"][\"id\"],\n connector=self,\n )\n )\n elif payload[\"type\"] == \"view_closed\":\n await self.opsdroid.parse(\n ViewClosed(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"user\"][\"id\"],\n connector=self,\n )\n )\n\n return aiohttp.web.Response(text=json.dumps(\"Received\"), status=200)\n", "path": "opsdroid/connector/slack/__init__.py"}], "after_files": [{"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\nimport os\nimport ssl\nimport certifi\nimport json\n\nimport aiohttp\n\nimport slack\nfrom emoji import 
demojize\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import (\n Blocks,\n BlockActions,\n MessageAction,\n ViewSubmission,\n ViewClosed,\n)\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n \"bot-name\": str,\n \"default-room\": str,\n \"icon-emoji\": str,\n \"connect-timeout\": int,\n \"chat-as-user\": bool,\n}\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector.\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.chat_as_user = config.get(\"chat-as-user\", False)\n self.ssl_context = ssl.create_default_context(cafile=certifi.where())\n self.slack = slack.WebClient(\n token=self.token,\n run_async=True,\n ssl=self.ssl_context,\n proxy=os.environ.get(\"HTTPS_PROXY\"),\n )\n self.slack_rtm = slack.RTMClient(\n token=self.token,\n run_async=True,\n ssl=self.ssl_context,\n proxy=os.environ.get(\"HTTPS_PROXY\"),\n )\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack.\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. 
This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n self.opsdroid.web_server.web_app.router.add_post(\n \"/connector/{}/interactions\".format(self.name),\n self.slack_interactions_handler,\n )\n\n _LOGGER.debug(_(\"Connected as %s.\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s.\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s.\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully.\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s.\"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore message edits\n if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n return\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username.\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except (ValueError, KeyError) as error:\n _LOGGER.error(_(\"Username lookup failed for %s.\"), error)\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames.\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n text=message[\"text\"],\n user=user_info[\"name\"],\n target=message[\"channel\"],\n connector=self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s.\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": self.chat_as_user,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s.\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"as_user\": self.chat_as_user,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n _LOGGER.debug(_(\"Reacting with: %s.\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.event_id,\n },\n )\n except slack.errors.SlackApiError as error:\n 
if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the emoji %s.\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n\n async def slack_interactions_handler(self, request):\n \"\"\"Handle interactive events in Slack.\n\n For each entry in request, it will check if the entry is one of the four main\n interaction types in slack: block_actions, message_actions, view_submissions\n and view_closed. Then it will process all the incoming messages.\n\n Return:\n A 200 OK response. The Messenger Platform will resend the webhook\n event every 20 seconds, until a 200 OK response is received.\n Failing to return a 200 OK may cause your webhook to be\n unsubscribed by the Messenger Platform.\n\n \"\"\"\n\n req_data = await request.post()\n payload = json.loads(req_data[\"payload\"])\n\n if \"type\" in payload:\n if payload[\"type\"] == \"block_actions\":\n for action in payload[\"actions\"]:\n block_action = BlockActions(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"channel\"][\"id\"],\n connector=self,\n )\n\n action_value = None\n if action[\"type\"] == \"button\":\n action_value = action[\"value\"]\n elif action[\"type\"] in [\"overflow\", \"static_select\"]:\n action_value = action[\"selected_option\"][\"value\"]\n elif action[\"type\"] == \"datepicker\":\n action_value = action[\"selected_date\"]\n elif action[\"type\"] == \"multi_static_select\":\n action_value = [v[\"value\"] for v in action[\"selected_options\"]]\n\n if action_value:\n block_action.update_entity(\"value\", action_value)\n await self.opsdroid.parse(block_action)\n elif payload[\"type\"] == \"message_action\":\n await self.opsdroid.parse(\n MessageAction(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"channel\"][\"id\"],\n connector=self,\n )\n )\n elif payload[\"type\"] == \"view_submission\":\n await self.opsdroid.parse(\n ViewSubmission(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"user\"][\"id\"],\n connector=self,\n )\n )\n elif payload[\"type\"] == \"view_closed\":\n await self.opsdroid.parse(\n ViewClosed(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"user\"][\"id\"],\n connector=self,\n )\n )\n\n return aiohttp.web.Response(text=json.dumps(\"Received\"), status=200)\n", "path": "opsdroid/connector/slack/__init__.py"}]} |
gh_patches_debug_1485 | rasdani/github-patches | git_diff | pyca__cryptography-7644 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AESSIV Encryption/Decryption fails if empty data is passed
## Issue description
If an empty byte string is passed to the `data` parameter of the `encrypt` and `decrypt` methods of `AESSIV`, the operation fails with `InternalError`.
## Steps to reproduce the bug
```python
from cryptography.hazmat.primitives.ciphers import aead
key = bytes(32)
data = b""
cipher = aead.AESSIV(key)
output = cipher.encrypt(data, None) # raises `InternalError`
```
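As a stopgap, a caller-side guard can at least surface a clearer error than the `InternalError` coming back from the OpenSSL layer. This is only a sketch of a hypothetical wrapper, assuming an OpenSSL build with AES-SIV support; it is not part of the `cryptography` API and not necessarily how the library itself resolves the issue:

```python
from cryptography.hazmat.primitives.ciphers import aead

def aessiv_encrypt_checked(key: bytes, data: bytes, associated_data=None) -> bytes:
    # Hypothetical helper: reject empty plaintext up front so callers see a
    # clear ValueError instead of an InternalError from the backend.
    if not data:
        raise ValueError("data must not be zero length")
    return aead.AESSIV(key).encrypt(data, associated_data)

key = aead.AESSIV.generate_key(256)
ciphertext = aessiv_encrypt_checked(key, b"message")  # succeeds

try:
    aessiv_encrypt_checked(key, b"")
except ValueError as exc:
    print(exc)  # "data must not be zero length"
```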
## cryptography installation
cryptography is installed via poetry with the version constraint `>=35.0.0`:
```toml
[tool.poetry.dependencies]
python = "^3.8"
cryptography = ">=35.0.0"
```
## Required Version numbers
- `pip` - 22.2.2
- `cffi` - 1.15.1
- `cryptography` - 38.0.1
- `setuptools` - 65.3.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/ciphers/aead.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 import os
7 import typing
8
9 from cryptography import exceptions, utils
10 from cryptography.hazmat.backends.openssl import aead
11 from cryptography.hazmat.backends.openssl.backend import backend
12 from cryptography.hazmat.bindings._rust import FixedPool
13
14
15 class ChaCha20Poly1305:
16 _MAX_SIZE = 2**31 - 1
17
18 def __init__(self, key: bytes):
19 if not backend.aead_cipher_supported(self):
20 raise exceptions.UnsupportedAlgorithm(
21 "ChaCha20Poly1305 is not supported by this version of OpenSSL",
22 exceptions._Reasons.UNSUPPORTED_CIPHER,
23 )
24 utils._check_byteslike("key", key)
25
26 if len(key) != 32:
27 raise ValueError("ChaCha20Poly1305 key must be 32 bytes.")
28
29 self._key = key
30 self._pool = FixedPool(self._create_fn)
31
32 @classmethod
33 def generate_key(cls) -> bytes:
34 return os.urandom(32)
35
36 def _create_fn(self):
37 return aead._aead_create_ctx(backend, self, self._key)
38
39 def encrypt(
40 self,
41 nonce: bytes,
42 data: bytes,
43 associated_data: typing.Optional[bytes],
44 ) -> bytes:
45 if associated_data is None:
46 associated_data = b""
47
48 if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
49 # This is OverflowError to match what cffi would raise
50 raise OverflowError(
51 "Data or associated data too long. Max 2**31 - 1 bytes"
52 )
53
54 self._check_params(nonce, data, associated_data)
55 with self._pool.acquire() as ctx:
56 return aead._encrypt(
57 backend, self, nonce, data, [associated_data], 16, ctx
58 )
59
60 def decrypt(
61 self,
62 nonce: bytes,
63 data: bytes,
64 associated_data: typing.Optional[bytes],
65 ) -> bytes:
66 if associated_data is None:
67 associated_data = b""
68
69 self._check_params(nonce, data, associated_data)
70 with self._pool.acquire() as ctx:
71 return aead._decrypt(
72 backend, self, nonce, data, [associated_data], 16, ctx
73 )
74
75 def _check_params(
76 self,
77 nonce: bytes,
78 data: bytes,
79 associated_data: bytes,
80 ) -> None:
81 utils._check_byteslike("nonce", nonce)
82 utils._check_bytes("data", data)
83 utils._check_bytes("associated_data", associated_data)
84 if len(nonce) != 12:
85 raise ValueError("Nonce must be 12 bytes")
86
87
88 class AESCCM:
89 _MAX_SIZE = 2**31 - 1
90
91 def __init__(self, key: bytes, tag_length: int = 16):
92 utils._check_byteslike("key", key)
93 if len(key) not in (16, 24, 32):
94 raise ValueError("AESCCM key must be 128, 192, or 256 bits.")
95
96 self._key = key
97 if not isinstance(tag_length, int):
98 raise TypeError("tag_length must be an integer")
99
100 if tag_length not in (4, 6, 8, 10, 12, 14, 16):
101 raise ValueError("Invalid tag_length")
102
103 self._tag_length = tag_length
104
105 if not backend.aead_cipher_supported(self):
106 raise exceptions.UnsupportedAlgorithm(
107 "AESCCM is not supported by this version of OpenSSL",
108 exceptions._Reasons.UNSUPPORTED_CIPHER,
109 )
110
111 @classmethod
112 def generate_key(cls, bit_length: int) -> bytes:
113 if not isinstance(bit_length, int):
114 raise TypeError("bit_length must be an integer")
115
116 if bit_length not in (128, 192, 256):
117 raise ValueError("bit_length must be 128, 192, or 256")
118
119 return os.urandom(bit_length // 8)
120
121 def encrypt(
122 self,
123 nonce: bytes,
124 data: bytes,
125 associated_data: typing.Optional[bytes],
126 ) -> bytes:
127 if associated_data is None:
128 associated_data = b""
129
130 if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
131 # This is OverflowError to match what cffi would raise
132 raise OverflowError(
133 "Data or associated data too long. Max 2**31 - 1 bytes"
134 )
135
136 self._check_params(nonce, data, associated_data)
137 self._validate_lengths(nonce, len(data))
138 return aead._encrypt(
139 backend, self, nonce, data, [associated_data], self._tag_length
140 )
141
142 def decrypt(
143 self,
144 nonce: bytes,
145 data: bytes,
146 associated_data: typing.Optional[bytes],
147 ) -> bytes:
148 if associated_data is None:
149 associated_data = b""
150
151 self._check_params(nonce, data, associated_data)
152 return aead._decrypt(
153 backend, self, nonce, data, [associated_data], self._tag_length
154 )
155
156 def _validate_lengths(self, nonce: bytes, data_len: int) -> None:
157 # For information about computing this, see
158 # https://tools.ietf.org/html/rfc3610#section-2.1
159 l_val = 15 - len(nonce)
160 if 2 ** (8 * l_val) < data_len:
161 raise ValueError("Data too long for nonce")
162
163 def _check_params(
164 self, nonce: bytes, data: bytes, associated_data: bytes
165 ) -> None:
166 utils._check_byteslike("nonce", nonce)
167 utils._check_bytes("data", data)
168 utils._check_bytes("associated_data", associated_data)
169 if not 7 <= len(nonce) <= 13:
170 raise ValueError("Nonce must be between 7 and 13 bytes")
171
172
173 class AESGCM:
174 _MAX_SIZE = 2**31 - 1
175
176 def __init__(self, key: bytes):
177 utils._check_byteslike("key", key)
178 if len(key) not in (16, 24, 32):
179 raise ValueError("AESGCM key must be 128, 192, or 256 bits.")
180
181 self._key = key
182
183 @classmethod
184 def generate_key(cls, bit_length: int) -> bytes:
185 if not isinstance(bit_length, int):
186 raise TypeError("bit_length must be an integer")
187
188 if bit_length not in (128, 192, 256):
189 raise ValueError("bit_length must be 128, 192, or 256")
190
191 return os.urandom(bit_length // 8)
192
193 def encrypt(
194 self,
195 nonce: bytes,
196 data: bytes,
197 associated_data: typing.Optional[bytes],
198 ) -> bytes:
199 if associated_data is None:
200 associated_data = b""
201
202 if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
203 # This is OverflowError to match what cffi would raise
204 raise OverflowError(
205 "Data or associated data too long. Max 2**31 - 1 bytes"
206 )
207
208 self._check_params(nonce, data, associated_data)
209 return aead._encrypt(backend, self, nonce, data, [associated_data], 16)
210
211 def decrypt(
212 self,
213 nonce: bytes,
214 data: bytes,
215 associated_data: typing.Optional[bytes],
216 ) -> bytes:
217 if associated_data is None:
218 associated_data = b""
219
220 self._check_params(nonce, data, associated_data)
221 return aead._decrypt(backend, self, nonce, data, [associated_data], 16)
222
223 def _check_params(
224 self,
225 nonce: bytes,
226 data: bytes,
227 associated_data: bytes,
228 ) -> None:
229 utils._check_byteslike("nonce", nonce)
230 utils._check_bytes("data", data)
231 utils._check_bytes("associated_data", associated_data)
232 if len(nonce) < 8 or len(nonce) > 128:
233 raise ValueError("Nonce must be between 8 and 128 bytes")
234
235
236 class AESOCB3:
237 _MAX_SIZE = 2**31 - 1
238
239 def __init__(self, key: bytes):
240 utils._check_byteslike("key", key)
241 if len(key) not in (16, 24, 32):
242 raise ValueError("AESOCB3 key must be 128, 192, or 256 bits.")
243
244 self._key = key
245
246 if not backend.aead_cipher_supported(self):
247 raise exceptions.UnsupportedAlgorithm(
248 "OCB3 is not supported by this version of OpenSSL",
249 exceptions._Reasons.UNSUPPORTED_CIPHER,
250 )
251
252 @classmethod
253 def generate_key(cls, bit_length: int) -> bytes:
254 if not isinstance(bit_length, int):
255 raise TypeError("bit_length must be an integer")
256
257 if bit_length not in (128, 192, 256):
258 raise ValueError("bit_length must be 128, 192, or 256")
259
260 return os.urandom(bit_length // 8)
261
262 def encrypt(
263 self,
264 nonce: bytes,
265 data: bytes,
266 associated_data: typing.Optional[bytes],
267 ) -> bytes:
268 if associated_data is None:
269 associated_data = b""
270
271 if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
272 # This is OverflowError to match what cffi would raise
273 raise OverflowError(
274 "Data or associated data too long. Max 2**31 - 1 bytes"
275 )
276
277 self._check_params(nonce, data, associated_data)
278 return aead._encrypt(backend, self, nonce, data, [associated_data], 16)
279
280 def decrypt(
281 self,
282 nonce: bytes,
283 data: bytes,
284 associated_data: typing.Optional[bytes],
285 ) -> bytes:
286 if associated_data is None:
287 associated_data = b""
288
289 self._check_params(nonce, data, associated_data)
290 return aead._decrypt(backend, self, nonce, data, [associated_data], 16)
291
292 def _check_params(
293 self,
294 nonce: bytes,
295 data: bytes,
296 associated_data: bytes,
297 ) -> None:
298 utils._check_byteslike("nonce", nonce)
299 utils._check_bytes("data", data)
300 utils._check_bytes("associated_data", associated_data)
301 if len(nonce) < 12 or len(nonce) > 15:
302 raise ValueError("Nonce must be between 12 and 15 bytes")
303
304
305 class AESSIV(object):
306 _MAX_SIZE = 2**31 - 1
307
308 def __init__(self, key: bytes):
309 utils._check_byteslike("key", key)
310 if len(key) not in (32, 48, 64):
311 raise ValueError("AESSIV key must be 256, 384, or 512 bits.")
312
313 self._key = key
314
315 if not backend.aead_cipher_supported(self):
316 raise exceptions.UnsupportedAlgorithm(
317 "AES-SIV is not supported by this version of OpenSSL",
318 exceptions._Reasons.UNSUPPORTED_CIPHER,
319 )
320
321 @classmethod
322 def generate_key(cls, bit_length: int) -> bytes:
323 if not isinstance(bit_length, int):
324 raise TypeError("bit_length must be an integer")
325
326 if bit_length not in (256, 384, 512):
327 raise ValueError("bit_length must be 256, 384, or 512")
328
329 return os.urandom(bit_length // 8)
330
331 def encrypt(
332 self,
333 data: bytes,
334 associated_data: typing.Optional[typing.List[bytes]],
335 ) -> bytes:
336 if associated_data is None:
337 associated_data = []
338
339 self._check_params(data, associated_data)
340
341 if len(data) > self._MAX_SIZE or any(
342 len(ad) > self._MAX_SIZE for ad in associated_data
343 ):
344 # This is OverflowError to match what cffi would raise
345 raise OverflowError(
346 "Data or associated data too long. Max 2**31 - 1 bytes"
347 )
348
349 return aead._encrypt(backend, self, b"", data, associated_data, 16)
350
351 def decrypt(
352 self,
353 data: bytes,
354 associated_data: typing.Optional[typing.List[bytes]],
355 ) -> bytes:
356 if associated_data is None:
357 associated_data = []
358
359 self._check_params(data, associated_data)
360
361 return aead._decrypt(backend, self, b"", data, associated_data, 16)
362
363 def _check_params(
364 self,
365 data: bytes,
366 associated_data: typing.List,
367 ) -> None:
368 utils._check_bytes("data", data)
369 if not isinstance(associated_data, list) or not all(
370 isinstance(x, bytes) for x in associated_data
371 ):
372 raise TypeError("associated_data must be a list of bytes or None")
373
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/primitives/ciphers/aead.py b/src/cryptography/hazmat/primitives/ciphers/aead.py
--- a/src/cryptography/hazmat/primitives/ciphers/aead.py
+++ b/src/cryptography/hazmat/primitives/ciphers/aead.py
@@ -366,6 +366,8 @@
associated_data: typing.List,
) -> None:
utils._check_bytes("data", data)
+ if len(data) == 0:
+ raise ValueError("data must not be zero length")
if not isinstance(associated_data, list) or not all(
isinstance(x, bytes) for x in associated_data
):
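
Note: the hunk above makes `AESSIV._check_params` reject empty plaintext explicitly; the excerpt's version (file lines 363-372 above) only type-checks `data`, so a zero-length buffer travels into the OpenSSL backend and surfaces as the opaque `InternalError` described in the issue. A minimal sketch of how the patched behavior looks to a caller, assuming the diff above is applied (the key and inputs here are illustrative only):

```python
import os
from cryptography.hazmat.primitives.ciphers import aead

cipher = aead.AESSIV(os.urandom(32))  # 256-bit key; requires OpenSSL with AES-SIV support

try:
    cipher.encrypt(b"", None)  # empty plaintext, no associated data
except ValueError as exc:
    # With the patch the bad input is rejected up front instead of deep inside OpenSSL.
    print(exc)  # -> data must not be zero length
```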
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/ciphers/aead.py b/src/cryptography/hazmat/primitives/ciphers/aead.py\n--- a/src/cryptography/hazmat/primitives/ciphers/aead.py\n+++ b/src/cryptography/hazmat/primitives/ciphers/aead.py\n@@ -366,6 +366,8 @@\n associated_data: typing.List,\n ) -> None:\n utils._check_bytes(\"data\", data)\n+ if len(data) == 0:\n+ raise ValueError(\"data must not be zero length\")\n if not isinstance(associated_data, list) or not all(\n isinstance(x, bytes) for x in associated_data\n ):\n", "issue": "AESSIV Encryption/Decryption fails if empty data is passed\n## Issue description\r\n\r\nIf an empty byte string is passed to `data` parameter of methods `encrypt` and `decrypt` of `AESSIV`, operation fails with `InternalError`.\r\n\r\n## Steps to reproduce bug\r\n\r\n```python\r\nfrom cryptography.hazmat.primitives.ciphers import aead\r\n\r\nkey = bytes(32)\r\ndata = b\"\"\r\n\r\ncipher = aead.AESSIV(key)\r\noutput = cipher.encrypt(data, None) # raises `InternalError`\r\n```\r\n\r\n## cryptography installation\r\n\r\ncryptography is installed via poetry with version constraint >=35.0.0:\r\n\r\n```toml\r\n[tool.poetry.dependencies]\r\npython = \"^3.8\"\r\ncryptography = \">=35.0.0\"\r\n```\r\n\r\n## Required Version numbers\r\n\r\n- `pip` - 22.2.2\r\n- `cffi` - 1.15.1\r\n- `cryptography` - 38.0.1\r\n- `setuptools` - 65.3.0\r\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport os\nimport typing\n\nfrom cryptography import exceptions, utils\nfrom cryptography.hazmat.backends.openssl import aead\nfrom cryptography.hazmat.backends.openssl.backend import backend\nfrom cryptography.hazmat.bindings._rust import FixedPool\n\n\nclass ChaCha20Poly1305:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"ChaCha20Poly1305 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n utils._check_byteslike(\"key\", key)\n\n if len(key) != 32:\n raise ValueError(\"ChaCha20Poly1305 key must be 32 bytes.\")\n\n self._key = key\n self._pool = FixedPool(self._create_fn)\n\n @classmethod\n def generate_key(cls) -> bytes:\n return os.urandom(32)\n\n def _create_fn(self):\n return aead._aead_create_ctx(backend, self, self._key)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n with self._pool.acquire() as ctx:\n return aead._encrypt(\n backend, self, nonce, data, [associated_data], 16, ctx\n )\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n with self._pool.acquire() as ctx:\n return aead._decrypt(\n backend, self, nonce, data, [associated_data], 16, ctx\n )\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) != 12:\n raise ValueError(\"Nonce must be 12 bytes\")\n\n\nclass AESCCM:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes, tag_length: int = 16):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESCCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n if not isinstance(tag_length, int):\n raise TypeError(\"tag_length must be an integer\")\n\n if tag_length not in (4, 6, 8, 10, 12, 14, 16):\n raise ValueError(\"Invalid tag_length\")\n\n self._tag_length = tag_length\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"AESCCM is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n self._validate_lengths(nonce, len(data))\n return aead._encrypt(\n backend, self, nonce, data, [associated_data], self._tag_length\n )\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(\n backend, self, nonce, data, [associated_data], self._tag_length\n )\n\n def _validate_lengths(self, nonce: bytes, data_len: int) -> None:\n # For information about computing this, see\n # https://tools.ietf.org/html/rfc3610#section-2.1\n l_val = 15 - len(nonce)\n if 2 ** (8 * l_val) < data_len:\n raise ValueError(\"Data too long for nonce\")\n\n def _check_params(\n self, nonce: bytes, data: bytes, associated_data: bytes\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if not 7 <= len(nonce) <= 13:\n raise ValueError(\"Nonce must be between 7 and 13 bytes\")\n\n\nclass AESGCM:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESGCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, [associated_data], 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, [associated_data], 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 8 or len(nonce) > 128:\n raise ValueError(\"Nonce must be between 8 and 128 bytes\")\n\n\nclass AESOCB3:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESOCB3 key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"OCB3 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, [associated_data], 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, [associated_data], 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 12 or len(nonce) > 15:\n raise ValueError(\"Nonce must be between 12 and 15 bytes\")\n\n\nclass AESSIV(object):\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (32, 48, 64):\n raise ValueError(\"AESSIV key must be 256, 384, or 512 bits.\")\n\n self._key = key\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"AES-SIV is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (256, 384, 512):\n raise ValueError(\"bit_length must be 256, 384, or 512\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n data: bytes,\n associated_data: typing.Optional[typing.List[bytes]],\n ) -> bytes:\n if associated_data is None:\n associated_data = []\n\n self._check_params(data, associated_data)\n\n if len(data) > self._MAX_SIZE or any(\n len(ad) > self._MAX_SIZE for ad in associated_data\n ):\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**31 - 1 bytes\"\n )\n\n return aead._encrypt(backend, self, b\"\", data, associated_data, 16)\n\n def decrypt(\n self,\n data: bytes,\n associated_data: typing.Optional[typing.List[bytes]],\n ) -> bytes:\n if associated_data is None:\n associated_data = []\n\n self._check_params(data, associated_data)\n\n return aead._decrypt(backend, self, b\"\", data, associated_data, 16)\n\n def _check_params(\n self,\n data: bytes,\n associated_data: typing.List,\n ) -> None:\n utils._check_bytes(\"data\", data)\n if not isinstance(associated_data, list) or not all(\n isinstance(x, bytes) for x in associated_data\n ):\n raise TypeError(\"associated_data must be a list of bytes or None\")\n", "path": "src/cryptography/hazmat/primitives/ciphers/aead.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport os\nimport typing\n\nfrom cryptography import exceptions, utils\nfrom cryptography.hazmat.backends.openssl import aead\nfrom cryptography.hazmat.backends.openssl.backend import backend\nfrom cryptography.hazmat.bindings._rust import FixedPool\n\n\nclass ChaCha20Poly1305:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"ChaCha20Poly1305 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n utils._check_byteslike(\"key\", key)\n\n if len(key) != 32:\n raise ValueError(\"ChaCha20Poly1305 key must be 32 bytes.\")\n\n self._key = key\n self._pool = FixedPool(self._create_fn)\n\n @classmethod\n def generate_key(cls) -> bytes:\n return os.urandom(32)\n\n def _create_fn(self):\n return aead._aead_create_ctx(backend, self, self._key)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n with self._pool.acquire() as ctx:\n return aead._encrypt(\n backend, self, nonce, data, [associated_data], 16, ctx\n )\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n with self._pool.acquire() as ctx:\n return aead._decrypt(\n backend, self, nonce, data, [associated_data], 16, ctx\n )\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) != 12:\n raise ValueError(\"Nonce must be 12 bytes\")\n\n\nclass AESCCM:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes, tag_length: int = 16):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESCCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n if not isinstance(tag_length, int):\n raise TypeError(\"tag_length must be an integer\")\n\n if tag_length not in (4, 6, 8, 10, 12, 14, 16):\n raise ValueError(\"Invalid tag_length\")\n\n self._tag_length = tag_length\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"AESCCM is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n self._validate_lengths(nonce, len(data))\n return aead._encrypt(\n backend, self, nonce, data, [associated_data], self._tag_length\n )\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(\n backend, self, nonce, data, [associated_data], self._tag_length\n )\n\n def _validate_lengths(self, nonce: bytes, data_len: int) -> None:\n # For information about computing this, see\n # https://tools.ietf.org/html/rfc3610#section-2.1\n l_val = 15 - len(nonce)\n if 2 ** (8 * l_val) < data_len:\n raise ValueError(\"Data too long for nonce\")\n\n def _check_params(\n self, nonce: bytes, data: bytes, associated_data: bytes\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if not 7 <= len(nonce) <= 13:\n raise ValueError(\"Nonce must be between 7 and 13 bytes\")\n\n\nclass AESGCM:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESGCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, [associated_data], 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, [associated_data], 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 8 or len(nonce) > 128:\n raise ValueError(\"Nonce must be between 8 and 128 bytes\")\n\n\nclass AESOCB3:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESOCB3 key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"OCB3 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, [associated_data], 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, [associated_data], 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 12 or len(nonce) > 15:\n raise ValueError(\"Nonce must be between 12 and 15 bytes\")\n\n\nclass AESSIV(object):\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (32, 48, 64):\n raise ValueError(\"AESSIV key must be 256, 384, or 512 bits.\")\n\n self._key = key\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"AES-SIV is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (256, 384, 512):\n raise ValueError(\"bit_length must be 256, 384, or 512\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n data: bytes,\n associated_data: typing.Optional[typing.List[bytes]],\n ) -> bytes:\n if associated_data is None:\n associated_data = []\n\n self._check_params(data, associated_data)\n\n if len(data) > self._MAX_SIZE or any(\n len(ad) > self._MAX_SIZE for ad in associated_data\n ):\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**31 - 1 bytes\"\n )\n\n return aead._encrypt(backend, self, b\"\", data, associated_data, 16)\n\n def decrypt(\n self,\n data: bytes,\n associated_data: typing.Optional[typing.List[bytes]],\n ) -> bytes:\n if associated_data is None:\n associated_data = []\n\n self._check_params(data, associated_data)\n\n return aead._decrypt(backend, self, b\"\", data, associated_data, 16)\n\n def _check_params(\n self,\n data: bytes,\n associated_data: typing.List,\n ) -> None:\n utils._check_bytes(\"data\", data)\n if len(data) == 0:\n raise ValueError(\"data must not be zero length\")\n if not isinstance(associated_data, list) or not all(\n isinstance(x, bytes) for x in associated_data\n ):\n raise TypeError(\"associated_data must be a list of bytes or None\")\n", "path": "src/cryptography/hazmat/primitives/ciphers/aead.py"}]} |
gh_patches_debug_1486 | rasdani/github-patches | git_diff | open-mmlab__mmpose-295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pylint: W0105
```bash
mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py:173:8: W0105: String statement has no effect (pointless-string-statement)
```
--- END ISSUE ---
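
For context on the warning itself: W0105 flags a bare string literal used as a statement, which Python evaluates and immediately discards. A minimal sketch of the pattern — it mirrors the string flagged at line 173 of the dataset file below; the usual fix is to delete the string or fold it into the function's real docstring:

```python
def evaluate(metrics, allowed=('PCKh',)):
    for metric in metrics:
        if metric not in allowed:
            raise KeyError(f'metric {metric} is not supported')
    """Evaluate keypoint results."""  # W0105: pointless-string-statement
    return list(metrics)
```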
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py`
Content:
```
1 import copy as cp
2 import os
3 import os.path as osp
4 from collections import OrderedDict
5
6 import json_tricks as json
7 import numpy as np
8
9 from mmpose.datasets.builder import DATASETS
10 from .topdown_base_dataset import TopDownBaseDataset
11
12
13 @DATASETS.register_module()
14 class TopDownMpiiTrbDataset(TopDownBaseDataset):
15 """MPII-TRB Dataset dataset for top-down pose estimation.
16
17 `TRB: A Novel Triplet Representation for Understanding 2D Human Body`
18 ICCV'2019 More details can be found in the `paper
19 <https://arxiv.org/abs/1910.11535>`__ .
20
21 The dataset loads raw features and apply specified transforms
22 to return a dict containing the image tensors and other information.
23
24 Args:
25 ann_file (str): Path to the annotation file.
26 img_prefix (str): Path to a directory where images are held.
27 Default: None.
28 data_cfg (dict): config
29 pipeline (list[dict | callable]): A sequence of data transforms.
30 test_mode (bool): Store True when building test or
31 validation dataset. Default: False.
32 """
33
34 def __init__(self,
35 ann_file,
36 img_prefix,
37 data_cfg,
38 pipeline,
39 test_mode=False):
40
41 super().__init__(
42 ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)
43
44 # flip_pairs in MPII-TRB
45 self.ann_info['flip_pairs'] = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9],
46 [10, 11], [14, 15]]
47 for i in range(6):
48 self.ann_info['flip_pairs'].append([16 + i, 22 + i])
49 self.ann_info['flip_pairs'].append([28 + i, 34 + i])
50
51 self.ann_info['upper_body_ids'] = [0, 1, 2, 3, 4, 5, 12, 13]
52 self.ann_info['lower_body_ids'] = [6, 7, 8, 9, 10, 11]
53 self.ann_info['upper_body_ids'].extend(list(range(14, 28)))
54 self.ann_info['lower_body_ids'].extend(list(range(28, 40)))
55
56 self.ann_info['use_different_joint_weights'] = False
57
58 assert self.ann_info['num_joints'] == 40
59 self.ann_info['joint_weights'] = np.ones(
60 (self.ann_info['num_joints'], 1), dtype=np.float32)
61
62 self.db = self._get_db(ann_file)
63 self.image_set = set(x['image_file'] for x in self.db)
64 self.num_images = len(self.image_set)
65
66 print(f'=> num_images: {self.num_images}')
67 print(f'=> load {len(self.db)} samples')
68
69 def _get_db(self, ann_file):
70 """Load dataset."""
71 with open(ann_file, 'r') as f:
72 data = json.load(f)
73 tmpl = dict(
74 image_file=None,
75 center=None,
76 scale=None,
77 rotation=0,
78 joints_3d=None,
79 joints_3d_visible=None,
80 dataset='mpii_trb')
81
82 imid2info = {
83 int(osp.splitext(x['file_name'])[0]): x
84 for x in data['images']
85 }
86
87 num_joints = self.ann_info['num_joints']
88 gt_db = []
89
90 for anno in data['annotations']:
91 newitem = cp.deepcopy(tmpl)
92 image_id = anno['image_id']
93 newitem['image_file'] = os.path.join(
94 self.img_prefix, imid2info[image_id]['file_name'])
95
96 if max(anno['keypoints']) == 0:
97 continue
98
99 joints_3d = np.zeros((num_joints, 3), dtype=np.float32)
100 joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)
101
102 for ipt in range(num_joints):
103 joints_3d[ipt, 0] = anno['keypoints'][ipt * 3 + 0]
104 joints_3d[ipt, 1] = anno['keypoints'][ipt * 3 + 1]
105 joints_3d[ipt, 2] = 0
106 t_vis = min(anno['keypoints'][ipt * 3 + 2], 1)
107 joints_3d_visible[ipt, :] = (t_vis, t_vis, 0)
108
109 center = np.array(anno['center'], dtype=np.float32)
110 scale = self.ann_info['image_size'] / anno['scale'] / 200.0
111 newitem['center'] = center
112 newitem['scale'] = scale
113 newitem['joints_3d'] = joints_3d
114 newitem['joints_3d_visible'] = joints_3d_visible
115 if 'headbox' in anno:
116 newitem['headbox'] = anno['headbox']
117 gt_db.append(newitem)
118
119 return gt_db
120
121 def _evaluate_kernel(self, pred, joints_3d, joints_3d_visible, headbox):
122 """Evaluate one example."""
123 num_joints = self.ann_info['num_joints']
124 headbox = np.array(headbox)
125 threshold = np.linalg.norm(headbox[:2] - headbox[2:]) * 0.3
126 hit = np.zeros(num_joints, dtype=np.float32)
127 exist = np.zeros(num_joints, dtype=np.float32)
128
129 for i in range(num_joints):
130 pred_pt = pred[i]
131 gt_pt = joints_3d[i]
132 vis = joints_3d_visible[i][0]
133 if vis:
134 exist[i] = 1
135 else:
136 continue
137 distance = np.linalg.norm(pred_pt[:2] - gt_pt[:2])
138 if distance < threshold:
139 hit[i] = 1
140 return hit, exist
141
142 def evaluate(self, outputs, res_folder, metric='PCKh', **kwargs):
143 """Evaluate PCKh for MPII-TRB dataset.
144
145 Note:
146 batch_size: N
147 num_keypoints: K
148 heatmap height: H
149 heatmap width: W
150
151 Args:
152 outputs(list(preds, boxes, image_path, heatmap)):
153
154 * preds(np.ndarray[1,K,3]): The first two dimensions are
155 coordinates, score is the third dimension of the array.
156 * boxes(np.ndarray[1,6]): [center[0], center[1], scale[0]
157 , scale[1],area, score]
158 * image_path(list[str]): For example, ['0', '0',
159 '0', '0', '0', '1', '1', '6', '3', '.', 'j', 'p', 'g']
160 * heatmap (np.ndarray[N, K, H, W]): model output heatmap.
161 res_folder(str): Path of directory to save the results.
162 metric (str | list[str]): Metrics to be performed.
163 Defaults: 'PCKh'.
164
165 Returns:
166 dict: PCKh for each joint
167 """
168 metrics = metric if isinstance(metric, list) else [metric]
169 allowed_metrics = ['PCKh']
170 for metric in metrics:
171 if metric not in allowed_metrics:
172 raise KeyError(f'metric {metric} is not supported')
173 """Evaluate MPII-TRB keypoint results."""
174 res_file = os.path.join(res_folder, 'result_keypoints.json')
175
176 kpts = []
177
178 for preds, boxes, image_path, _ in outputs:
179 str_image_path = ''.join(image_path)
180 image_id = int(osp.basename(osp.splitext(str_image_path)[0]))
181
182 kpts.append({
183 'keypoints': preds[0].tolist(),
184 'center': boxes[0][0:2].tolist(),
185 'scale': boxes[0][2:4].tolist(),
186 'area': float(boxes[0][4]),
187 'score': float(boxes[0][5]),
188 'image_id': image_id,
189 })
190
191 self._write_keypoint_results(kpts, res_file)
192 info_str = self._report_metric(res_file)
193 name_value = OrderedDict(info_str)
194
195 return name_value
196
197 @staticmethod
198 def _write_keypoint_results(keypoints, res_file):
199 """Write results into a json file."""
200
201 with open(res_file, 'w') as f:
202 json.dump(keypoints, f, sort_keys=True, indent=4)
203
204 def _report_metric(self, res_file):
205 """Keypoint evaluation.
206
207 Report Mean Acc of skeleton, contour and all joints.
208 """
209 num_joints = self.ann_info['num_joints']
210 hit = np.zeros(num_joints, dtype=np.float32)
211 exist = np.zeros(num_joints, dtype=np.float32)
212
213 with open(res_file, 'r') as fin:
214 preds = json.load(fin)
215
216 assert len(preds) == len(
217 self.db), f'len(preds)={len(preds)}, len(self.db)={len(self.db)}'
218 for pred, item in zip(preds, self.db):
219 h, e = self._evaluate_kernel(pred['keypoints'], item['joints_3d'],
220 item['joints_3d_visible'],
221 item['headbox'])
222 hit += h
223 exist += e
224 skeleton = np.sum(hit[:14]) / np.sum(exist[:14])
225 contour = np.sum(hit[14:]) / np.sum(exist[14:])
226 mean = np.sum(hit) / np.sum(exist)
227
228 info_str = []
229 info_str.append(('Skeleton_acc', skeleton.item()))
230 info_str.append(('Contour_acc', contour.item()))
231 info_str.append(('PCKh', mean.item()))
232 return info_str
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py b/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py
--- a/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py
+++ b/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py
@@ -170,7 +170,7 @@
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
- """Evaluate MPII-TRB keypoint results."""
+
res_file = os.path.join(res_folder, 'result_keypoints.json')
kpts = []
| {"golden_diff": "diff --git a/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py b/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py\n--- a/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py\n+++ b/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py\n@@ -170,7 +170,7 @@\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n- \"\"\"Evaluate MPII-TRB keypoint results.\"\"\"\n+\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n \n kpts = []\n", "issue": "Pylint: W0105\n```bash\r\nmmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py:173:8: W0105: String statement has no effect (pointless-string-statement)\r\n```\n", "before_files": [{"content": "import copy as cp\nimport os\nimport os.path as osp\nfrom collections import OrderedDict\n\nimport json_tricks as json\nimport numpy as np\n\nfrom mmpose.datasets.builder import DATASETS\nfrom .topdown_base_dataset import TopDownBaseDataset\n\n\[email protected]_module()\nclass TopDownMpiiTrbDataset(TopDownBaseDataset):\n \"\"\"MPII-TRB Dataset dataset for top-down pose estimation.\n\n `TRB: A Novel Triplet Representation for Understanding 2D Human Body`\n ICCV'2019 More details can be found in the `paper\n <https://arxiv.org/abs/1910.11535>`__ .\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n test_mode=False):\n\n super().__init__(\n ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)\n\n # flip_pairs in MPII-TRB\n self.ann_info['flip_pairs'] = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9],\n [10, 11], [14, 15]]\n for i in range(6):\n self.ann_info['flip_pairs'].append([16 + i, 22 + i])\n self.ann_info['flip_pairs'].append([28 + i, 34 + i])\n\n self.ann_info['upper_body_ids'] = [0, 1, 2, 3, 4, 5, 12, 13]\n self.ann_info['lower_body_ids'] = [6, 7, 8, 9, 10, 11]\n self.ann_info['upper_body_ids'].extend(list(range(14, 28)))\n self.ann_info['lower_body_ids'].extend(list(range(28, 40)))\n\n self.ann_info['use_different_joint_weights'] = False\n\n assert self.ann_info['num_joints'] == 40\n self.ann_info['joint_weights'] = np.ones(\n (self.ann_info['num_joints'], 1), dtype=np.float32)\n\n self.db = self._get_db(ann_file)\n self.image_set = set(x['image_file'] for x in self.db)\n self.num_images = len(self.image_set)\n\n print(f'=> num_images: {self.num_images}')\n print(f'=> load {len(self.db)} samples')\n\n def _get_db(self, ann_file):\n \"\"\"Load dataset.\"\"\"\n with open(ann_file, 'r') as f:\n data = json.load(f)\n tmpl = dict(\n image_file=None,\n center=None,\n scale=None,\n rotation=0,\n joints_3d=None,\n joints_3d_visible=None,\n dataset='mpii_trb')\n\n imid2info = {\n int(osp.splitext(x['file_name'])[0]): x\n for x in data['images']\n }\n\n num_joints = self.ann_info['num_joints']\n gt_db = []\n\n for anno in data['annotations']:\n newitem = cp.deepcopy(tmpl)\n image_id = anno['image_id']\n newitem['image_file'] = os.path.join(\n self.img_prefix, imid2info[image_id]['file_name'])\n\n if max(anno['keypoints']) == 0:\n continue\n\n 
joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)\n\n for ipt in range(num_joints):\n joints_3d[ipt, 0] = anno['keypoints'][ipt * 3 + 0]\n joints_3d[ipt, 1] = anno['keypoints'][ipt * 3 + 1]\n joints_3d[ipt, 2] = 0\n t_vis = min(anno['keypoints'][ipt * 3 + 2], 1)\n joints_3d_visible[ipt, :] = (t_vis, t_vis, 0)\n\n center = np.array(anno['center'], dtype=np.float32)\n scale = self.ann_info['image_size'] / anno['scale'] / 200.0\n newitem['center'] = center\n newitem['scale'] = scale\n newitem['joints_3d'] = joints_3d\n newitem['joints_3d_visible'] = joints_3d_visible\n if 'headbox' in anno:\n newitem['headbox'] = anno['headbox']\n gt_db.append(newitem)\n\n return gt_db\n\n def _evaluate_kernel(self, pred, joints_3d, joints_3d_visible, headbox):\n \"\"\"Evaluate one example.\"\"\"\n num_joints = self.ann_info['num_joints']\n headbox = np.array(headbox)\n threshold = np.linalg.norm(headbox[:2] - headbox[2:]) * 0.3\n hit = np.zeros(num_joints, dtype=np.float32)\n exist = np.zeros(num_joints, dtype=np.float32)\n\n for i in range(num_joints):\n pred_pt = pred[i]\n gt_pt = joints_3d[i]\n vis = joints_3d_visible[i][0]\n if vis:\n exist[i] = 1\n else:\n continue\n distance = np.linalg.norm(pred_pt[:2] - gt_pt[:2])\n if distance < threshold:\n hit[i] = 1\n return hit, exist\n\n def evaluate(self, outputs, res_folder, metric='PCKh', **kwargs):\n \"\"\"Evaluate PCKh for MPII-TRB dataset.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmap height: H\n heatmap width: W\n\n Args:\n outputs(list(preds, boxes, image_path, heatmap)):\n\n * preds(np.ndarray[1,K,3]): The first two dimensions are\n coordinates, score is the third dimension of the array.\n * boxes(np.ndarray[1,6]): [center[0], center[1], scale[0]\n , scale[1],area, score]\n * image_path(list[str]): For example, ['0', '0',\n '0', '0', '0', '1', '1', '6', '3', '.', 'j', 'p', 'g']\n * heatmap (np.ndarray[N, K, H, W]): model output heatmap.\n res_folder(str): Path of directory to save the results.\n metric (str | list[str]): Metrics to be performed.\n Defaults: 'PCKh'.\n\n Returns:\n dict: PCKh for each joint\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCKh']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n \"\"\"Evaluate MPII-TRB keypoint results.\"\"\"\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n\n kpts = []\n\n for preds, boxes, image_path, _ in outputs:\n str_image_path = ''.join(image_path)\n image_id = int(osp.basename(osp.splitext(str_image_path)[0]))\n\n kpts.append({\n 'keypoints': preds[0].tolist(),\n 'center': boxes[0][0:2].tolist(),\n 'scale': boxes[0][2:4].tolist(),\n 'area': float(boxes[0][4]),\n 'score': float(boxes[0][5]),\n 'image_id': image_id,\n })\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file)\n name_value = OrderedDict(info_str)\n\n return name_value\n\n @staticmethod\n def _write_keypoint_results(keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n def _report_metric(self, res_file):\n \"\"\"Keypoint evaluation.\n\n Report Mean Acc of skeleton, contour and all joints.\n \"\"\"\n num_joints = self.ann_info['num_joints']\n hit = np.zeros(num_joints, dtype=np.float32)\n exist = np.zeros(num_joints, dtype=np.float32)\n\n with open(res_file, 'r') as fin:\n preds = 
json.load(fin)\n\n assert len(preds) == len(\n self.db), f'len(preds)={len(preds)}, len(self.db)={len(self.db)}'\n for pred, item in zip(preds, self.db):\n h, e = self._evaluate_kernel(pred['keypoints'], item['joints_3d'],\n item['joints_3d_visible'],\n item['headbox'])\n hit += h\n exist += e\n skeleton = np.sum(hit[:14]) / np.sum(exist[:14])\n contour = np.sum(hit[14:]) / np.sum(exist[14:])\n mean = np.sum(hit) / np.sum(exist)\n\n info_str = []\n info_str.append(('Skeleton_acc', skeleton.item()))\n info_str.append(('Contour_acc', contour.item()))\n info_str.append(('PCKh', mean.item()))\n return info_str\n", "path": "mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py"}], "after_files": [{"content": "import copy as cp\nimport os\nimport os.path as osp\nfrom collections import OrderedDict\n\nimport json_tricks as json\nimport numpy as np\n\nfrom mmpose.datasets.builder import DATASETS\nfrom .topdown_base_dataset import TopDownBaseDataset\n\n\[email protected]_module()\nclass TopDownMpiiTrbDataset(TopDownBaseDataset):\n \"\"\"MPII-TRB Dataset dataset for top-down pose estimation.\n\n `TRB: A Novel Triplet Representation for Understanding 2D Human Body`\n ICCV'2019 More details can be found in the `paper\n <https://arxiv.org/abs/1910.11535>`__ .\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n test_mode=False):\n\n super().__init__(\n ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)\n\n # flip_pairs in MPII-TRB\n self.ann_info['flip_pairs'] = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9],\n [10, 11], [14, 15]]\n for i in range(6):\n self.ann_info['flip_pairs'].append([16 + i, 22 + i])\n self.ann_info['flip_pairs'].append([28 + i, 34 + i])\n\n self.ann_info['upper_body_ids'] = [0, 1, 2, 3, 4, 5, 12, 13]\n self.ann_info['lower_body_ids'] = [6, 7, 8, 9, 10, 11]\n self.ann_info['upper_body_ids'].extend(list(range(14, 28)))\n self.ann_info['lower_body_ids'].extend(list(range(28, 40)))\n\n self.ann_info['use_different_joint_weights'] = False\n\n assert self.ann_info['num_joints'] == 40\n self.ann_info['joint_weights'] = np.ones(\n (self.ann_info['num_joints'], 1), dtype=np.float32)\n\n self.db = self._get_db(ann_file)\n self.image_set = set(x['image_file'] for x in self.db)\n self.num_images = len(self.image_set)\n\n print(f'=> num_images: {self.num_images}')\n print(f'=> load {len(self.db)} samples')\n\n def _get_db(self, ann_file):\n \"\"\"Load dataset.\"\"\"\n with open(ann_file, 'r') as f:\n data = json.load(f)\n tmpl = dict(\n image_file=None,\n center=None,\n scale=None,\n rotation=0,\n joints_3d=None,\n joints_3d_visible=None,\n dataset='mpii_trb')\n\n imid2info = {\n int(osp.splitext(x['file_name'])[0]): x\n for x in data['images']\n }\n\n num_joints = self.ann_info['num_joints']\n gt_db = []\n\n for anno in data['annotations']:\n newitem = cp.deepcopy(tmpl)\n image_id = anno['image_id']\n newitem['image_file'] = os.path.join(\n self.img_prefix, imid2info[image_id]['file_name'])\n\n if max(anno['keypoints']) == 0:\n continue\n\n joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n 
joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)\n\n for ipt in range(num_joints):\n joints_3d[ipt, 0] = anno['keypoints'][ipt * 3 + 0]\n joints_3d[ipt, 1] = anno['keypoints'][ipt * 3 + 1]\n joints_3d[ipt, 2] = 0\n t_vis = min(anno['keypoints'][ipt * 3 + 2], 1)\n joints_3d_visible[ipt, :] = (t_vis, t_vis, 0)\n\n center = np.array(anno['center'], dtype=np.float32)\n scale = self.ann_info['image_size'] / anno['scale'] / 200.0\n newitem['center'] = center\n newitem['scale'] = scale\n newitem['joints_3d'] = joints_3d\n newitem['joints_3d_visible'] = joints_3d_visible\n if 'headbox' in anno:\n newitem['headbox'] = anno['headbox']\n gt_db.append(newitem)\n\n return gt_db\n\n def _evaluate_kernel(self, pred, joints_3d, joints_3d_visible, headbox):\n \"\"\"Evaluate one example.\"\"\"\n num_joints = self.ann_info['num_joints']\n headbox = np.array(headbox)\n threshold = np.linalg.norm(headbox[:2] - headbox[2:]) * 0.3\n hit = np.zeros(num_joints, dtype=np.float32)\n exist = np.zeros(num_joints, dtype=np.float32)\n\n for i in range(num_joints):\n pred_pt = pred[i]\n gt_pt = joints_3d[i]\n vis = joints_3d_visible[i][0]\n if vis:\n exist[i] = 1\n else:\n continue\n distance = np.linalg.norm(pred_pt[:2] - gt_pt[:2])\n if distance < threshold:\n hit[i] = 1\n return hit, exist\n\n def evaluate(self, outputs, res_folder, metric='PCKh', **kwargs):\n \"\"\"Evaluate PCKh for MPII-TRB dataset.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmap height: H\n heatmap width: W\n\n Args:\n outputs(list(preds, boxes, image_path, heatmap)):\n\n * preds(np.ndarray[1,K,3]): The first two dimensions are\n coordinates, score is the third dimension of the array.\n * boxes(np.ndarray[1,6]): [center[0], center[1], scale[0]\n , scale[1],area, score]\n * image_path(list[str]): For example, ['0', '0',\n '0', '0', '0', '1', '1', '6', '3', '.', 'j', 'p', 'g']\n * heatmap (np.ndarray[N, K, H, W]): model output heatmap.\n res_folder(str): Path of directory to save the results.\n metric (str | list[str]): Metrics to be performed.\n Defaults: 'PCKh'.\n\n Returns:\n dict: PCKh for each joint\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCKh']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n\n kpts = []\n\n for preds, boxes, image_path, _ in outputs:\n str_image_path = ''.join(image_path)\n image_id = int(osp.basename(osp.splitext(str_image_path)[0]))\n\n kpts.append({\n 'keypoints': preds[0].tolist(),\n 'center': boxes[0][0:2].tolist(),\n 'scale': boxes[0][2:4].tolist(),\n 'area': float(boxes[0][4]),\n 'score': float(boxes[0][5]),\n 'image_id': image_id,\n })\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file)\n name_value = OrderedDict(info_str)\n\n return name_value\n\n @staticmethod\n def _write_keypoint_results(keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n def _report_metric(self, res_file):\n \"\"\"Keypoint evaluation.\n\n Report Mean Acc of skeleton, contour and all joints.\n \"\"\"\n num_joints = self.ann_info['num_joints']\n hit = np.zeros(num_joints, dtype=np.float32)\n exist = np.zeros(num_joints, dtype=np.float32)\n\n with open(res_file, 'r') as fin:\n preds = json.load(fin)\n\n assert len(preds) == len(\n self.db), f'len(preds)={len(preds)}, len(self.db)={len(self.db)}'\n for 
pred, item in zip(preds, self.db):\n h, e = self._evaluate_kernel(pred['keypoints'], item['joints_3d'],\n item['joints_3d_visible'],\n item['headbox'])\n hit += h\n exist += e\n skeleton = np.sum(hit[:14]) / np.sum(exist[:14])\n contour = np.sum(hit[14:]) / np.sum(exist[14:])\n mean = np.sum(hit) / np.sum(exist)\n\n info_str = []\n info_str.append(('Skeleton_acc', skeleton.item()))\n info_str.append(('Contour_acc', contour.item()))\n info_str.append(('PCKh', mean.item()))\n return info_str\n", "path": "mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py"}]} |
gh_patches_debug_1487 | rasdani/github-patches | git_diff | holoviz__holoviews-5452 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Interpolated charts (Curve, Area, etc) plot data in the provided order, but decimate unsorts data
#### ALL software version info
| Library | Version |
| -- | -- |
| python | 3.9.13 |
| holoviews | 1.15.0 |
| bokeh | 2.4.3 |
| pandas | 1.4.4 |
| numpy | 1.23.3 |
#### Description of expected behavior and the observed behavior
I should be able to decimate Curve/Area charts the same way I can decimate scatter charts. Decimating interpolated charts currently results in garbled output.
#### Complete, minimal, self-contained example code that reproduces the issue
```python
import numpy as np
import pandas as pd
import holoviews as hv
hv.extension('bokeh')
x = np.linspace(0, 10, 100)
y1 = np.sin(x)
y2 = np.cos(x)
table = hv.Table((x, y1, y2), 'x', ['y1', 'y2'])
hv.Area(table) # See Figure 1
---------------------------
from holoviews.operation import decimate
decimate(hv.Area(table), max_samples = 50) # See Figure 2
```
#### Screenshots or screencasts of the bug in action
Figure 1

Figure 2

--- END ISSUE ---
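
A note on where this points: the `decimate` operation lives further down `holoviews/operation/element.py` than the excerpt below reaches, and it thins an element down to `max_samples` by drawing a random subset of rows. Random draws come back in arbitrary order, which is harmless for Scatter (points are order-independent) but scrambles the provided ordering that Curve/Area interpolation relies on — hence Figure 2. A plausible remedy, sketched here only to illustrate the idea rather than the actual patch, is to sort the sampled row indices before slicing so the subsample keeps the row order the element was given:

```python
import numpy as np

def decimate_indices(n_rows: int, max_samples: int, seed: int = 42) -> np.ndarray:
    """Pick a random subset of row indices, returned in ascending order so the
    subsample preserves the original row order of the element."""
    rng = np.random.default_rng(seed)
    inds = rng.choice(n_rows, size=min(max_samples, n_rows), replace=False)
    return np.sort(inds)  # the sort is the piece the reported behavior is missing

# e.g. element.iloc[decimate_indices(len(element), 50)] keeps the drawn rows in provided order
```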
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holoviews/operation/element.py`
Content:
```
1 """
2 Collection of either extremely generic or simple Operation
3 examples.
4 """
5 import warnings
6
7 import numpy as np
8 import param
9
10 from param import _is_number
11
12 from ..core import (Operation, NdOverlay, Overlay, GridMatrix,
13 HoloMap, Dataset, Element, Collator, Dimension)
14 from ..core.data import ArrayInterface, DictInterface, default_datatype
15 from ..core.data.util import dask_array_module
16 from ..core.util import (
17 LooseVersion, group_sanitizer, label_sanitizer, pd, datetime_types, isfinite,
18 dt_to_int, isdatetime, is_dask_array, is_cupy_array, is_ibis_expr
19 )
20 from ..element.chart import Histogram, Scatter
21 from ..element.raster import Image, RGB
22 from ..element.path import Contours, Polygons
23 from ..element.util import categorical_aggregate2d # noqa (API import)
24 from ..streams import RangeXY
25
26 column_interfaces = [ArrayInterface, DictInterface]
27 if pd:
28 from ..core.data import PandasInterface
29 column_interfaces.append(PandasInterface)
30
31
32 def identity(x,k): return x
33
34 class operation(Operation):
35 """
36 The most generic operation that wraps any callable into an
37 Operation. The callable needs to accept an HoloViews
38 component and a key (that may be ignored) and must return a new
39 HoloViews component.
40
41 This class may be useful for turning a HoloViews method into an
42 operation to define as compositor operation. For instance, the
43 following definition:
44
45 operation.instance(op=lambda x, k: x.collapse(np.subtract))
46
47 Could be used to implement a collapse operation to subtracts the
48 data between Rasters in an Overlay.
49 """
50
51 output_type = param.Parameter(None, doc="""
52 The output element type which may be None to disable type
53 checking.
54
55 May be used to declare useful information to other code in
56 HoloViews, e.g. required for tab-completion support of operations
57 registered with compositors.""")
58
59 group = param.String(default='Operation', doc="""
60 The group assigned to the result after having applied the
61 operator.""")
62
63 op = param.Callable(default=identity, doc="""
64 The operation used to generate a new HoloViews object returned
65 by the operation. By default, the identity operation is
66 applied.""")
67
68 def _process(self, view, key=None):
69 retval = self.p.op(view, key)
70 if (self.p.output_type is not None):
71 assert isinstance(retval, self.p.output_type), \
72 "Return value does not match the declared output type."
73 return retval.relabel(group=self.p.group)
74
75
76 class factory(Operation):
77 """
78 Simple operation that constructs any element that accepts some
79 other element as input. For instance, RGB and HSV elements can be
80 created from overlays of Image elements.
81 """
82
83 output_type = param.Parameter(RGB, doc="""
84 The output type of the factor operation.
85
86 By default, if three overlaid Images elements are supplied,
87 the corresponding RGB element will be returned. """)
88
89 args = param.List(default=[], doc="""
90 The list of positional argument to pass to the factory""")
91
92 kwargs = param.Dict(default={}, doc="""
93 The dict of keyword arguments to pass to the factory""")
94
95 def _process(self, view, key=None):
96 return self.p.output_type(view, *self.p.args, **self.p.kwargs)
97
98
99 class function(Operation):
100
101 output_type = param.ClassSelector(class_=type, doc="""
102 The output type of the method operation""")
103
104 input_type = param.ClassSelector(class_=type, doc="""
105 The object type the method is defined on""")
106
107 fn = param.Callable(default=lambda el, *args, **kwargs: el, doc="""
108 The function to apply.""")
109
110 args = param.List(default=[], doc="""
111 The list of positional argument to pass to the method""")
112
113 kwargs = param.Dict(default={}, doc="""
114 The dict of keyword arguments to pass to the method""")
115
116 def _process(self, element, key=None):
117 return self.p.fn(element, *self.p.args, **self.p.kwargs)
118
119
120 class method(Operation):
121 """
122 Operation that wraps a method call
123 """
124
125 output_type = param.ClassSelector(class_=type, doc="""
126 The output type of the method operation""")
127
128 input_type = param.ClassSelector(class_=type, doc="""
129 The object type the method is defined on""")
130
131 method_name = param.String(default='__call__', doc="""
132 The method name""")
133
134 args = param.List(default=[], doc="""
135         The list of positional arguments to pass to the method""")
136
137 kwargs = param.Dict(default={}, doc="""
138 The dict of keyword arguments to pass to the method""")
139
140 def _process(self, element, key=None):
141 fn = getattr(self.p.input_type, self.p.method_name)
142 return fn(element, *self.p.args, **self.p.kwargs)
143
144
145 class apply_when(param.ParameterizedFunction):
146 """
147 Applies a selection depending on the current zoom range. If the
148     supplied predicate function returns True it will apply the
149     operation, otherwise it will return the raw element after the
150     selection. For example, the following will apply datashading if
151     the number of points in the current viewport exceeds 1000, otherwise
152     it just returns the selected points element:
153
154 apply_when(points, operation=datashade, predicate=lambda x: x > 1000)
155 """
156
157 operation = param.Callable(default=lambda x: x)
158
159 predicate = param.Callable(default=None)
160
161 def _apply(self, element, x_range, y_range, invert=False):
162 selected = element
163 if x_range is not None and y_range is not None:
164 selected = element[x_range, y_range]
165 condition = self.predicate(selected)
166 if (not invert and condition) or (invert and not condition):
167 return selected
168 elif selected.interface.gridded:
169 return selected.clone([])
170 else:
171 return selected.iloc[:0]
172
173 def __call__(self, obj, **params):
174 if 'streams' in params:
175 streams = params.pop('streams')
176 else:
177 streams = [RangeXY()]
178 self.param.set_param(**params)
179 if not self.predicate:
180 raise ValueError(
181 'Must provide a predicate function to determine when '
182 'to apply the operation and when to return the selected '
183 'data.'
184 )
185 applied = self.operation(obj.apply(self._apply, streams=streams))
186 raw = obj.apply(self._apply, streams=streams, invert=True)
187 return applied * raw
188
189
190 class chain(Operation):
191 """
192 Defining an Operation chain is an easy way to define a new
193 Operation from a series of existing ones. The argument is a
194 list of Operation (or Operation instances) that are
195 called in sequence to generate the returned element.
196
197 chain(operations=[gradient, threshold.instance(level=2)])
198
199 This operation can accept an Image instance and would first
200 compute the gradient before thresholding the result at a level of
201 2.0.
202
203 Instances are only required when arguments need to be passed to
204 individual operations so the resulting object is a function over a
205 single argument.
206 """
207
208 output_type = param.Parameter(Image, doc="""
209 The output type of the chain operation. Must be supplied if
210 the chain is to be used as a channel operation.""")
211
212 group = param.String(default='', doc="""
213 The group assigned to the result after having applied the chain.
214 Defaults to the group produced by the last operation in the chain""")
215
216 operations = param.List(default=[], class_=Operation, doc="""
217 A list of Operations (or Operation instances)
218 that are applied on the input from left to right.""")
219
220 def _process(self, view, key=None):
221 processed = view
222 for i, operation in enumerate(self.p.operations):
223 processed = operation.process_element(
224 processed, key, input_ranges=self.p.input_ranges
225 )
226
227 if not self.p.group:
228 return processed
229 else:
230 return processed.clone(group=self.p.group)
231
232 def find(self, operation, skip_nonlinked=True):
233 """
234 Returns the first found occurrence of an operation while
235 performing a backward traversal of the chain pipeline.
236 """
237 found = None
238 for op in self.operations[::-1]:
239 if isinstance(op, operation):
240 found = op
241 break
242 if not op.link_inputs and skip_nonlinked:
243 break
244 return found
245
246
247 class transform(Operation):
248 """
249 Generic Operation to transform an input Image or RGBA
250 element into an output Image. The transformation is defined by
251 the supplied callable that accepts the data of the input Image
252 (typically a numpy array) and returns the transformed data of the
253 output Image.
254
255 This operator is extremely versatile; for instance, you could
256 implement an alternative to the explicit threshold operator with:
257
258 operator=lambda x: np.clip(x, 0, 0.5)
259
260 Alternatively, you can implement a transform computing the 2D
261 autocorrelation using the scipy library with:
262
263 operator=lambda x: scipy.signal.correlate2d(x, x)
264 """
265
266 output_type = Image
267
268 group = param.String(default='Transform', doc="""
269 The group assigned to the result after applying the
270 transform.""")
271
272 operator = param.Callable(doc="""
273 Function of one argument that transforms the data in the input
274 Image to the data in the output Image. By default, acts as
275 the identity function such that the output matches the input.""")
276
277 def _process(self, img, key=None):
278 processed = (img.data if not self.p.operator
279 else self.p.operator(img.data))
280 return img.clone(processed, group=self.p.group)
281
282
283 class image_overlay(Operation):
284 """
285     Operation to build an overlay of images to a specification from a
286 subset of the required elements.
287
288 This is useful for reordering the elements of an overlay,
289 duplicating layers of an overlay or creating blank image elements
290 in the appropriate positions.
291
292 For instance, image_overlay may build a three layered input
293 suitable for the RGB factory operation even if supplied with one
294 or two of the required channels (creating blank channels for the
295 missing elements).
296
297 Note that if there is any ambiguity regarding the match, the
298 strongest match will be used. In the case of a tie in match
299 strength, the first layer in the input is used. One successful
300 match is always required.
301 """
302
303 output_type = Overlay
304
305 spec = param.String(doc="""
306 Specification of the output Overlay structure. For instance:
307
308 Image.R * Image.G * Image.B
309
310 Will ensure an overlay of this structure is created even if
311 (for instance) only (Image.R * Image.B) is supplied.
312
313 Elements in the input overlay that match are placed in the
314 appropriate positions and unavailable specification elements
315 are created with the specified fill group.""")
316
317 fill = param.Number(default=0)
318
319 default_range = param.Tuple(default=(0,1), doc="""
320 The default range that will be set on the value_dimension of
321 any automatically created blank image elements.""")
322
323 group = param.String(default='Transform', doc="""
324 The group assigned to the resulting overlay.""")
325
326
327 @classmethod
328 def _match(cls, el, spec):
329 "Return the strength of the match (None if no match)"
330 spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))
331 if not isinstance(el, Image) or spec_dict['type'] != 'Image':
332 raise NotImplementedError("Only Image currently supported")
333
334 sanitizers = {'group':group_sanitizer, 'label':label_sanitizer}
335 strength = 1
336 for key in ['group', 'label']:
337 attr_value = sanitizers[key](getattr(el, key))
338 if key in spec_dict:
339 if spec_dict[key] != attr_value: return None
340 strength += 1
341 return strength
342
343
344 def _match_overlay(self, raster, overlay_spec):
345 """
346 Given a raster or input overlay, generate a list of matched
347 elements (None if no match) and corresponding tuple of match
348 strength values.
349 """
350 ordering = [None]*len(overlay_spec) # Elements to overlay
351 strengths = [0]*len(overlay_spec) # Match strengths
352
353 elements = raster.values() if isinstance(raster, Overlay) else [raster]
354
355 for el in elements:
356 for pos in range(len(overlay_spec)):
357 strength = self._match(el, overlay_spec[pos])
358 if strength is None: continue # No match
359 elif (strength <= strengths[pos]): continue # Weaker match
360 else: # Stronger match
361 ordering[pos] = el
362 strengths[pos] = strength
363 return ordering, strengths
364
365
366 def _process(self, raster, key=None):
367 specs = tuple(el.strip() for el in self.p.spec.split('*'))
368 ordering, strengths = self._match_overlay(raster, specs)
369 if all(el is None for el in ordering):
370 raise Exception("The image_overlay operation requires at least one match")
371
372 completed = []
373 strongest = ordering[np.argmax(strengths)]
374 for el, spec in zip(ordering, specs):
375 if el is None:
376 spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))
377 el = Image(np.ones(strongest.data.shape) * self.p.fill,
378 group=spec_dict.get('group','Image'),
379 label=spec_dict.get('label',''))
380 el.vdims[0].range = self.p.default_range
381 completed.append(el)
382 return np.prod(completed)
383
384
385
386 class threshold(Operation):
387 """
388 Threshold a given Image whereby all values higher than a given
389 level map to the specified high value and all values lower than
390 that level map to the specified low value.
391 """
392 output_type = Image
393
394 level = param.Number(default=0.5, doc="""
395 The value at which the threshold is applied. Values lower than
396 the threshold map to the 'low' value and values above map to
397 the 'high' value.""")
398
399 high = param.Number(default=1.0, doc="""
400 The value given to elements greater than (or equal to) the
401 threshold.""")
402
403 low = param.Number(default=0.0, doc="""
404 The value given to elements below the threshold.""")
405
406 group = param.String(default='Threshold', doc="""
407 The group assigned to the thresholded output.""")
408
409 _per_element = True
410
411 def _process(self, matrix, key=None):
412
413 if not isinstance(matrix, Image):
414 raise TypeError("The threshold operation requires a Image as input.")
415
416 arr = matrix.data
417 high = np.ones(arr.shape) * self.p.high
418 low = np.ones(arr.shape) * self.p.low
419 thresholded = np.where(arr > self.p.level, high, low)
420
421 return matrix.clone(thresholded, group=self.p.group)
422
423
424
425 class gradient(Operation):
426 """
427 Compute the gradient plot of the supplied Image.
428
429     If the Image value dimension is cyclic, the smallest step is taken,
430     considering the cyclic range.
431 """
432
433 output_type = Image
434
435 group = param.String(default='Gradient', doc="""
436 The group assigned to the output gradient matrix.""")
437
438 _per_element = True
439
440 def _process(self, matrix, key=None):
441
442 if len(matrix.vdims) != 1:
443 raise ValueError("Input matrix to gradient operation must "
444 "have single value dimension.")
445
446 matrix_dim = matrix.vdims[0]
447
448 data = np.flipud(matrix.dimension_values(matrix_dim, flat=False))
449 r, c = data.shape
450
451 if matrix_dim.cyclic and (None in matrix_dim.range):
452 raise Exception("Cyclic range must be specified to compute "
453 "the gradient of cyclic quantities")
454 cyclic_range = None if not matrix_dim.cyclic else np.diff(matrix_dim.range)
455 if cyclic_range is not None:
456 # shift values such that wrapping works ok
457 data = data - matrix_dim.range[0]
458
459 dx = np.diff(data, 1, axis=1)[0:r-1, 0:c-1]
460 dy = np.diff(data, 1, axis=0)[0:r-1, 0:c-1]
461
462 if cyclic_range is not None: # Wrap into the specified range
463 # Convert negative differences to an equivalent positive value
464 dx = dx % cyclic_range
465 dy = dy % cyclic_range
466 #
467 # Prefer small jumps
468 dx_negatives = dx - cyclic_range
469 dy_negatives = dy - cyclic_range
470 dx = np.where(np.abs(dx_negatives)<dx, dx_negatives, dx)
471 dy = np.where(np.abs(dy_negatives)<dy, dy_negatives, dy)
472
473 return Image(np.sqrt(dx * dx + dy * dy), bounds=matrix.bounds, group=self.p.group)
474
475
476
477 class convolve(Operation):
478 """
479 Apply a convolution to an overlay using the top layer as the
480 kernel for convolving the bottom layer. Both Image elements in
481 the input overlay should have a single value dimension.
482 """
483
484 output_type = Image
485
486 group = param.String(default='Convolution', doc="""
487 The group assigned to the convolved output.""")
488
489 kernel_roi = param.NumericTuple(default=(0,0,0,0), length=4, doc="""
490 A 2-dimensional slice of the kernel layer to use in the
491 convolution in lbrt (left, bottom, right, top) format. By
492 default, no slicing is applied.""")
493
494 _per_element = True
495
496 def _process(self, overlay, key=None):
497 if len(overlay) != 2:
498 raise Exception("Overlay must contain at least to items.")
499
500 [target, kernel] = overlay.get(0), overlay.get(1)
501
502 if len(target.vdims) != 1:
503 raise Exception("Convolution requires inputs with single value dimensions.")
504
505 xslice = slice(self.p.kernel_roi[0], self.p.kernel_roi[2])
506 yslice = slice(self.p.kernel_roi[1], self.p.kernel_roi[3])
507
508 k = kernel.data if self.p.kernel_roi == (0,0,0,0) else kernel[xslice, yslice].data
509
510 data = np.flipud(target.dimension_values(2, flat=False))
511 fft1 = np.fft.fft2(data)
512 fft2 = np.fft.fft2(k, s=data.shape)
513 convolved_raw = np.fft.ifft2(fft1 * fft2).real
514
515 k_rows, k_cols = k.shape
516 rolled = np.roll(np.roll(convolved_raw, -(k_cols//2), axis=-1), -(k_rows//2), axis=-2)
517 convolved = rolled / float(k.sum())
518
519 return Image(convolved, bounds=target.bounds, group=self.p.group)
520
521
522
523 class contours(Operation):
524 """
525     Given an Image with a single channel, annotate it with contour
526 lines for a given set of contour levels.
527
528 The return is an NdOverlay with a Contours layer for each given
529 level, overlaid on top of the input Image.
530 """
531
532 output_type = Overlay
533
534 levels = param.ClassSelector(default=10, class_=(list, int), doc="""
535 A list of scalar values used to specify the contour levels.""")
536
537 group = param.String(default='Level', doc="""
538 The group assigned to the output contours.""")
539
540 filled = param.Boolean(default=False, doc="""
541 Whether to generate filled contours""")
542
543 overlaid = param.Boolean(default=False, doc="""
544 Whether to overlay the contour on the supplied Element.""")
545
546 _per_element = True
547
548 def _process(self, element, key=None):
549 try:
550 from matplotlib.contour import QuadContourSet
551 from matplotlib.axes import Axes
552 from matplotlib.figure import Figure
553 from matplotlib.dates import num2date, date2num
554 except ImportError:
555 raise ImportError("contours operation requires matplotlib.")
556 extent = element.range(0) + element.range(1)[::-1]
557
558 xs = element.dimension_values(0, True, flat=False)
559 ys = element.dimension_values(1, True, flat=False)
560 zs = element.dimension_values(2, flat=False)
561
562 # Ensure that coordinate arrays specify bin centers
563 if xs.shape[0] != zs.shape[0]:
564 xs = xs[:-1] + np.diff(xs, axis=0)/2.
565 if xs.shape[1] != zs.shape[1]:
566 xs = xs[:, :-1] + (np.diff(xs, axis=1)/2.)
567 if ys.shape[0] != zs.shape[0]:
568 ys = ys[:-1] + np.diff(ys, axis=0)/2.
569 if ys.shape[1] != zs.shape[1]:
570 ys = ys[:, :-1] + (np.diff(ys, axis=1)/2.)
571 data = (xs, ys, zs)
572
573 # if any data is a datetime, transform to matplotlib's numerical format
574 data_is_datetime = tuple(isdatetime(arr) for k, arr in enumerate(data))
575 if any(data_is_datetime):
576 data = tuple(
577 date2num(d) if is_datetime else d
578 for d, is_datetime in zip(data, data_is_datetime)
579 )
580
581 xdim, ydim = element.dimensions('key', label=True)
582 if self.p.filled:
583 contour_type = Polygons
584 else:
585 contour_type = Contours
586 vdims = element.vdims[:1]
587
588 kwargs = {}
589 levels = self.p.levels
590 zmin, zmax = element.range(2)
591 if isinstance(self.p.levels, int):
592 if zmin == zmax:
593 contours = contour_type([], [xdim, ydim], vdims)
594 return (element * contours) if self.p.overlaid else contours
595 data += (levels,)
596 else:
597 kwargs = {'levels': levels}
598
599 fig = Figure()
600 ax = Axes(fig, [0, 0, 1, 1])
601 contour_set = QuadContourSet(ax, *data, filled=self.p.filled,
602 extent=extent, **kwargs)
603 levels = np.array(contour_set.get_array())
604 crange = levels.min(), levels.max()
605 if self.p.filled:
606 levels = levels[:-1] + np.diff(levels)/2.
607 vdims = [vdims[0].clone(range=crange)]
608
609 paths = []
610 empty = np.array([[np.nan, np.nan]])
611 for level, cset in zip(levels, contour_set.collections):
612 exteriors = []
613 interiors = []
614 for geom in cset.get_paths():
615 interior = []
616 polys = geom.to_polygons(closed_only=False)
617 for ncp, cp in enumerate(polys):
618 if any(data_is_datetime[0:2]):
619 # transform x/y coordinates back to datetimes
620 xs, ys = np.split(cp, 2, axis=1)
621 if data_is_datetime[0]:
622 xs = np.array(num2date(xs))
623 if data_is_datetime[1]:
624 ys = np.array(num2date(ys))
625 cp = np.concatenate((xs, ys), axis=1)
626 if ncp == 0:
627 exteriors.append(cp)
628 exteriors.append(empty)
629 else:
630 interior.append(cp)
631 if len(polys):
632 interiors.append(interior)
633 if not exteriors:
634 continue
635 geom = {
636 element.vdims[0].name:
637 num2date(level) if data_is_datetime[2] else level,
638 (xdim, ydim): np.concatenate(exteriors[:-1])
639 }
640 if self.p.filled and interiors:
641 geom['holes'] = interiors
642 paths.append(geom)
643 contours = contour_type(paths, label=element.label, kdims=element.kdims, vdims=vdims)
644 if self.p.overlaid:
645 contours = element * contours
646 return contours
647
648
649 class histogram(Operation):
650 """
651 Returns a Histogram of the input element data, binned into
652 num_bins over the bin_range (if specified) along the specified
653 dimension.
654 """
655
656 bin_range = param.NumericTuple(default=None, length=2, doc="""
657 Specifies the range within which to compute the bins.""")
658
659 bins = param.ClassSelector(default=None, class_=(np.ndarray, list, tuple, str), doc="""
660 An explicit set of bin edges or a method to find the optimal
661 set of bin edges, e.g. 'auto', 'fd', 'scott' etc. For more
662 documentation on these approaches see the np.histogram_bin_edges
663 documentation.""")
664
665 cumulative = param.Boolean(default=False, doc="""
666 Whether to compute the cumulative histogram""")
667
668 dimension = param.String(default=None, doc="""
669 Along which dimension of the Element to compute the histogram.""")
670
671 frequency_label = param.String(default=None, doc="""
672 Format string defining the label of the frequency dimension of the Histogram.""")
673
674 groupby = param.ClassSelector(default=None, class_=(str, Dimension), doc="""
675 Defines a dimension to group the Histogram returning an NdOverlay of Histograms.""")
676
677 log = param.Boolean(default=False, doc="""
678 Whether to use base 10 logarithmic samples for the bin edges.""")
679
680 mean_weighted = param.Boolean(default=False, doc="""
681 Whether the weighted frequencies are averaged.""")
682
683 normed = param.ObjectSelector(default=False,
684 objects=[True, False, 'integral', 'height'],
685 doc="""
686 Controls normalization behavior. If `True` or `'integral'`, then
687 `density=True` is passed to np.histogram, and the distribution
688 is normalized such that the integral is unity. If `False`,
689 then the frequencies will be raw counts. If `'height'`, then the
690 frequencies are normalized such that the max bin height is unity.""")
691
692 nonzero = param.Boolean(default=False, doc="""
693 Whether to use only nonzero values when computing the histogram""")
694
695 num_bins = param.Integer(default=20, doc="""
696         Number of bins in the histogram.""")
697
698 weight_dimension = param.String(default=None, doc="""
699 Name of the dimension the weighting should be drawn from""")
700
701 style_prefix = param.String(default=None, allow_None=None, doc="""
702 Used for setting a common style for histograms in a HoloMap or AdjointLayout.""")
703
704 def _process(self, element, key=None):
705 if self.p.groupby:
706 if not isinstance(element, Dataset):
707 raise ValueError('Cannot use histogram groupby on non-Dataset Element')
708 grouped = element.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay)
709 self.p.groupby = None
710 return grouped.map(self._process, Dataset)
711
712 normed = False if self.p.mean_weighted and self.p.weight_dimension else self.p.normed
713 if self.p.dimension:
714 selected_dim = self.p.dimension
715 else:
716 selected_dim = [d.name for d in element.vdims + element.kdims][0]
717 dim = element.get_dimension(selected_dim)
718
719 if hasattr(element, 'interface'):
720 data = element.interface.values(element, selected_dim, compute=False)
721 else:
722 data = element.dimension_values(selected_dim)
723
724 is_datetime = isdatetime(data)
725 if is_datetime:
726 data = data.astype('datetime64[ns]').astype('int64')
727
728 # Handle different datatypes
729 is_finite = isfinite
730 is_cupy = is_cupy_array(data)
731 if is_cupy:
732 import cupy
733 full_cupy_support = LooseVersion(cupy.__version__) > LooseVersion('8.0')
734 if not full_cupy_support and (normed or self.p.weight_dimension):
735 data = cupy.asnumpy(data)
736 is_cupy = False
737 else:
738 is_finite = cupy.isfinite
739
740 # Mask data
741 if is_ibis_expr(data):
742 mask = data.notnull()
743 if self.p.nonzero:
744 mask = mask & (data != 0)
745 data = data.to_projection()
746 data = data[mask]
747 no_data = not len(data.head(1).execute())
748 data = data[dim.name]
749 else:
750 mask = is_finite(data)
751 if self.p.nonzero:
752 mask = mask & (data != 0)
753 data = data[mask]
754 da = dask_array_module()
755 no_data = False if da and isinstance(data, da.Array) else not len(data)
756
757 # Compute weights
758 if self.p.weight_dimension:
759 if hasattr(element, 'interface'):
760 weights = element.interface.values(element, self.p.weight_dimension, compute=False)
761 else:
762 weights = element.dimension_values(self.p.weight_dimension)
763 weights = weights[mask]
764 else:
765 weights = None
766
767 # Compute bins
768 if isinstance(self.p.bins, str):
769 bin_data = cupy.asnumpy(data) if is_cupy else data
770 edges = np.histogram_bin_edges(bin_data, bins=self.p.bins)
771 elif isinstance(self.p.bins, (list, np.ndarray)):
772 edges = self.p.bins
773 if isdatetime(edges):
774 edges = edges.astype('datetime64[ns]').astype('int64')
775 else:
776 hist_range = self.p.bin_range or element.range(selected_dim)
777 # Suppress a warning emitted by Numpy when datetime or timedelta scalars
778 # are compared. See https://github.com/numpy/numpy/issues/10095 and
779 # https://github.com/numpy/numpy/issues/9210.
780 with warnings.catch_warnings():
781 warnings.filterwarnings(
782 action='ignore', message='elementwise comparison failed',
783 category=DeprecationWarning
784 )
785 null_hist_range = hist_range == (0, 0)
786 # Avoids range issues including zero bin range and empty bins
787 if null_hist_range or any(not isfinite(r) for r in hist_range):
788 hist_range = (0, 1)
789 steps = self.p.num_bins + 1
790 start, end = hist_range
791 if is_datetime:
792 start, end = dt_to_int(start, 'ns'), dt_to_int(end, 'ns')
793 if self.p.log:
794 bin_min = max([abs(start), data[data>0].min()])
795 edges = np.logspace(np.log10(bin_min), np.log10(end), steps)
796 else:
797 edges = np.linspace(start, end, steps)
798 if is_cupy:
799 edges = cupy.asarray(edges)
800
801 if not is_dask_array(data) and no_data:
802 nbins = self.p.num_bins if self.p.bins is None else len(self.p.bins)-1
803 hist = np.zeros(nbins)
804 elif hasattr(element, 'interface'):
805 density = True if normed else False
806 hist, edges = element.interface.histogram(
807 data, edges, density=density, weights=weights
808 )
809 if normed == 'height':
810 hist /= hist.max()
811 if self.p.weight_dimension and self.p.mean_weighted:
812 hist_mean, _ = element.interface.histogram(
813 data, density=False, bins=edges
814 )
815 hist /= hist_mean
816 elif normed:
817 # This covers True, 'height', 'integral'
818 hist, edges = np.histogram(data, density=True,
819 weights=weights, bins=edges)
820 if normed == 'height':
821 hist /= hist.max()
822 else:
823 hist, edges = np.histogram(data, normed=normed, weights=weights, bins=edges)
824 if self.p.weight_dimension and self.p.mean_weighted:
825 hist_mean, _ = np.histogram(data, density=False, bins=self.p.num_bins)
826 hist /= hist_mean
827
828 hist[np.isnan(hist)] = 0
829 if is_datetime:
830 edges = (edges/1e3).astype('datetime64[us]')
831
832 params = {}
833 if self.p.weight_dimension:
834 params['vdims'] = [element.get_dimension(self.p.weight_dimension)]
835 elif self.p.frequency_label:
836 label = self.p.frequency_label.format(dim=dim.pprint_label)
837 params['vdims'] = [Dimension('Frequency', label=label)]
838 else:
839 label = 'Frequency' if normed else 'Count'
840 params['vdims'] = [Dimension('{0}_{1}'.format(dim.name, label.lower()),
841 label=label)]
842
843 if element.group != element.__class__.__name__:
844 params['group'] = element.group
845
846 if self.p.cumulative:
847 hist = np.cumsum(hist)
848 if self.p.normed in (True, 'integral'):
849 hist *= edges[1]-edges[0]
850
851 # Save off the computed bin edges so that if this operation instance
852 # is used to compute another histogram, it will default to the same
853 # bin edges.
854 self.bins = list(edges)
855 return Histogram((edges, hist), kdims=[element.get_dimension(selected_dim)],
856 label=element.label, **params)
857
858
859 class decimate(Operation):
860 """
861 Decimates any column based Element to a specified number of random
862 rows if the current element defined by the x_range and y_range
863 contains more than max_samples. By default the operation returns a
864 DynamicMap with a RangeXY stream allowing dynamic downsampling.
865 """
866
867 dynamic = param.Boolean(default=True, doc="""
868 Enables dynamic processing by default.""")
869
870 link_inputs = param.Boolean(default=True, doc="""
871 By default, the link_inputs parameter is set to True so that
872         when applying decimate, backends that support linked streams
873         update RangeXY streams on the inputs of the decimate operation.""")
874
875 max_samples = param.Integer(default=5000, doc="""
876 Maximum number of samples to display at the same time.""")
877
878 random_seed = param.Integer(default=42, doc="""
879 Seed used to initialize randomization.""")
880
881 streams = param.ClassSelector(default=[RangeXY], class_=(dict, list),
882 doc="""
883 List of streams that are applied if dynamic=True, allowing
884 for dynamic interaction with the plot.""")
885
886 x_range = param.NumericTuple(default=None, length=2, doc="""
887 The x_range as a tuple of min and max x-value. Auto-ranges
888 if set to None.""")
889
890 y_range = param.NumericTuple(default=None, length=2, doc="""
891         The y_range as a tuple of min and max y-value. Auto-ranges
892 if set to None.""")
893
894 _per_element = True
895
896 def _process_layer(self, element, key=None):
897 if not isinstance(element, Dataset):
898 raise ValueError("Cannot downsample non-Dataset types.")
899 if element.interface not in column_interfaces:
900 element = element.clone(tuple(element.columns().values()))
901
902 xstart, xend = self.p.x_range if self.p.x_range else element.range(0)
903 ystart, yend = self.p.y_range if self.p.y_range else element.range(1)
904
905 # Slice element to current ranges
906 xdim, ydim = element.dimensions(label=True)[0:2]
907 sliced = element.select(**{xdim: (xstart, xend),
908 ydim: (ystart, yend)})
909
910 if len(sliced) > self.p.max_samples:
911 prng = np.random.RandomState(self.p.random_seed)
912 return sliced.iloc[prng.choice(len(sliced), self.p.max_samples, False)]
913 return sliced
914
915 def _process(self, element, key=None):
916 return element.map(self._process_layer, Element)
917
918
919 class interpolate_curve(Operation):
920 """
921 Resamples a Curve using the defined interpolation method, e.g.
922 to represent changes in y-values as steps.
923 """
924
925 interpolation = param.ObjectSelector(objects=['steps-pre', 'steps-mid',
926 'steps-post', 'linear'],
927 default='steps-mid', doc="""
928 Controls the transition point of the step along the x-axis.""")
929
930 _per_element = True
931
932 @classmethod
933 def pts_to_prestep(cls, x, values):
934 steps = np.zeros(2 * len(x) - 1)
935 value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)
936
937 steps[0::2] = x
938 steps[1::2] = steps[0:-2:2]
939
940 val_arrays = []
941 for v, s in zip(values, value_steps):
942 s[0::2] = v
943 s[1::2] = s[2::2]
944 val_arrays.append(s)
945
946 return steps, tuple(val_arrays)
947
948 @classmethod
949 def pts_to_midstep(cls, x, values):
950 steps = np.zeros(2 * len(x))
951 value_steps = tuple(np.empty(2 * len(x), dtype=v.dtype) for v in values)
952
953 steps[1:-1:2] = steps[2::2] = x[:-1] + (x[1:] - x[:-1])/2
954 steps[0], steps[-1] = x[0], x[-1]
955
956 val_arrays = []
957 for v, s in zip(values, value_steps):
958 s[0::2] = v
959 s[1::2] = s[0::2]
960 val_arrays.append(s)
961
962 return steps, tuple(val_arrays)
963
964 @classmethod
965 def pts_to_poststep(cls, x, values):
966 steps = np.zeros(2 * len(x) - 1)
967 value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)
968
969 steps[0::2] = x
970 steps[1::2] = steps[2::2]
971
972 val_arrays = []
973 for v, s in zip(values, value_steps):
974 s[0::2] = v
975 s[1::2] = s[0:-2:2]
976 val_arrays.append(s)
977
978 return steps, tuple(val_arrays)
979
980 def _process_layer(self, element, key=None):
981 INTERPOLATE_FUNCS = {'steps-pre': self.pts_to_prestep,
982 'steps-mid': self.pts_to_midstep,
983 'steps-post': self.pts_to_poststep}
984 if self.p.interpolation not in INTERPOLATE_FUNCS:
985 return element
986 x = element.dimension_values(0)
987 is_datetime = isdatetime(x)
988 if is_datetime:
989 dt_type = 'datetime64[ns]'
990 x = x.astype(dt_type)
991 dvals = tuple(element.dimension_values(d) for d in element.dimensions()[1:])
992 xs, dvals = INTERPOLATE_FUNCS[self.p.interpolation](x, dvals)
993 if is_datetime:
994 xs = xs.astype(dt_type)
995 return element.clone((xs,)+dvals)
996
997 def _process(self, element, key=None):
998 return element.map(self._process_layer, Element)
999
1000
1001 #==================#
1002 # Other operations #
1003 #==================#
1004
1005
1006 class collapse(Operation):
1007 """
1008     Given an overlay of Element types, collapse into a single Element
1009     object using the supplied function. Collapsing aggregates over the
1010     key dimensions of each object, applying the supplied fn to each group.
1011
1012 This is an example of an Operation that does not involve
1013 any Raster types.
1014 """
1015
1016 fn = param.Callable(default=np.mean, doc="""
1017 The function that is used to collapse the curve y-values for
1018 each x-value.""")
1019
1020 def _process(self, overlay, key=None):
1021 if isinstance(overlay, NdOverlay):
1022 collapse_map = HoloMap(overlay)
1023 else:
1024 collapse_map = HoloMap({i: el for i, el in enumerate(overlay)})
1025 return collapse_map.collapse(function=self.p.fn)
1026
1027
1028 class gridmatrix(param.ParameterizedFunction):
1029 """
1030 The gridmatrix operation takes an Element or HoloMap
1031 of Elements as input and creates a GridMatrix object,
1032 which plots each dimension in the Element against
1033 each other dimension. This provides a very useful
1034 overview of high-dimensional data and is inspired
1035 by pandas and seaborn scatter_matrix implementations.
1036 """
1037
1038 chart_type = param.Parameter(default=Scatter, doc="""
1039 The Element type used to display bivariate distributions
1040 of the data.""")
1041
1042 diagonal_type = param.Parameter(default=None, doc="""
1043 The Element type along the diagonal, may be a Histogram or any
1044 other plot type which can visualize a univariate distribution.
1045 This parameter overrides diagonal_operation.""")
1046
1047 diagonal_operation = param.Parameter(default=histogram, doc="""
1048 The operation applied along the diagonal, may be a histogram-operation
1049 or any other function which returns a viewable element.""")
1050
1051 overlay_dims = param.List(default=[], doc="""
1052 If a HoloMap is supplied, this will allow overlaying one or
1053 more of its key dimensions.""")
1054
1055 def __call__(self, data, **params):
1056 p = param.ParamOverrides(self, params)
1057
1058 if isinstance(data, (HoloMap, NdOverlay)):
1059 ranges = {d.name: data.range(d) for d in data.dimensions()}
1060 data = data.clone({k: GridMatrix(self._process(p, v, ranges))
1061 for k, v in data.items()})
1062 data = Collator(data, merge_type=type(data))()
1063 if p.overlay_dims:
1064 data = data.map(lambda x: x.overlay(p.overlay_dims), (HoloMap,))
1065 return data
1066 elif isinstance(data, Element):
1067 data = self._process(p, data)
1068 return GridMatrix(data)
1069
1070
1071 def _process(self, p, element, ranges={}):
1072 # Creates a unified Dataset.data attribute
1073 # to draw the data from
1074 if isinstance(element.data, np.ndarray):
1075 el_data = element.table(default_datatype)
1076 else:
1077 el_data = element.data
1078
1079 # Get dimensions to plot against each other
1080 types = (str, np.str_, np.object_)+datetime_types
1081 dims = [d for d in element.dimensions()
1082 if _is_number(element.range(d)[0]) and
1083 not issubclass(element.get_dimension_type(d), types)]
1084 permuted_dims = [(d1, d2) for d1 in dims
1085 for d2 in dims[::-1]]
1086
1087 # Convert Histogram type to operation to avoid one case in the if below.
1088 if p.diagonal_type is Histogram:
1089 p.diagonal_type = None
1090 p.diagonal_operation = histogram
1091
1092 data = {}
1093 for d1, d2 in permuted_dims:
1094 if d1 == d2:
1095 if p.diagonal_type is not None:
1096 if p.diagonal_type._auto_indexable_1d:
1097 el = p.diagonal_type(el_data, kdims=[d1], vdims=[d2],
1098 datatype=[default_datatype])
1099 else:
1100 values = element.dimension_values(d1)
1101 el = p.diagonal_type(values, kdims=[d1])
1102 elif p.diagonal_operation is None:
1103 continue
1104 elif p.diagonal_operation is histogram or isinstance(p.diagonal_operation, histogram):
1105 bin_range = ranges.get(d1.name, element.range(d1))
1106 el = p.diagonal_operation(element, dimension=d1.name, bin_range=bin_range)
1107 else:
1108 el = p.diagonal_operation(element, dimension=d1.name)
1109 else:
1110 kdims, vdims = ([d1, d2], []) if len(p.chart_type.kdims) == 2 else (d1, d2)
1111 el = p.chart_type(el_data, kdims=kdims, vdims=vdims,
1112 datatype=[default_datatype])
1113 data[(d1.name, d2.name)] = el
1114 return data
1115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch in the `git diff` format, fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holoviews/operation/element.py b/holoviews/operation/element.py
--- a/holoviews/operation/element.py
+++ b/holoviews/operation/element.py
@@ -909,7 +909,8 @@
if len(sliced) > self.p.max_samples:
prng = np.random.RandomState(self.p.random_seed)
- return sliced.iloc[prng.choice(len(sliced), self.p.max_samples, False)]
+ choice = prng.choice(len(sliced), self.p.max_samples, False)
+ return sliced.iloc[np.sort(choice)]
return sliced
def _process(self, element, key=None):
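
The golden patch works because Curve/Area-style elements connect samples in the order the rows appear, so randomly chosen (unsorted) row indices scramble the interpolation; sorting the sampled indices keeps x monotonic while still decimating. A minimal standalone sketch of that effect, using plain NumPy only (not part of the patched HoloViews code; the array and variable names are illustrative):

```python
import numpy as np

# Same sampling as decimate._process_layer: pick max_samples rows at random.
x = np.linspace(0, 10, 100)
prng = np.random.RandomState(42)
choice = prng.choice(len(x), 50, replace=False)

# Unsorted indices break the monotonic x-order that Curve/Area rely on,
# while np.sort(choice) keeps the same random subset but restores the order.
assert not np.all(np.diff(x[choice]) > 0)
assert np.all(np.diff(x[np.sort(choice)]) > 0)
```

Scatter-like elements are unaffected by the ordering, since they do not interpolate between consecutive rows.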
| {"golden_diff": "diff --git a/holoviews/operation/element.py b/holoviews/operation/element.py\n--- a/holoviews/operation/element.py\n+++ b/holoviews/operation/element.py\n@@ -909,7 +909,8 @@\n \n if len(sliced) > self.p.max_samples:\n prng = np.random.RandomState(self.p.random_seed)\n- return sliced.iloc[prng.choice(len(sliced), self.p.max_samples, False)]\n+ choice = prng.choice(len(sliced), self.p.max_samples, False)\n+ return sliced.iloc[np.sort(choice)]\n return sliced\n \n def _process(self, element, key=None):\n", "issue": "Interpolated charts (Curve, Area, etc) plot data in the provided order, but decimate unsorts data\n#### ALL software version info\r\n| Library | Version |\r\n| -- | -- |\r\n| python | 3.9.13 |\r\n| holoviews | 1.15.0 |\r\n| bokeh | 2.4.3 |\r\n| pandas | 1.4.4 |\r\n| numpy | 1.23.3 |\r\n\r\n#### Description of expected behavior and the observed behavior\r\nI should be able to decimate Curve/Area charts the same way I can decimate scatter charts. Decimating interpolated charts currently results in garbled output.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```python\r\nimport numpy as np\r\nimport pandas as pd\r\nimport holoviews as hv\r\n\r\nhv.extension('bokeh')\r\nx = np.linspace(0, 10, 100)\r\ny1 = np.sin(x)\r\ny2 = np.cos(x)\r\n\r\ntable = hv.Table((x, y1, y2), 'x', ['y1', 'y2'])\r\nhv.Area(table) # See Figure 1\r\n\r\n---------------------------\r\n\r\nfrom holoviews.operation import decimate\r\ndecimate(hv.Area(table), max_samples = 50) # See Figure 2\r\n```\r\n\r\n#### Screenshots or screencasts of the bug in action\r\n\r\nFigure 1\r\n\r\n\r\nFigure 2\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nCollection of either extremely generic or simple Operation\nexamples.\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport param\n\nfrom param import _is_number\n\nfrom ..core import (Operation, NdOverlay, Overlay, GridMatrix,\n HoloMap, Dataset, Element, Collator, Dimension)\nfrom ..core.data import ArrayInterface, DictInterface, default_datatype\nfrom ..core.data.util import dask_array_module\nfrom ..core.util import (\n LooseVersion, group_sanitizer, label_sanitizer, pd, datetime_types, isfinite,\n dt_to_int, isdatetime, is_dask_array, is_cupy_array, is_ibis_expr\n)\nfrom ..element.chart import Histogram, Scatter\nfrom ..element.raster import Image, RGB\nfrom ..element.path import Contours, Polygons\nfrom ..element.util import categorical_aggregate2d # noqa (API import)\nfrom ..streams import RangeXY\n\ncolumn_interfaces = [ArrayInterface, DictInterface]\nif pd:\n from ..core.data import PandasInterface\n column_interfaces.append(PandasInterface)\n\n\ndef identity(x,k): return x\n\nclass operation(Operation):\n \"\"\"\n The most generic operation that wraps any callable into an\n Operation. The callable needs to accept an HoloViews\n component and a key (that may be ignored) and must return a new\n HoloViews component.\n\n This class may be useful for turning a HoloViews method into an\n operation to define as compositor operation. For instance, the\n following definition:\n\n operation.instance(op=lambda x, k: x.collapse(np.subtract))\n\n Could be used to implement a collapse operation to subtracts the\n data between Rasters in an Overlay.\n \"\"\"\n\n output_type = param.Parameter(None, doc=\"\"\"\n The output element type which may be None to disable type\n checking.\n\n May be used to declare useful information to other code in\n HoloViews, e.g. 
required for tab-completion support of operations\n registered with compositors.\"\"\")\n\n group = param.String(default='Operation', doc=\"\"\"\n The group assigned to the result after having applied the\n operator.\"\"\")\n\n op = param.Callable(default=identity, doc=\"\"\"\n The operation used to generate a new HoloViews object returned\n by the operation. By default, the identity operation is\n applied.\"\"\")\n\n def _process(self, view, key=None):\n retval = self.p.op(view, key)\n if (self.p.output_type is not None):\n assert isinstance(retval, self.p.output_type), \\\n \"Return value does not match the declared output type.\"\n return retval.relabel(group=self.p.group)\n\n\nclass factory(Operation):\n \"\"\"\n Simple operation that constructs any element that accepts some\n other element as input. For instance, RGB and HSV elements can be\n created from overlays of Image elements.\n \"\"\"\n\n output_type = param.Parameter(RGB, doc=\"\"\"\n The output type of the factor operation.\n\n By default, if three overlaid Images elements are supplied,\n the corresponding RGB element will be returned. \"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the factory\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the factory\"\"\")\n\n def _process(self, view, key=None):\n return self.p.output_type(view, *self.p.args, **self.p.kwargs)\n\n\nclass function(Operation):\n\n output_type = param.ClassSelector(class_=type, doc=\"\"\"\n The output type of the method operation\"\"\")\n\n input_type = param.ClassSelector(class_=type, doc=\"\"\"\n The object type the method is defined on\"\"\")\n\n fn = param.Callable(default=lambda el, *args, **kwargs: el, doc=\"\"\"\n The function to apply.\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the method\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the method\"\"\")\n\n def _process(self, element, key=None):\n return self.p.fn(element, *self.p.args, **self.p.kwargs)\n\n\nclass method(Operation):\n \"\"\"\n Operation that wraps a method call\n \"\"\"\n\n output_type = param.ClassSelector(class_=type, doc=\"\"\"\n The output type of the method operation\"\"\")\n\n input_type = param.ClassSelector(class_=type, doc=\"\"\"\n The object type the method is defined on\"\"\")\n\n method_name = param.String(default='__call__', doc=\"\"\"\n The method name\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the method\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the method\"\"\")\n\n def _process(self, element, key=None):\n fn = getattr(self.p.input_type, self.p.method_name)\n return fn(element, *self.p.args, **self.p.kwargs)\n\n\nclass apply_when(param.ParameterizedFunction):\n \"\"\"\n Applies a selection depending on the current zoom range. If the\n supplied predicate function returns a True it will apply the\n operation otherwise it will return the raw element after the\n selection. 
For example the following will apply datashading if\n the number of points in the current viewport exceed 1000 otherwise\n just returning the selected points element:\n\n apply_when(points, operation=datashade, predicate=lambda x: x > 1000)\n \"\"\"\n\n operation = param.Callable(default=lambda x: x)\n\n predicate = param.Callable(default=None)\n\n def _apply(self, element, x_range, y_range, invert=False):\n selected = element\n if x_range is not None and y_range is not None:\n selected = element[x_range, y_range]\n condition = self.predicate(selected)\n if (not invert and condition) or (invert and not condition):\n return selected\n elif selected.interface.gridded:\n return selected.clone([])\n else:\n return selected.iloc[:0]\n\n def __call__(self, obj, **params):\n if 'streams' in params:\n streams = params.pop('streams')\n else:\n streams = [RangeXY()]\n self.param.set_param(**params)\n if not self.predicate:\n raise ValueError(\n 'Must provide a predicate function to determine when '\n 'to apply the operation and when to return the selected '\n 'data.'\n )\n applied = self.operation(obj.apply(self._apply, streams=streams))\n raw = obj.apply(self._apply, streams=streams, invert=True)\n return applied * raw\n\n\nclass chain(Operation):\n \"\"\"\n Defining an Operation chain is an easy way to define a new\n Operation from a series of existing ones. The argument is a\n list of Operation (or Operation instances) that are\n called in sequence to generate the returned element.\n\n chain(operations=[gradient, threshold.instance(level=2)])\n\n This operation can accept an Image instance and would first\n compute the gradient before thresholding the result at a level of\n 2.0.\n\n Instances are only required when arguments need to be passed to\n individual operations so the resulting object is a function over a\n single argument.\n \"\"\"\n\n output_type = param.Parameter(Image, doc=\"\"\"\n The output type of the chain operation. Must be supplied if\n the chain is to be used as a channel operation.\"\"\")\n\n group = param.String(default='', doc=\"\"\"\n The group assigned to the result after having applied the chain.\n Defaults to the group produced by the last operation in the chain\"\"\")\n\n operations = param.List(default=[], class_=Operation, doc=\"\"\"\n A list of Operations (or Operation instances)\n that are applied on the input from left to right.\"\"\")\n\n def _process(self, view, key=None):\n processed = view\n for i, operation in enumerate(self.p.operations):\n processed = operation.process_element(\n processed, key, input_ranges=self.p.input_ranges\n )\n\n if not self.p.group:\n return processed\n else:\n return processed.clone(group=self.p.group)\n\n def find(self, operation, skip_nonlinked=True):\n \"\"\"\n Returns the first found occurrence of an operation while\n performing a backward traversal of the chain pipeline.\n \"\"\"\n found = None\n for op in self.operations[::-1]:\n if isinstance(op, operation):\n found = op\n break\n if not op.link_inputs and skip_nonlinked:\n break\n return found\n\n\nclass transform(Operation):\n \"\"\"\n Generic Operation to transform an input Image or RGBA\n element into an output Image. 
The transformation is defined by\n the supplied callable that accepts the data of the input Image\n (typically a numpy array) and returns the transformed data of the\n output Image.\n\n This operator is extremely versatile; for instance, you could\n implement an alternative to the explicit threshold operator with:\n\n operator=lambda x: np.clip(x, 0, 0.5)\n\n Alternatively, you can implement a transform computing the 2D\n autocorrelation using the scipy library with:\n\n operator=lambda x: scipy.signal.correlate2d(x, x)\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Transform', doc=\"\"\"\n The group assigned to the result after applying the\n transform.\"\"\")\n\n operator = param.Callable(doc=\"\"\"\n Function of one argument that transforms the data in the input\n Image to the data in the output Image. By default, acts as\n the identity function such that the output matches the input.\"\"\")\n\n def _process(self, img, key=None):\n processed = (img.data if not self.p.operator\n else self.p.operator(img.data))\n return img.clone(processed, group=self.p.group)\n\n\nclass image_overlay(Operation):\n \"\"\"\n Operation to build a overlay of images to a specification from a\n subset of the required elements.\n\n This is useful for reordering the elements of an overlay,\n duplicating layers of an overlay or creating blank image elements\n in the appropriate positions.\n\n For instance, image_overlay may build a three layered input\n suitable for the RGB factory operation even if supplied with one\n or two of the required channels (creating blank channels for the\n missing elements).\n\n Note that if there is any ambiguity regarding the match, the\n strongest match will be used. In the case of a tie in match\n strength, the first layer in the input is used. One successful\n match is always required.\n \"\"\"\n\n output_type = Overlay\n\n spec = param.String(doc=\"\"\"\n Specification of the output Overlay structure. 
For instance:\n\n Image.R * Image.G * Image.B\n\n Will ensure an overlay of this structure is created even if\n (for instance) only (Image.R * Image.B) is supplied.\n\n Elements in the input overlay that match are placed in the\n appropriate positions and unavailable specification elements\n are created with the specified fill group.\"\"\")\n\n fill = param.Number(default=0)\n\n default_range = param.Tuple(default=(0,1), doc=\"\"\"\n The default range that will be set on the value_dimension of\n any automatically created blank image elements.\"\"\")\n\n group = param.String(default='Transform', doc=\"\"\"\n The group assigned to the resulting overlay.\"\"\")\n\n\n @classmethod\n def _match(cls, el, spec):\n \"Return the strength of the match (None if no match)\"\n spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))\n if not isinstance(el, Image) or spec_dict['type'] != 'Image':\n raise NotImplementedError(\"Only Image currently supported\")\n\n sanitizers = {'group':group_sanitizer, 'label':label_sanitizer}\n strength = 1\n for key in ['group', 'label']:\n attr_value = sanitizers[key](getattr(el, key))\n if key in spec_dict:\n if spec_dict[key] != attr_value: return None\n strength += 1\n return strength\n\n\n def _match_overlay(self, raster, overlay_spec):\n \"\"\"\n Given a raster or input overlay, generate a list of matched\n elements (None if no match) and corresponding tuple of match\n strength values.\n \"\"\"\n ordering = [None]*len(overlay_spec) # Elements to overlay\n strengths = [0]*len(overlay_spec) # Match strengths\n\n elements = raster.values() if isinstance(raster, Overlay) else [raster]\n\n for el in elements:\n for pos in range(len(overlay_spec)):\n strength = self._match(el, overlay_spec[pos])\n if strength is None: continue # No match\n elif (strength <= strengths[pos]): continue # Weaker match\n else: # Stronger match\n ordering[pos] = el\n strengths[pos] = strength\n return ordering, strengths\n\n\n def _process(self, raster, key=None):\n specs = tuple(el.strip() for el in self.p.spec.split('*'))\n ordering, strengths = self._match_overlay(raster, specs)\n if all(el is None for el in ordering):\n raise Exception(\"The image_overlay operation requires at least one match\")\n\n completed = []\n strongest = ordering[np.argmax(strengths)]\n for el, spec in zip(ordering, specs):\n if el is None:\n spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))\n el = Image(np.ones(strongest.data.shape) * self.p.fill,\n group=spec_dict.get('group','Image'),\n label=spec_dict.get('label',''))\n el.vdims[0].range = self.p.default_range\n completed.append(el)\n return np.prod(completed)\n\n\n\nclass threshold(Operation):\n \"\"\"\n Threshold a given Image whereby all values higher than a given\n level map to the specified high value and all values lower than\n that level map to the specified low value.\n \"\"\"\n output_type = Image\n\n level = param.Number(default=0.5, doc=\"\"\"\n The value at which the threshold is applied. 
Values lower than\n the threshold map to the 'low' value and values above map to\n the 'high' value.\"\"\")\n\n high = param.Number(default=1.0, doc=\"\"\"\n The value given to elements greater than (or equal to) the\n threshold.\"\"\")\n\n low = param.Number(default=0.0, doc=\"\"\"\n The value given to elements below the threshold.\"\"\")\n\n group = param.String(default='Threshold', doc=\"\"\"\n The group assigned to the thresholded output.\"\"\")\n\n _per_element = True\n\n def _process(self, matrix, key=None):\n\n if not isinstance(matrix, Image):\n raise TypeError(\"The threshold operation requires a Image as input.\")\n\n arr = matrix.data\n high = np.ones(arr.shape) * self.p.high\n low = np.ones(arr.shape) * self.p.low\n thresholded = np.where(arr > self.p.level, high, low)\n\n return matrix.clone(thresholded, group=self.p.group)\n\n\n\nclass gradient(Operation):\n \"\"\"\n Compute the gradient plot of the supplied Image.\n\n If the Image value dimension is cyclic, the smallest step is taken\n considered the cyclic range\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Gradient', doc=\"\"\"\n The group assigned to the output gradient matrix.\"\"\")\n\n _per_element = True\n\n def _process(self, matrix, key=None):\n\n if len(matrix.vdims) != 1:\n raise ValueError(\"Input matrix to gradient operation must \"\n \"have single value dimension.\")\n\n matrix_dim = matrix.vdims[0]\n\n data = np.flipud(matrix.dimension_values(matrix_dim, flat=False))\n r, c = data.shape\n\n if matrix_dim.cyclic and (None in matrix_dim.range):\n raise Exception(\"Cyclic range must be specified to compute \"\n \"the gradient of cyclic quantities\")\n cyclic_range = None if not matrix_dim.cyclic else np.diff(matrix_dim.range)\n if cyclic_range is not None:\n # shift values such that wrapping works ok\n data = data - matrix_dim.range[0]\n\n dx = np.diff(data, 1, axis=1)[0:r-1, 0:c-1]\n dy = np.diff(data, 1, axis=0)[0:r-1, 0:c-1]\n\n if cyclic_range is not None: # Wrap into the specified range\n # Convert negative differences to an equivalent positive value\n dx = dx % cyclic_range\n dy = dy % cyclic_range\n #\n # Prefer small jumps\n dx_negatives = dx - cyclic_range\n dy_negatives = dy - cyclic_range\n dx = np.where(np.abs(dx_negatives)<dx, dx_negatives, dx)\n dy = np.where(np.abs(dy_negatives)<dy, dy_negatives, dy)\n\n return Image(np.sqrt(dx * dx + dy * dy), bounds=matrix.bounds, group=self.p.group)\n\n\n\nclass convolve(Operation):\n \"\"\"\n Apply a convolution to an overlay using the top layer as the\n kernel for convolving the bottom layer. Both Image elements in\n the input overlay should have a single value dimension.\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Convolution', doc=\"\"\"\n The group assigned to the convolved output.\"\"\")\n\n kernel_roi = param.NumericTuple(default=(0,0,0,0), length=4, doc=\"\"\"\n A 2-dimensional slice of the kernel layer to use in the\n convolution in lbrt (left, bottom, right, top) format. 
By\n default, no slicing is applied.\"\"\")\n\n _per_element = True\n\n def _process(self, overlay, key=None):\n if len(overlay) != 2:\n raise Exception(\"Overlay must contain at least to items.\")\n\n [target, kernel] = overlay.get(0), overlay.get(1)\n\n if len(target.vdims) != 1:\n raise Exception(\"Convolution requires inputs with single value dimensions.\")\n\n xslice = slice(self.p.kernel_roi[0], self.p.kernel_roi[2])\n yslice = slice(self.p.kernel_roi[1], self.p.kernel_roi[3])\n\n k = kernel.data if self.p.kernel_roi == (0,0,0,0) else kernel[xslice, yslice].data\n\n data = np.flipud(target.dimension_values(2, flat=False))\n fft1 = np.fft.fft2(data)\n fft2 = np.fft.fft2(k, s=data.shape)\n convolved_raw = np.fft.ifft2(fft1 * fft2).real\n\n k_rows, k_cols = k.shape\n rolled = np.roll(np.roll(convolved_raw, -(k_cols//2), axis=-1), -(k_rows//2), axis=-2)\n convolved = rolled / float(k.sum())\n\n return Image(convolved, bounds=target.bounds, group=self.p.group)\n\n\n\nclass contours(Operation):\n \"\"\"\n Given a Image with a single channel, annotate it with contour\n lines for a given set of contour levels.\n\n The return is an NdOverlay with a Contours layer for each given\n level, overlaid on top of the input Image.\n \"\"\"\n\n output_type = Overlay\n\n levels = param.ClassSelector(default=10, class_=(list, int), doc=\"\"\"\n A list of scalar values used to specify the contour levels.\"\"\")\n\n group = param.String(default='Level', doc=\"\"\"\n The group assigned to the output contours.\"\"\")\n\n filled = param.Boolean(default=False, doc=\"\"\"\n Whether to generate filled contours\"\"\")\n\n overlaid = param.Boolean(default=False, doc=\"\"\"\n Whether to overlay the contour on the supplied Element.\"\"\")\n\n _per_element = True\n\n def _process(self, element, key=None):\n try:\n from matplotlib.contour import QuadContourSet\n from matplotlib.axes import Axes\n from matplotlib.figure import Figure\n from matplotlib.dates import num2date, date2num\n except ImportError:\n raise ImportError(\"contours operation requires matplotlib.\")\n extent = element.range(0) + element.range(1)[::-1]\n\n xs = element.dimension_values(0, True, flat=False)\n ys = element.dimension_values(1, True, flat=False)\n zs = element.dimension_values(2, flat=False)\n\n # Ensure that coordinate arrays specify bin centers\n if xs.shape[0] != zs.shape[0]:\n xs = xs[:-1] + np.diff(xs, axis=0)/2.\n if xs.shape[1] != zs.shape[1]:\n xs = xs[:, :-1] + (np.diff(xs, axis=1)/2.)\n if ys.shape[0] != zs.shape[0]:\n ys = ys[:-1] + np.diff(ys, axis=0)/2.\n if ys.shape[1] != zs.shape[1]:\n ys = ys[:, :-1] + (np.diff(ys, axis=1)/2.)\n data = (xs, ys, zs)\n\n # if any data is a datetime, transform to matplotlib's numerical format\n data_is_datetime = tuple(isdatetime(arr) for k, arr in enumerate(data))\n if any(data_is_datetime):\n data = tuple(\n date2num(d) if is_datetime else d\n for d, is_datetime in zip(data, data_is_datetime)\n )\n\n xdim, ydim = element.dimensions('key', label=True)\n if self.p.filled:\n contour_type = Polygons\n else:\n contour_type = Contours\n vdims = element.vdims[:1]\n\n kwargs = {}\n levels = self.p.levels\n zmin, zmax = element.range(2)\n if isinstance(self.p.levels, int):\n if zmin == zmax:\n contours = contour_type([], [xdim, ydim], vdims)\n return (element * contours) if self.p.overlaid else contours\n data += (levels,)\n else:\n kwargs = {'levels': levels}\n\n fig = Figure()\n ax = Axes(fig, [0, 0, 1, 1])\n contour_set = QuadContourSet(ax, *data, filled=self.p.filled,\n extent=extent, 
**kwargs)\n levels = np.array(contour_set.get_array())\n crange = levels.min(), levels.max()\n if self.p.filled:\n levels = levels[:-1] + np.diff(levels)/2.\n vdims = [vdims[0].clone(range=crange)]\n\n paths = []\n empty = np.array([[np.nan, np.nan]])\n for level, cset in zip(levels, contour_set.collections):\n exteriors = []\n interiors = []\n for geom in cset.get_paths():\n interior = []\n polys = geom.to_polygons(closed_only=False)\n for ncp, cp in enumerate(polys):\n if any(data_is_datetime[0:2]):\n # transform x/y coordinates back to datetimes\n xs, ys = np.split(cp, 2, axis=1)\n if data_is_datetime[0]:\n xs = np.array(num2date(xs))\n if data_is_datetime[1]:\n ys = np.array(num2date(ys))\n cp = np.concatenate((xs, ys), axis=1)\n if ncp == 0:\n exteriors.append(cp)\n exteriors.append(empty)\n else:\n interior.append(cp)\n if len(polys):\n interiors.append(interior)\n if not exteriors:\n continue\n geom = {\n element.vdims[0].name:\n num2date(level) if data_is_datetime[2] else level,\n (xdim, ydim): np.concatenate(exteriors[:-1])\n }\n if self.p.filled and interiors:\n geom['holes'] = interiors\n paths.append(geom)\n contours = contour_type(paths, label=element.label, kdims=element.kdims, vdims=vdims)\n if self.p.overlaid:\n contours = element * contours\n return contours\n\n\nclass histogram(Operation):\n \"\"\"\n Returns a Histogram of the input element data, binned into\n num_bins over the bin_range (if specified) along the specified\n dimension.\n \"\"\"\n\n bin_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n Specifies the range within which to compute the bins.\"\"\")\n\n bins = param.ClassSelector(default=None, class_=(np.ndarray, list, tuple, str), doc=\"\"\"\n An explicit set of bin edges or a method to find the optimal\n set of bin edges, e.g. 'auto', 'fd', 'scott' etc. For more\n documentation on these approaches see the np.histogram_bin_edges\n documentation.\"\"\")\n\n cumulative = param.Boolean(default=False, doc=\"\"\"\n Whether to compute the cumulative histogram\"\"\")\n\n dimension = param.String(default=None, doc=\"\"\"\n Along which dimension of the Element to compute the histogram.\"\"\")\n\n frequency_label = param.String(default=None, doc=\"\"\"\n Format string defining the label of the frequency dimension of the Histogram.\"\"\")\n\n groupby = param.ClassSelector(default=None, class_=(str, Dimension), doc=\"\"\"\n Defines a dimension to group the Histogram returning an NdOverlay of Histograms.\"\"\")\n\n log = param.Boolean(default=False, doc=\"\"\"\n Whether to use base 10 logarithmic samples for the bin edges.\"\"\")\n\n mean_weighted = param.Boolean(default=False, doc=\"\"\"\n Whether the weighted frequencies are averaged.\"\"\")\n\n normed = param.ObjectSelector(default=False,\n objects=[True, False, 'integral', 'height'],\n doc=\"\"\"\n Controls normalization behavior. If `True` or `'integral'`, then\n `density=True` is passed to np.histogram, and the distribution\n is normalized such that the integral is unity. If `False`,\n then the frequencies will be raw counts. 
If `'height'`, then the\n frequencies are normalized such that the max bin height is unity.\"\"\")\n\n nonzero = param.Boolean(default=False, doc=\"\"\"\n Whether to use only nonzero values when computing the histogram\"\"\")\n\n num_bins = param.Integer(default=20, doc=\"\"\"\n Number of bins in the histogram .\"\"\")\n\n weight_dimension = param.String(default=None, doc=\"\"\"\n Name of the dimension the weighting should be drawn from\"\"\")\n\n style_prefix = param.String(default=None, allow_None=None, doc=\"\"\"\n Used for setting a common style for histograms in a HoloMap or AdjointLayout.\"\"\")\n\n def _process(self, element, key=None):\n if self.p.groupby:\n if not isinstance(element, Dataset):\n raise ValueError('Cannot use histogram groupby on non-Dataset Element')\n grouped = element.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay)\n self.p.groupby = None\n return grouped.map(self._process, Dataset)\n\n normed = False if self.p.mean_weighted and self.p.weight_dimension else self.p.normed\n if self.p.dimension:\n selected_dim = self.p.dimension\n else:\n selected_dim = [d.name for d in element.vdims + element.kdims][0]\n dim = element.get_dimension(selected_dim)\n\n if hasattr(element, 'interface'):\n data = element.interface.values(element, selected_dim, compute=False)\n else:\n data = element.dimension_values(selected_dim)\n\n is_datetime = isdatetime(data)\n if is_datetime:\n data = data.astype('datetime64[ns]').astype('int64')\n\n # Handle different datatypes\n is_finite = isfinite\n is_cupy = is_cupy_array(data)\n if is_cupy:\n import cupy\n full_cupy_support = LooseVersion(cupy.__version__) > LooseVersion('8.0')\n if not full_cupy_support and (normed or self.p.weight_dimension):\n data = cupy.asnumpy(data)\n is_cupy = False\n else:\n is_finite = cupy.isfinite\n\n # Mask data\n if is_ibis_expr(data):\n mask = data.notnull()\n if self.p.nonzero:\n mask = mask & (data != 0)\n data = data.to_projection()\n data = data[mask]\n no_data = not len(data.head(1).execute())\n data = data[dim.name]\n else:\n mask = is_finite(data)\n if self.p.nonzero:\n mask = mask & (data != 0)\n data = data[mask]\n da = dask_array_module()\n no_data = False if da and isinstance(data, da.Array) else not len(data)\n\n # Compute weights\n if self.p.weight_dimension:\n if hasattr(element, 'interface'):\n weights = element.interface.values(element, self.p.weight_dimension, compute=False)\n else:\n weights = element.dimension_values(self.p.weight_dimension)\n weights = weights[mask]\n else:\n weights = None\n\n # Compute bins\n if isinstance(self.p.bins, str):\n bin_data = cupy.asnumpy(data) if is_cupy else data\n edges = np.histogram_bin_edges(bin_data, bins=self.p.bins)\n elif isinstance(self.p.bins, (list, np.ndarray)):\n edges = self.p.bins\n if isdatetime(edges):\n edges = edges.astype('datetime64[ns]').astype('int64')\n else:\n hist_range = self.p.bin_range or element.range(selected_dim)\n # Suppress a warning emitted by Numpy when datetime or timedelta scalars\n # are compared. See https://github.com/numpy/numpy/issues/10095 and\n # https://github.com/numpy/numpy/issues/9210. 
\n with warnings.catch_warnings():\n warnings.filterwarnings(\n action='ignore', message='elementwise comparison failed',\n category=DeprecationWarning\n )\n null_hist_range = hist_range == (0, 0)\n # Avoids range issues including zero bin range and empty bins\n if null_hist_range or any(not isfinite(r) for r in hist_range):\n hist_range = (0, 1)\n steps = self.p.num_bins + 1\n start, end = hist_range\n if is_datetime:\n start, end = dt_to_int(start, 'ns'), dt_to_int(end, 'ns')\n if self.p.log:\n bin_min = max([abs(start), data[data>0].min()])\n edges = np.logspace(np.log10(bin_min), np.log10(end), steps)\n else:\n edges = np.linspace(start, end, steps)\n if is_cupy:\n edges = cupy.asarray(edges)\n\n if not is_dask_array(data) and no_data:\n nbins = self.p.num_bins if self.p.bins is None else len(self.p.bins)-1\n hist = np.zeros(nbins)\n elif hasattr(element, 'interface'):\n density = True if normed else False\n hist, edges = element.interface.histogram(\n data, edges, density=density, weights=weights\n )\n if normed == 'height':\n hist /= hist.max()\n if self.p.weight_dimension and self.p.mean_weighted:\n hist_mean, _ = element.interface.histogram(\n data, density=False, bins=edges\n )\n hist /= hist_mean\n elif normed:\n # This covers True, 'height', 'integral'\n hist, edges = np.histogram(data, density=True,\n weights=weights, bins=edges)\n if normed == 'height':\n hist /= hist.max()\n else:\n hist, edges = np.histogram(data, normed=normed, weights=weights, bins=edges)\n if self.p.weight_dimension and self.p.mean_weighted:\n hist_mean, _ = np.histogram(data, density=False, bins=self.p.num_bins)\n hist /= hist_mean\n\n hist[np.isnan(hist)] = 0\n if is_datetime:\n edges = (edges/1e3).astype('datetime64[us]')\n\n params = {}\n if self.p.weight_dimension:\n params['vdims'] = [element.get_dimension(self.p.weight_dimension)]\n elif self.p.frequency_label:\n label = self.p.frequency_label.format(dim=dim.pprint_label)\n params['vdims'] = [Dimension('Frequency', label=label)]\n else:\n label = 'Frequency' if normed else 'Count'\n params['vdims'] = [Dimension('{0}_{1}'.format(dim.name, label.lower()),\n label=label)]\n\n if element.group != element.__class__.__name__:\n params['group'] = element.group\n\n if self.p.cumulative:\n hist = np.cumsum(hist)\n if self.p.normed in (True, 'integral'):\n hist *= edges[1]-edges[0]\n\n # Save off the computed bin edges so that if this operation instance\n # is used to compute another histogram, it will default to the same\n # bin edges.\n self.bins = list(edges)\n return Histogram((edges, hist), kdims=[element.get_dimension(selected_dim)],\n label=element.label, **params)\n\n\nclass decimate(Operation):\n \"\"\"\n Decimates any column based Element to a specified number of random\n rows if the current element defined by the x_range and y_range\n contains more than max_samples. 
By default the operation returns a\n DynamicMap with a RangeXY stream allowing dynamic downsampling.\n \"\"\"\n\n dynamic = param.Boolean(default=True, doc=\"\"\"\n Enables dynamic processing by default.\"\"\")\n\n link_inputs = param.Boolean(default=True, doc=\"\"\"\n By default, the link_inputs parameter is set to True so that\n when applying shade, backends that support linked streams\n update RangeXY streams on the inputs of the shade operation.\"\"\")\n\n max_samples = param.Integer(default=5000, doc=\"\"\"\n Maximum number of samples to display at the same time.\"\"\")\n\n random_seed = param.Integer(default=42, doc=\"\"\"\n Seed used to initialize randomization.\"\"\")\n\n streams = param.ClassSelector(default=[RangeXY], class_=(dict, list),\n doc=\"\"\"\n List of streams that are applied if dynamic=True, allowing\n for dynamic interaction with the plot.\"\"\")\n\n x_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n The x_range as a tuple of min and max x-value. Auto-ranges\n if set to None.\"\"\")\n\n y_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n The x_range as a tuple of min and max y-value. Auto-ranges\n if set to None.\"\"\")\n\n _per_element = True\n\n def _process_layer(self, element, key=None):\n if not isinstance(element, Dataset):\n raise ValueError(\"Cannot downsample non-Dataset types.\")\n if element.interface not in column_interfaces:\n element = element.clone(tuple(element.columns().values()))\n\n xstart, xend = self.p.x_range if self.p.x_range else element.range(0)\n ystart, yend = self.p.y_range if self.p.y_range else element.range(1)\n\n # Slice element to current ranges\n xdim, ydim = element.dimensions(label=True)[0:2]\n sliced = element.select(**{xdim: (xstart, xend),\n ydim: (ystart, yend)})\n\n if len(sliced) > self.p.max_samples:\n prng = np.random.RandomState(self.p.random_seed)\n return sliced.iloc[prng.choice(len(sliced), self.p.max_samples, False)]\n return sliced\n\n def _process(self, element, key=None):\n return element.map(self._process_layer, Element)\n\n\nclass interpolate_curve(Operation):\n \"\"\"\n Resamples a Curve using the defined interpolation method, e.g.\n to represent changes in y-values as steps.\n \"\"\"\n\n interpolation = param.ObjectSelector(objects=['steps-pre', 'steps-mid',\n 'steps-post', 'linear'],\n default='steps-mid', doc=\"\"\"\n Controls the transition point of the step along the x-axis.\"\"\")\n\n _per_element = True\n\n @classmethod\n def pts_to_prestep(cls, x, values):\n steps = np.zeros(2 * len(x) - 1)\n value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)\n\n steps[0::2] = x\n steps[1::2] = steps[0:-2:2]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n s[0::2] = v\n s[1::2] = s[2::2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n @classmethod\n def pts_to_midstep(cls, x, values):\n steps = np.zeros(2 * len(x))\n value_steps = tuple(np.empty(2 * len(x), dtype=v.dtype) for v in values)\n\n steps[1:-1:2] = steps[2::2] = x[:-1] + (x[1:] - x[:-1])/2\n steps[0], steps[-1] = x[0], x[-1]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n s[0::2] = v\n s[1::2] = s[0::2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n @classmethod\n def pts_to_poststep(cls, x, values):\n steps = np.zeros(2 * len(x) - 1)\n value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)\n\n steps[0::2] = x\n steps[1::2] = steps[2::2]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n s[0::2] = v\n s[1::2] = 
s[0:-2:2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n def _process_layer(self, element, key=None):\n INTERPOLATE_FUNCS = {'steps-pre': self.pts_to_prestep,\n 'steps-mid': self.pts_to_midstep,\n 'steps-post': self.pts_to_poststep}\n if self.p.interpolation not in INTERPOLATE_FUNCS:\n return element\n x = element.dimension_values(0)\n is_datetime = isdatetime(x)\n if is_datetime:\n dt_type = 'datetime64[ns]'\n x = x.astype(dt_type)\n dvals = tuple(element.dimension_values(d) for d in element.dimensions()[1:])\n xs, dvals = INTERPOLATE_FUNCS[self.p.interpolation](x, dvals)\n if is_datetime:\n xs = xs.astype(dt_type)\n return element.clone((xs,)+dvals)\n\n def _process(self, element, key=None):\n return element.map(self._process_layer, Element)\n\n\n#==================#\n# Other operations #\n#==================#\n\n\nclass collapse(Operation):\n \"\"\"\n Given an overlay of Element types, collapse into single Element\n object using supplied function. Collapsing aggregates over the\n key dimensions of each object applying the supplied fn to each group.\n\n This is an example of an Operation that does not involve\n any Raster types.\n \"\"\"\n\n fn = param.Callable(default=np.mean, doc=\"\"\"\n The function that is used to collapse the curve y-values for\n each x-value.\"\"\")\n\n def _process(self, overlay, key=None):\n if isinstance(overlay, NdOverlay):\n collapse_map = HoloMap(overlay)\n else:\n collapse_map = HoloMap({i: el for i, el in enumerate(overlay)})\n return collapse_map.collapse(function=self.p.fn)\n\n\nclass gridmatrix(param.ParameterizedFunction):\n \"\"\"\n The gridmatrix operation takes an Element or HoloMap\n of Elements as input and creates a GridMatrix object,\n which plots each dimension in the Element against\n each other dimension. 
This provides a very useful\n overview of high-dimensional data and is inspired\n by pandas and seaborn scatter_matrix implementations.\n \"\"\"\n\n chart_type = param.Parameter(default=Scatter, doc=\"\"\"\n The Element type used to display bivariate distributions\n of the data.\"\"\")\n\n diagonal_type = param.Parameter(default=None, doc=\"\"\"\n The Element type along the diagonal, may be a Histogram or any\n other plot type which can visualize a univariate distribution.\n This parameter overrides diagonal_operation.\"\"\")\n\n diagonal_operation = param.Parameter(default=histogram, doc=\"\"\"\n The operation applied along the diagonal, may be a histogram-operation\n or any other function which returns a viewable element.\"\"\")\n\n overlay_dims = param.List(default=[], doc=\"\"\"\n If a HoloMap is supplied, this will allow overlaying one or\n more of its key dimensions.\"\"\")\n\n def __call__(self, data, **params):\n p = param.ParamOverrides(self, params)\n\n if isinstance(data, (HoloMap, NdOverlay)):\n ranges = {d.name: data.range(d) for d in data.dimensions()}\n data = data.clone({k: GridMatrix(self._process(p, v, ranges))\n for k, v in data.items()})\n data = Collator(data, merge_type=type(data))()\n if p.overlay_dims:\n data = data.map(lambda x: x.overlay(p.overlay_dims), (HoloMap,))\n return data\n elif isinstance(data, Element):\n data = self._process(p, data)\n return GridMatrix(data)\n\n\n def _process(self, p, element, ranges={}):\n # Creates a unified Dataset.data attribute\n # to draw the data from\n if isinstance(element.data, np.ndarray):\n el_data = element.table(default_datatype)\n else:\n el_data = element.data\n\n # Get dimensions to plot against each other\n types = (str, np.str_, np.object_)+datetime_types\n dims = [d for d in element.dimensions()\n if _is_number(element.range(d)[0]) and\n not issubclass(element.get_dimension_type(d), types)]\n permuted_dims = [(d1, d2) for d1 in dims\n for d2 in dims[::-1]]\n\n # Convert Histogram type to operation to avoid one case in the if below.\n if p.diagonal_type is Histogram:\n p.diagonal_type = None\n p.diagonal_operation = histogram\n\n data = {}\n for d1, d2 in permuted_dims:\n if d1 == d2:\n if p.diagonal_type is not None:\n if p.diagonal_type._auto_indexable_1d:\n el = p.diagonal_type(el_data, kdims=[d1], vdims=[d2],\n datatype=[default_datatype])\n else:\n values = element.dimension_values(d1)\n el = p.diagonal_type(values, kdims=[d1])\n elif p.diagonal_operation is None:\n continue\n elif p.diagonal_operation is histogram or isinstance(p.diagonal_operation, histogram):\n bin_range = ranges.get(d1.name, element.range(d1))\n el = p.diagonal_operation(element, dimension=d1.name, bin_range=bin_range)\n else:\n el = p.diagonal_operation(element, dimension=d1.name)\n else:\n kdims, vdims = ([d1, d2], []) if len(p.chart_type.kdims) == 2 else (d1, d2)\n el = p.chart_type(el_data, kdims=kdims, vdims=vdims,\n datatype=[default_datatype])\n data[(d1.name, d2.name)] = el\n return data\n", "path": "holoviews/operation/element.py"}], "after_files": [{"content": "\"\"\"\nCollection of either extremely generic or simple Operation\nexamples.\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport param\n\nfrom param import _is_number\n\nfrom ..core import (Operation, NdOverlay, Overlay, GridMatrix,\n HoloMap, Dataset, Element, Collator, Dimension)\nfrom ..core.data import ArrayInterface, DictInterface, default_datatype\nfrom ..core.data.util import dask_array_module\nfrom ..core.util import (\n LooseVersion, group_sanitizer, 
label_sanitizer, pd, datetime_types, isfinite,\n dt_to_int, isdatetime, is_dask_array, is_cupy_array, is_ibis_expr\n)\nfrom ..element.chart import Histogram, Scatter\nfrom ..element.raster import Image, RGB\nfrom ..element.path import Contours, Polygons\nfrom ..element.util import categorical_aggregate2d # noqa (API import)\nfrom ..streams import RangeXY\n\ncolumn_interfaces = [ArrayInterface, DictInterface]\nif pd:\n from ..core.data import PandasInterface\n column_interfaces.append(PandasInterface)\n\n\ndef identity(x,k): return x\n\nclass operation(Operation):\n \"\"\"\n The most generic operation that wraps any callable into an\n Operation. The callable needs to accept an HoloViews\n component and a key (that may be ignored) and must return a new\n HoloViews component.\n\n This class may be useful for turning a HoloViews method into an\n operation to define as compositor operation. For instance, the\n following definition:\n\n operation.instance(op=lambda x, k: x.collapse(np.subtract))\n\n Could be used to implement a collapse operation to subtracts the\n data between Rasters in an Overlay.\n \"\"\"\n\n output_type = param.Parameter(None, doc=\"\"\"\n The output element type which may be None to disable type\n checking.\n\n May be used to declare useful information to other code in\n HoloViews, e.g. required for tab-completion support of operations\n registered with compositors.\"\"\")\n\n group = param.String(default='Operation', doc=\"\"\"\n The group assigned to the result after having applied the\n operator.\"\"\")\n\n op = param.Callable(default=identity, doc=\"\"\"\n The operation used to generate a new HoloViews object returned\n by the operation. By default, the identity operation is\n applied.\"\"\")\n\n def _process(self, view, key=None):\n retval = self.p.op(view, key)\n if (self.p.output_type is not None):\n assert isinstance(retval, self.p.output_type), \\\n \"Return value does not match the declared output type.\"\n return retval.relabel(group=self.p.group)\n\n\nclass factory(Operation):\n \"\"\"\n Simple operation that constructs any element that accepts some\n other element as input. For instance, RGB and HSV elements can be\n created from overlays of Image elements.\n \"\"\"\n\n output_type = param.Parameter(RGB, doc=\"\"\"\n The output type of the factor operation.\n\n By default, if three overlaid Images elements are supplied,\n the corresponding RGB element will be returned. 
\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the factory\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the factory\"\"\")\n\n def _process(self, view, key=None):\n return self.p.output_type(view, *self.p.args, **self.p.kwargs)\n\n\nclass function(Operation):\n\n output_type = param.ClassSelector(class_=type, doc=\"\"\"\n The output type of the method operation\"\"\")\n\n input_type = param.ClassSelector(class_=type, doc=\"\"\"\n The object type the method is defined on\"\"\")\n\n fn = param.Callable(default=lambda el, *args, **kwargs: el, doc=\"\"\"\n The function to apply.\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the method\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the method\"\"\")\n\n def _process(self, element, key=None):\n return self.p.fn(element, *self.p.args, **self.p.kwargs)\n\n\nclass method(Operation):\n \"\"\"\n Operation that wraps a method call\n \"\"\"\n\n output_type = param.ClassSelector(class_=type, doc=\"\"\"\n The output type of the method operation\"\"\")\n\n input_type = param.ClassSelector(class_=type, doc=\"\"\"\n The object type the method is defined on\"\"\")\n\n method_name = param.String(default='__call__', doc=\"\"\"\n The method name\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the method\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the method\"\"\")\n\n def _process(self, element, key=None):\n fn = getattr(self.p.input_type, self.p.method_name)\n return fn(element, *self.p.args, **self.p.kwargs)\n\n\nclass apply_when(param.ParameterizedFunction):\n \"\"\"\n Applies a selection depending on the current zoom range. If the\n supplied predicate function returns a True it will apply the\n operation otherwise it will return the raw element after the\n selection. For example the following will apply datashading if\n the number of points in the current viewport exceed 1000 otherwise\n just returning the selected points element:\n\n apply_when(points, operation=datashade, predicate=lambda x: x > 1000)\n \"\"\"\n\n operation = param.Callable(default=lambda x: x)\n\n predicate = param.Callable(default=None)\n\n def _apply(self, element, x_range, y_range, invert=False):\n selected = element\n if x_range is not None and y_range is not None:\n selected = element[x_range, y_range]\n condition = self.predicate(selected)\n if (not invert and condition) or (invert and not condition):\n return selected\n elif selected.interface.gridded:\n return selected.clone([])\n else:\n return selected.iloc[:0]\n\n def __call__(self, obj, **params):\n if 'streams' in params:\n streams = params.pop('streams')\n else:\n streams = [RangeXY()]\n self.param.set_param(**params)\n if not self.predicate:\n raise ValueError(\n 'Must provide a predicate function to determine when '\n 'to apply the operation and when to return the selected '\n 'data.'\n )\n applied = self.operation(obj.apply(self._apply, streams=streams))\n raw = obj.apply(self._apply, streams=streams, invert=True)\n return applied * raw\n\n\nclass chain(Operation):\n \"\"\"\n Defining an Operation chain is an easy way to define a new\n Operation from a series of existing ones. 
The argument is a\n list of Operation (or Operation instances) that are\n called in sequence to generate the returned element.\n\n chain(operations=[gradient, threshold.instance(level=2)])\n\n This operation can accept an Image instance and would first\n compute the gradient before thresholding the result at a level of\n 2.0.\n\n Instances are only required when arguments need to be passed to\n individual operations so the resulting object is a function over a\n single argument.\n \"\"\"\n\n output_type = param.Parameter(Image, doc=\"\"\"\n The output type of the chain operation. Must be supplied if\n the chain is to be used as a channel operation.\"\"\")\n\n group = param.String(default='', doc=\"\"\"\n The group assigned to the result after having applied the chain.\n Defaults to the group produced by the last operation in the chain\"\"\")\n\n operations = param.List(default=[], class_=Operation, doc=\"\"\"\n A list of Operations (or Operation instances)\n that are applied on the input from left to right.\"\"\")\n\n def _process(self, view, key=None):\n processed = view\n for i, operation in enumerate(self.p.operations):\n processed = operation.process_element(\n processed, key, input_ranges=self.p.input_ranges\n )\n\n if not self.p.group:\n return processed\n else:\n return processed.clone(group=self.p.group)\n\n def find(self, operation, skip_nonlinked=True):\n \"\"\"\n Returns the first found occurrence of an operation while\n performing a backward traversal of the chain pipeline.\n \"\"\"\n found = None\n for op in self.operations[::-1]:\n if isinstance(op, operation):\n found = op\n break\n if not op.link_inputs and skip_nonlinked:\n break\n return found\n\n\nclass transform(Operation):\n \"\"\"\n Generic Operation to transform an input Image or RGBA\n element into an output Image. The transformation is defined by\n the supplied callable that accepts the data of the input Image\n (typically a numpy array) and returns the transformed data of the\n output Image.\n\n This operator is extremely versatile; for instance, you could\n implement an alternative to the explicit threshold operator with:\n\n operator=lambda x: np.clip(x, 0, 0.5)\n\n Alternatively, you can implement a transform computing the 2D\n autocorrelation using the scipy library with:\n\n operator=lambda x: scipy.signal.correlate2d(x, x)\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Transform', doc=\"\"\"\n The group assigned to the result after applying the\n transform.\"\"\")\n\n operator = param.Callable(doc=\"\"\"\n Function of one argument that transforms the data in the input\n Image to the data in the output Image. By default, acts as\n the identity function such that the output matches the input.\"\"\")\n\n def _process(self, img, key=None):\n processed = (img.data if not self.p.operator\n else self.p.operator(img.data))\n return img.clone(processed, group=self.p.group)\n\n\nclass image_overlay(Operation):\n \"\"\"\n Operation to build a overlay of images to a specification from a\n subset of the required elements.\n\n This is useful for reordering the elements of an overlay,\n duplicating layers of an overlay or creating blank image elements\n in the appropriate positions.\n\n For instance, image_overlay may build a three layered input\n suitable for the RGB factory operation even if supplied with one\n or two of the required channels (creating blank channels for the\n missing elements).\n\n Note that if there is any ambiguity regarding the match, the\n strongest match will be used. 
In the case of a tie in match\n strength, the first layer in the input is used. One successful\n match is always required.\n \"\"\"\n\n output_type = Overlay\n\n spec = param.String(doc=\"\"\"\n Specification of the output Overlay structure. For instance:\n\n Image.R * Image.G * Image.B\n\n Will ensure an overlay of this structure is created even if\n (for instance) only (Image.R * Image.B) is supplied.\n\n Elements in the input overlay that match are placed in the\n appropriate positions and unavailable specification elements\n are created with the specified fill group.\"\"\")\n\n fill = param.Number(default=0)\n\n default_range = param.Tuple(default=(0,1), doc=\"\"\"\n The default range that will be set on the value_dimension of\n any automatically created blank image elements.\"\"\")\n\n group = param.String(default='Transform', doc=\"\"\"\n The group assigned to the resulting overlay.\"\"\")\n\n\n @classmethod\n def _match(cls, el, spec):\n \"Return the strength of the match (None if no match)\"\n spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))\n if not isinstance(el, Image) or spec_dict['type'] != 'Image':\n raise NotImplementedError(\"Only Image currently supported\")\n\n sanitizers = {'group':group_sanitizer, 'label':label_sanitizer}\n strength = 1\n for key in ['group', 'label']:\n attr_value = sanitizers[key](getattr(el, key))\n if key in spec_dict:\n if spec_dict[key] != attr_value: return None\n strength += 1\n return strength\n\n\n def _match_overlay(self, raster, overlay_spec):\n \"\"\"\n Given a raster or input overlay, generate a list of matched\n elements (None if no match) and corresponding tuple of match\n strength values.\n \"\"\"\n ordering = [None]*len(overlay_spec) # Elements to overlay\n strengths = [0]*len(overlay_spec) # Match strengths\n\n elements = raster.values() if isinstance(raster, Overlay) else [raster]\n\n for el in elements:\n for pos in range(len(overlay_spec)):\n strength = self._match(el, overlay_spec[pos])\n if strength is None: continue # No match\n elif (strength <= strengths[pos]): continue # Weaker match\n else: # Stronger match\n ordering[pos] = el\n strengths[pos] = strength\n return ordering, strengths\n\n\n def _process(self, raster, key=None):\n specs = tuple(el.strip() for el in self.p.spec.split('*'))\n ordering, strengths = self._match_overlay(raster, specs)\n if all(el is None for el in ordering):\n raise Exception(\"The image_overlay operation requires at least one match\")\n\n completed = []\n strongest = ordering[np.argmax(strengths)]\n for el, spec in zip(ordering, specs):\n if el is None:\n spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))\n el = Image(np.ones(strongest.data.shape) * self.p.fill,\n group=spec_dict.get('group','Image'),\n label=spec_dict.get('label',''))\n el.vdims[0].range = self.p.default_range\n completed.append(el)\n return np.prod(completed)\n\n\n\nclass threshold(Operation):\n \"\"\"\n Threshold a given Image whereby all values higher than a given\n level map to the specified high value and all values lower than\n that level map to the specified low value.\n \"\"\"\n output_type = Image\n\n level = param.Number(default=0.5, doc=\"\"\"\n The value at which the threshold is applied. 
Values lower than\n the threshold map to the 'low' value and values above map to\n the 'high' value.\"\"\")\n\n high = param.Number(default=1.0, doc=\"\"\"\n The value given to elements greater than (or equal to) the\n threshold.\"\"\")\n\n low = param.Number(default=0.0, doc=\"\"\"\n The value given to elements below the threshold.\"\"\")\n\n group = param.String(default='Threshold', doc=\"\"\"\n The group assigned to the thresholded output.\"\"\")\n\n _per_element = True\n\n def _process(self, matrix, key=None):\n\n if not isinstance(matrix, Image):\n raise TypeError(\"The threshold operation requires a Image as input.\")\n\n arr = matrix.data\n high = np.ones(arr.shape) * self.p.high\n low = np.ones(arr.shape) * self.p.low\n thresholded = np.where(arr > self.p.level, high, low)\n\n return matrix.clone(thresholded, group=self.p.group)\n\n\n\nclass gradient(Operation):\n \"\"\"\n Compute the gradient plot of the supplied Image.\n\n If the Image value dimension is cyclic, the smallest step is taken\n considered the cyclic range\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Gradient', doc=\"\"\"\n The group assigned to the output gradient matrix.\"\"\")\n\n _per_element = True\n\n def _process(self, matrix, key=None):\n\n if len(matrix.vdims) != 1:\n raise ValueError(\"Input matrix to gradient operation must \"\n \"have single value dimension.\")\n\n matrix_dim = matrix.vdims[0]\n\n data = np.flipud(matrix.dimension_values(matrix_dim, flat=False))\n r, c = data.shape\n\n if matrix_dim.cyclic and (None in matrix_dim.range):\n raise Exception(\"Cyclic range must be specified to compute \"\n \"the gradient of cyclic quantities\")\n cyclic_range = None if not matrix_dim.cyclic else np.diff(matrix_dim.range)\n if cyclic_range is not None:\n # shift values such that wrapping works ok\n data = data - matrix_dim.range[0]\n\n dx = np.diff(data, 1, axis=1)[0:r-1, 0:c-1]\n dy = np.diff(data, 1, axis=0)[0:r-1, 0:c-1]\n\n if cyclic_range is not None: # Wrap into the specified range\n # Convert negative differences to an equivalent positive value\n dx = dx % cyclic_range\n dy = dy % cyclic_range\n #\n # Prefer small jumps\n dx_negatives = dx - cyclic_range\n dy_negatives = dy - cyclic_range\n dx = np.where(np.abs(dx_negatives)<dx, dx_negatives, dx)\n dy = np.where(np.abs(dy_negatives)<dy, dy_negatives, dy)\n\n return Image(np.sqrt(dx * dx + dy * dy), bounds=matrix.bounds, group=self.p.group)\n\n\n\nclass convolve(Operation):\n \"\"\"\n Apply a convolution to an overlay using the top layer as the\n kernel for convolving the bottom layer. Both Image elements in\n the input overlay should have a single value dimension.\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Convolution', doc=\"\"\"\n The group assigned to the convolved output.\"\"\")\n\n kernel_roi = param.NumericTuple(default=(0,0,0,0), length=4, doc=\"\"\"\n A 2-dimensional slice of the kernel layer to use in the\n convolution in lbrt (left, bottom, right, top) format. 
By\n default, no slicing is applied.\"\"\")\n\n _per_element = True\n\n def _process(self, overlay, key=None):\n if len(overlay) != 2:\n raise Exception(\"Overlay must contain at least to items.\")\n\n [target, kernel] = overlay.get(0), overlay.get(1)\n\n if len(target.vdims) != 1:\n raise Exception(\"Convolution requires inputs with single value dimensions.\")\n\n xslice = slice(self.p.kernel_roi[0], self.p.kernel_roi[2])\n yslice = slice(self.p.kernel_roi[1], self.p.kernel_roi[3])\n\n k = kernel.data if self.p.kernel_roi == (0,0,0,0) else kernel[xslice, yslice].data\n\n data = np.flipud(target.dimension_values(2, flat=False))\n fft1 = np.fft.fft2(data)\n fft2 = np.fft.fft2(k, s=data.shape)\n convolved_raw = np.fft.ifft2(fft1 * fft2).real\n\n k_rows, k_cols = k.shape\n rolled = np.roll(np.roll(convolved_raw, -(k_cols//2), axis=-1), -(k_rows//2), axis=-2)\n convolved = rolled / float(k.sum())\n\n return Image(convolved, bounds=target.bounds, group=self.p.group)\n\n\n\nclass contours(Operation):\n \"\"\"\n Given a Image with a single channel, annotate it with contour\n lines for a given set of contour levels.\n\n The return is an NdOverlay with a Contours layer for each given\n level, overlaid on top of the input Image.\n \"\"\"\n\n output_type = Overlay\n\n levels = param.ClassSelector(default=10, class_=(list, int), doc=\"\"\"\n A list of scalar values used to specify the contour levels.\"\"\")\n\n group = param.String(default='Level', doc=\"\"\"\n The group assigned to the output contours.\"\"\")\n\n filled = param.Boolean(default=False, doc=\"\"\"\n Whether to generate filled contours\"\"\")\n\n overlaid = param.Boolean(default=False, doc=\"\"\"\n Whether to overlay the contour on the supplied Element.\"\"\")\n\n _per_element = True\n\n def _process(self, element, key=None):\n try:\n from matplotlib.contour import QuadContourSet\n from matplotlib.axes import Axes\n from matplotlib.figure import Figure\n from matplotlib.dates import num2date, date2num\n except ImportError:\n raise ImportError(\"contours operation requires matplotlib.\")\n extent = element.range(0) + element.range(1)[::-1]\n\n xs = element.dimension_values(0, True, flat=False)\n ys = element.dimension_values(1, True, flat=False)\n zs = element.dimension_values(2, flat=False)\n\n # Ensure that coordinate arrays specify bin centers\n if xs.shape[0] != zs.shape[0]:\n xs = xs[:-1] + np.diff(xs, axis=0)/2.\n if xs.shape[1] != zs.shape[1]:\n xs = xs[:, :-1] + (np.diff(xs, axis=1)/2.)\n if ys.shape[0] != zs.shape[0]:\n ys = ys[:-1] + np.diff(ys, axis=0)/2.\n if ys.shape[1] != zs.shape[1]:\n ys = ys[:, :-1] + (np.diff(ys, axis=1)/2.)\n data = (xs, ys, zs)\n\n # if any data is a datetime, transform to matplotlib's numerical format\n data_is_datetime = tuple(isdatetime(arr) for k, arr in enumerate(data))\n if any(data_is_datetime):\n data = tuple(\n date2num(d) if is_datetime else d\n for d, is_datetime in zip(data, data_is_datetime)\n )\n\n xdim, ydim = element.dimensions('key', label=True)\n if self.p.filled:\n contour_type = Polygons\n else:\n contour_type = Contours\n vdims = element.vdims[:1]\n\n kwargs = {}\n levels = self.p.levels\n zmin, zmax = element.range(2)\n if isinstance(self.p.levels, int):\n if zmin == zmax:\n contours = contour_type([], [xdim, ydim], vdims)\n return (element * contours) if self.p.overlaid else contours\n data += (levels,)\n else:\n kwargs = {'levels': levels}\n\n fig = Figure()\n ax = Axes(fig, [0, 0, 1, 1])\n contour_set = QuadContourSet(ax, *data, filled=self.p.filled,\n extent=extent, 
**kwargs)\n levels = np.array(contour_set.get_array())\n crange = levels.min(), levels.max()\n if self.p.filled:\n levels = levels[:-1] + np.diff(levels)/2.\n vdims = [vdims[0].clone(range=crange)]\n\n paths = []\n empty = np.array([[np.nan, np.nan]])\n for level, cset in zip(levels, contour_set.collections):\n exteriors = []\n interiors = []\n for geom in cset.get_paths():\n interior = []\n polys = geom.to_polygons(closed_only=False)\n for ncp, cp in enumerate(polys):\n if any(data_is_datetime[0:2]):\n # transform x/y coordinates back to datetimes\n xs, ys = np.split(cp, 2, axis=1)\n if data_is_datetime[0]:\n xs = np.array(num2date(xs))\n if data_is_datetime[1]:\n ys = np.array(num2date(ys))\n cp = np.concatenate((xs, ys), axis=1)\n if ncp == 0:\n exteriors.append(cp)\n exteriors.append(empty)\n else:\n interior.append(cp)\n if len(polys):\n interiors.append(interior)\n if not exteriors:\n continue\n geom = {\n element.vdims[0].name:\n num2date(level) if data_is_datetime[2] else level,\n (xdim, ydim): np.concatenate(exteriors[:-1])\n }\n if self.p.filled and interiors:\n geom['holes'] = interiors\n paths.append(geom)\n contours = contour_type(paths, label=element.label, kdims=element.kdims, vdims=vdims)\n if self.p.overlaid:\n contours = element * contours\n return contours\n\n\nclass histogram(Operation):\n \"\"\"\n Returns a Histogram of the input element data, binned into\n num_bins over the bin_range (if specified) along the specified\n dimension.\n \"\"\"\n\n bin_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n Specifies the range within which to compute the bins.\"\"\")\n\n bins = param.ClassSelector(default=None, class_=(np.ndarray, list, tuple, str), doc=\"\"\"\n An explicit set of bin edges or a method to find the optimal\n set of bin edges, e.g. 'auto', 'fd', 'scott' etc. For more\n documentation on these approaches see the np.histogram_bin_edges\n documentation.\"\"\")\n\n cumulative = param.Boolean(default=False, doc=\"\"\"\n Whether to compute the cumulative histogram\"\"\")\n\n dimension = param.String(default=None, doc=\"\"\"\n Along which dimension of the Element to compute the histogram.\"\"\")\n\n frequency_label = param.String(default=None, doc=\"\"\"\n Format string defining the label of the frequency dimension of the Histogram.\"\"\")\n\n groupby = param.ClassSelector(default=None, class_=(str, Dimension), doc=\"\"\"\n Defines a dimension to group the Histogram returning an NdOverlay of Histograms.\"\"\")\n\n log = param.Boolean(default=False, doc=\"\"\"\n Whether to use base 10 logarithmic samples for the bin edges.\"\"\")\n\n mean_weighted = param.Boolean(default=False, doc=\"\"\"\n Whether the weighted frequencies are averaged.\"\"\")\n\n normed = param.ObjectSelector(default=False,\n objects=[True, False, 'integral', 'height'],\n doc=\"\"\"\n Controls normalization behavior. If `True` or `'integral'`, then\n `density=True` is passed to np.histogram, and the distribution\n is normalized such that the integral is unity. If `False`,\n then the frequencies will be raw counts. 
If `'height'`, then the\n frequencies are normalized such that the max bin height is unity.\"\"\")\n\n nonzero = param.Boolean(default=False, doc=\"\"\"\n Whether to use only nonzero values when computing the histogram\"\"\")\n\n num_bins = param.Integer(default=20, doc=\"\"\"\n Number of bins in the histogram .\"\"\")\n\n weight_dimension = param.String(default=None, doc=\"\"\"\n Name of the dimension the weighting should be drawn from\"\"\")\n\n style_prefix = param.String(default=None, allow_None=None, doc=\"\"\"\n Used for setting a common style for histograms in a HoloMap or AdjointLayout.\"\"\")\n\n def _process(self, element, key=None):\n if self.p.groupby:\n if not isinstance(element, Dataset):\n raise ValueError('Cannot use histogram groupby on non-Dataset Element')\n grouped = element.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay)\n self.p.groupby = None\n return grouped.map(self._process, Dataset)\n\n normed = False if self.p.mean_weighted and self.p.weight_dimension else self.p.normed\n if self.p.dimension:\n selected_dim = self.p.dimension\n else:\n selected_dim = [d.name for d in element.vdims + element.kdims][0]\n dim = element.get_dimension(selected_dim)\n\n if hasattr(element, 'interface'):\n data = element.interface.values(element, selected_dim, compute=False)\n else:\n data = element.dimension_values(selected_dim)\n\n is_datetime = isdatetime(data)\n if is_datetime:\n data = data.astype('datetime64[ns]').astype('int64')\n\n # Handle different datatypes\n is_finite = isfinite\n is_cupy = is_cupy_array(data)\n if is_cupy:\n import cupy\n full_cupy_support = LooseVersion(cupy.__version__) > LooseVersion('8.0')\n if not full_cupy_support and (normed or self.p.weight_dimension):\n data = cupy.asnumpy(data)\n is_cupy = False\n else:\n is_finite = cupy.isfinite\n\n # Mask data\n if is_ibis_expr(data):\n mask = data.notnull()\n if self.p.nonzero:\n mask = mask & (data != 0)\n data = data.to_projection()\n data = data[mask]\n no_data = not len(data.head(1).execute())\n data = data[dim.name]\n else:\n mask = is_finite(data)\n if self.p.nonzero:\n mask = mask & (data != 0)\n data = data[mask]\n da = dask_array_module()\n no_data = False if da and isinstance(data, da.Array) else not len(data)\n\n # Compute weights\n if self.p.weight_dimension:\n if hasattr(element, 'interface'):\n weights = element.interface.values(element, self.p.weight_dimension, compute=False)\n else:\n weights = element.dimension_values(self.p.weight_dimension)\n weights = weights[mask]\n else:\n weights = None\n\n # Compute bins\n if isinstance(self.p.bins, str):\n bin_data = cupy.asnumpy(data) if is_cupy else data\n edges = np.histogram_bin_edges(bin_data, bins=self.p.bins)\n elif isinstance(self.p.bins, (list, np.ndarray)):\n edges = self.p.bins\n if isdatetime(edges):\n edges = edges.astype('datetime64[ns]').astype('int64')\n else:\n hist_range = self.p.bin_range or element.range(selected_dim)\n # Suppress a warning emitted by Numpy when datetime or timedelta scalars\n # are compared. See https://github.com/numpy/numpy/issues/10095 and\n # https://github.com/numpy/numpy/issues/9210. 
\n with warnings.catch_warnings():\n warnings.filterwarnings(\n action='ignore', message='elementwise comparison failed',\n category=DeprecationWarning\n )\n null_hist_range = hist_range == (0, 0)\n # Avoids range issues including zero bin range and empty bins\n if null_hist_range or any(not isfinite(r) for r in hist_range):\n hist_range = (0, 1)\n steps = self.p.num_bins + 1\n start, end = hist_range\n if is_datetime:\n start, end = dt_to_int(start, 'ns'), dt_to_int(end, 'ns')\n if self.p.log:\n bin_min = max([abs(start), data[data>0].min()])\n edges = np.logspace(np.log10(bin_min), np.log10(end), steps)\n else:\n edges = np.linspace(start, end, steps)\n if is_cupy:\n edges = cupy.asarray(edges)\n\n if not is_dask_array(data) and no_data:\n nbins = self.p.num_bins if self.p.bins is None else len(self.p.bins)-1\n hist = np.zeros(nbins)\n elif hasattr(element, 'interface'):\n density = True if normed else False\n hist, edges = element.interface.histogram(\n data, edges, density=density, weights=weights\n )\n if normed == 'height':\n hist /= hist.max()\n if self.p.weight_dimension and self.p.mean_weighted:\n hist_mean, _ = element.interface.histogram(\n data, density=False, bins=edges\n )\n hist /= hist_mean\n elif normed:\n # This covers True, 'height', 'integral'\n hist, edges = np.histogram(data, density=True,\n weights=weights, bins=edges)\n if normed == 'height':\n hist /= hist.max()\n else:\n hist, edges = np.histogram(data, normed=normed, weights=weights, bins=edges)\n if self.p.weight_dimension and self.p.mean_weighted:\n hist_mean, _ = np.histogram(data, density=False, bins=self.p.num_bins)\n hist /= hist_mean\n\n hist[np.isnan(hist)] = 0\n if is_datetime:\n edges = (edges/1e3).astype('datetime64[us]')\n\n params = {}\n if self.p.weight_dimension:\n params['vdims'] = [element.get_dimension(self.p.weight_dimension)]\n elif self.p.frequency_label:\n label = self.p.frequency_label.format(dim=dim.pprint_label)\n params['vdims'] = [Dimension('Frequency', label=label)]\n else:\n label = 'Frequency' if normed else 'Count'\n params['vdims'] = [Dimension('{0}_{1}'.format(dim.name, label.lower()),\n label=label)]\n\n if element.group != element.__class__.__name__:\n params['group'] = element.group\n\n if self.p.cumulative:\n hist = np.cumsum(hist)\n if self.p.normed in (True, 'integral'):\n hist *= edges[1]-edges[0]\n\n # Save off the computed bin edges so that if this operation instance\n # is used to compute another histogram, it will default to the same\n # bin edges.\n self.bins = list(edges)\n return Histogram((edges, hist), kdims=[element.get_dimension(selected_dim)],\n label=element.label, **params)\n\n\nclass decimate(Operation):\n \"\"\"\n Decimates any column based Element to a specified number of random\n rows if the current element defined by the x_range and y_range\n contains more than max_samples. 
By default the operation returns a\n DynamicMap with a RangeXY stream allowing dynamic downsampling.\n \"\"\"\n\n dynamic = param.Boolean(default=True, doc=\"\"\"\n Enables dynamic processing by default.\"\"\")\n\n link_inputs = param.Boolean(default=True, doc=\"\"\"\n By default, the link_inputs parameter is set to True so that\n when applying shade, backends that support linked streams\n update RangeXY streams on the inputs of the shade operation.\"\"\")\n\n max_samples = param.Integer(default=5000, doc=\"\"\"\n Maximum number of samples to display at the same time.\"\"\")\n\n random_seed = param.Integer(default=42, doc=\"\"\"\n Seed used to initialize randomization.\"\"\")\n\n streams = param.ClassSelector(default=[RangeXY], class_=(dict, list),\n doc=\"\"\"\n List of streams that are applied if dynamic=True, allowing\n for dynamic interaction with the plot.\"\"\")\n\n x_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n The x_range as a tuple of min and max x-value. Auto-ranges\n if set to None.\"\"\")\n\n y_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n The x_range as a tuple of min and max y-value. Auto-ranges\n if set to None.\"\"\")\n\n _per_element = True\n\n def _process_layer(self, element, key=None):\n if not isinstance(element, Dataset):\n raise ValueError(\"Cannot downsample non-Dataset types.\")\n if element.interface not in column_interfaces:\n element = element.clone(tuple(element.columns().values()))\n\n xstart, xend = self.p.x_range if self.p.x_range else element.range(0)\n ystart, yend = self.p.y_range if self.p.y_range else element.range(1)\n\n # Slice element to current ranges\n xdim, ydim = element.dimensions(label=True)[0:2]\n sliced = element.select(**{xdim: (xstart, xend),\n ydim: (ystart, yend)})\n\n if len(sliced) > self.p.max_samples:\n prng = np.random.RandomState(self.p.random_seed)\n choice = prng.choice(len(sliced), self.p.max_samples, False)\n return sliced.iloc[np.sort(choice)]\n return sliced\n\n def _process(self, element, key=None):\n return element.map(self._process_layer, Element)\n\n\nclass interpolate_curve(Operation):\n \"\"\"\n Resamples a Curve using the defined interpolation method, e.g.\n to represent changes in y-values as steps.\n \"\"\"\n\n interpolation = param.ObjectSelector(objects=['steps-pre', 'steps-mid',\n 'steps-post', 'linear'],\n default='steps-mid', doc=\"\"\"\n Controls the transition point of the step along the x-axis.\"\"\")\n\n _per_element = True\n\n @classmethod\n def pts_to_prestep(cls, x, values):\n steps = np.zeros(2 * len(x) - 1)\n value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)\n\n steps[0::2] = x\n steps[1::2] = steps[0:-2:2]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n s[0::2] = v\n s[1::2] = s[2::2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n @classmethod\n def pts_to_midstep(cls, x, values):\n steps = np.zeros(2 * len(x))\n value_steps = tuple(np.empty(2 * len(x), dtype=v.dtype) for v in values)\n\n steps[1:-1:2] = steps[2::2] = x[:-1] + (x[1:] - x[:-1])/2\n steps[0], steps[-1] = x[0], x[-1]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n s[0::2] = v\n s[1::2] = s[0::2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n @classmethod\n def pts_to_poststep(cls, x, values):\n steps = np.zeros(2 * len(x) - 1)\n value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)\n\n steps[0::2] = x\n steps[1::2] = steps[2::2]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n 
s[0::2] = v\n s[1::2] = s[0:-2:2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n def _process_layer(self, element, key=None):\n INTERPOLATE_FUNCS = {'steps-pre': self.pts_to_prestep,\n 'steps-mid': self.pts_to_midstep,\n 'steps-post': self.pts_to_poststep}\n if self.p.interpolation not in INTERPOLATE_FUNCS:\n return element\n x = element.dimension_values(0)\n is_datetime = isdatetime(x)\n if is_datetime:\n dt_type = 'datetime64[ns]'\n x = x.astype(dt_type)\n dvals = tuple(element.dimension_values(d) for d in element.dimensions()[1:])\n xs, dvals = INTERPOLATE_FUNCS[self.p.interpolation](x, dvals)\n if is_datetime:\n xs = xs.astype(dt_type)\n return element.clone((xs,)+dvals)\n\n def _process(self, element, key=None):\n return element.map(self._process_layer, Element)\n\n\n#==================#\n# Other operations #\n#==================#\n\n\nclass collapse(Operation):\n \"\"\"\n Given an overlay of Element types, collapse into single Element\n object using supplied function. Collapsing aggregates over the\n key dimensions of each object applying the supplied fn to each group.\n\n This is an example of an Operation that does not involve\n any Raster types.\n \"\"\"\n\n fn = param.Callable(default=np.mean, doc=\"\"\"\n The function that is used to collapse the curve y-values for\n each x-value.\"\"\")\n\n def _process(self, overlay, key=None):\n if isinstance(overlay, NdOverlay):\n collapse_map = HoloMap(overlay)\n else:\n collapse_map = HoloMap({i: el for i, el in enumerate(overlay)})\n return collapse_map.collapse(function=self.p.fn)\n\n\nclass gridmatrix(param.ParameterizedFunction):\n \"\"\"\n The gridmatrix operation takes an Element or HoloMap\n of Elements as input and creates a GridMatrix object,\n which plots each dimension in the Element against\n each other dimension. 
This provides a very useful\n overview of high-dimensional data and is inspired\n by pandas and seaborn scatter_matrix implementations.\n \"\"\"\n\n chart_type = param.Parameter(default=Scatter, doc=\"\"\"\n The Element type used to display bivariate distributions\n of the data.\"\"\")\n\n diagonal_type = param.Parameter(default=None, doc=\"\"\"\n The Element type along the diagonal, may be a Histogram or any\n other plot type which can visualize a univariate distribution.\n This parameter overrides diagonal_operation.\"\"\")\n\n diagonal_operation = param.Parameter(default=histogram, doc=\"\"\"\n The operation applied along the diagonal, may be a histogram-operation\n or any other function which returns a viewable element.\"\"\")\n\n overlay_dims = param.List(default=[], doc=\"\"\"\n If a HoloMap is supplied, this will allow overlaying one or\n more of its key dimensions.\"\"\")\n\n def __call__(self, data, **params):\n p = param.ParamOverrides(self, params)\n\n if isinstance(data, (HoloMap, NdOverlay)):\n ranges = {d.name: data.range(d) for d in data.dimensions()}\n data = data.clone({k: GridMatrix(self._process(p, v, ranges))\n for k, v in data.items()})\n data = Collator(data, merge_type=type(data))()\n if p.overlay_dims:\n data = data.map(lambda x: x.overlay(p.overlay_dims), (HoloMap,))\n return data\n elif isinstance(data, Element):\n data = self._process(p, data)\n return GridMatrix(data)\n\n\n def _process(self, p, element, ranges={}):\n # Creates a unified Dataset.data attribute\n # to draw the data from\n if isinstance(element.data, np.ndarray):\n el_data = element.table(default_datatype)\n else:\n el_data = element.data\n\n # Get dimensions to plot against each other\n types = (str, np.str_, np.object_)+datetime_types\n dims = [d for d in element.dimensions()\n if _is_number(element.range(d)[0]) and\n not issubclass(element.get_dimension_type(d), types)]\n permuted_dims = [(d1, d2) for d1 in dims\n for d2 in dims[::-1]]\n\n # Convert Histogram type to operation to avoid one case in the if below.\n if p.diagonal_type is Histogram:\n p.diagonal_type = None\n p.diagonal_operation = histogram\n\n data = {}\n for d1, d2 in permuted_dims:\n if d1 == d2:\n if p.diagonal_type is not None:\n if p.diagonal_type._auto_indexable_1d:\n el = p.diagonal_type(el_data, kdims=[d1], vdims=[d2],\n datatype=[default_datatype])\n else:\n values = element.dimension_values(d1)\n el = p.diagonal_type(values, kdims=[d1])\n elif p.diagonal_operation is None:\n continue\n elif p.diagonal_operation is histogram or isinstance(p.diagonal_operation, histogram):\n bin_range = ranges.get(d1.name, element.range(d1))\n el = p.diagonal_operation(element, dimension=d1.name, bin_range=bin_range)\n else:\n el = p.diagonal_operation(element, dimension=d1.name)\n else:\n kdims, vdims = ([d1, d2], []) if len(p.chart_type.kdims) == 2 else (d1, d2)\n el = p.chart_type(el_data, kdims=kdims, vdims=vdims,\n datatype=[default_datatype])\n data[(d1.name, d2.name)] = el\n return data\n", "path": "holoviews/operation/element.py"}]} |
gh_patches_debug_1488 | rasdani/github-patches | git_diff | interlegis__sapl-979 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error in the adjust_normajuridica_depois_salvar() function
```
File "[...]/sapl/sapl/legacy/migration.py", line 636, in adjust_normajuridica_depois_salvar
new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))
ValueError: invalid literal for int() with base 10: ''
```
We need to investigate why this error is happening. Apparently the script cannot resolve the query `AssuntoNorma.objects.get(pk=pk_assunto)` because `pk_assunto` is an empty string when it should be a number. This may come from an inconsistency in the old many-to-many relation of SAPL 2.5, which was stored as a comma-separated string (see the sketch just after this issue block).
--- END ISSUE ---
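
The explanation above suggests that splitting the legacy comma-separated value yields empty tokens (a trailing comma or an empty field produces `''`). Below is a minimal, hedged sketch of the kind of guard that would avoid the `ValueError`; the helper name `vincula_assuntos`, its `lista_assuntos` argument and where that string comes from are assumptions for illustration — only `AssuntoNorma`, `new.assuntos` and the module-level `warn()` appear in the files below.

```
from sapl.norma.models import AssuntoNorma


def vincula_assuntos(new, lista_assuntos):
    # `lista_assuntos` is assumed to hold the raw legacy value of the old
    # many-to-many relation, a comma-separated string such as "1,3," or "".
    for pk_assunto in (lista_assuntos or '').split(','):
        pk_assunto = pk_assunto.strip()
        if not pk_assunto:
            continue  # skip the empty token that raises the ValueError
        try:
            new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))
        except AssuntoNorma.DoesNotExist:
            # inconsistent legacy data: report it instead of aborting the run;
            # warn() is the module-level helper defined at line 108 of the
            # migration.py listing below
            warn('AssuntoNorma %s não encontrado para a norma %s'
                 % (pk_assunto, new.pk))
```

Whether blank or unknown codes should be skipped silently, logged via `warn()`, or recorded as a `ProblemaMigracao` entry is a design decision the actual fix still has to make.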
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/legacy/migration.py`
Content:
```
1 import re
2 from datetime import date
3 from subprocess import PIPE, call
4
5 import pkg_resources
6 import reversion
7 import yaml
8 from django.apps import apps
9 from django.apps.config import AppConfig
10 from django.contrib.auth import get_user_model
11 from django.contrib.contenttypes.models import ContentType
12 from django.core.exceptions import ObjectDoesNotExist
13 from django.db import OperationalError, ProgrammingError, connections, models
14 from django.db.models import CharField, Max, ProtectedError, TextField
15 from django.db.models.base import ModelBase
16 from model_mommy import mommy
17 from model_mommy.mommy import foreign_key_required, make
18
19 from sapl.base.models import Autor, ProblemaMigracao
20 from sapl.comissoes.models import Comissao, Composicao, Participacao
21 from sapl.legacy.models import Protocolo as ProtocoloLegado
22 from sapl.materia.models import (Proposicao, StatusTramitacao, TipoDocumento,
23 TipoMateriaLegislativa, TipoProposicao,
24 Tramitacao)
25 from sapl.norma.models import (AssuntoNorma, NormaJuridica,
26 TipoVinculoNormaJuridica)
27 from sapl.parlamentares.models import Parlamentar
28 from sapl.protocoloadm.models import Protocolo, StatusTramitacaoAdministrativo
29 from sapl.sessao.models import ExpedienteMateria, OrdemDia, SessaoPlenaria
30 from sapl.settings import PROJECT_DIR
31 from sapl.utils import normalize
32
33 # BASE ######################################################################
34 # apps to be migrated, in app dependency order (very important)
35 appconfs = [apps.get_app_config(n) for n in [
36 'parlamentares',
37 'comissoes',
38 'base',
39 'materia',
40 'norma',
41 'sessao',
42 'lexml',
43 'protocoloadm', ]]
44
45 unique_constraints = []
46 one_to_one_constraints = []
47 primeira_vez = []
48
49 name_sets = [set(m.__name__ for m in ac.get_models()) for ac in appconfs]
50
51 # apps do not overlap
52 for s1 in name_sets:
53 for s2 in name_sets:
54 if s1 is not s2:
55 assert not s1.intersection(s2)
56
57 # apps include all legacy models
58 legacy_app = apps.get_app_config('legacy')
59 legacy_model_names = set(m.__name__ for m in legacy_app.get_models())
60
61 model_dict = {m.__name__: m for ac in appconfs for m in ac.get_models()}
62
63
64 # RENAMES ###################################################################
65
66 MODEL_RENAME_PATTERN = re.compile('(.+) \((.+)\)')
67
68
69 def get_renames():
70 field_renames = {}
71 model_renames = {}
72 for app in appconfs:
73 app_rename_data = yaml.load(
74 pkg_resources.resource_string(app.module.__name__, 'legacy.yaml'))
75 for model_name, renames in app_rename_data.items():
76 match = MODEL_RENAME_PATTERN.match(model_name)
77 if match:
78 model_name, old_name = match.groups()
79 else:
80 old_name = None
81 model = getattr(app.models_module, model_name)
82 if old_name:
83 model_renames[model] = old_name
84 field_renames[model] = renames
85
86 # collect renames from parent classes
87 for model, renames in field_renames.items():
88 if any(parent in field_renames for parent in model.__mro__[1:]):
89 renames = {}
90 for parent in reversed(model.__mro__):
91 if parent in field_renames:
92 renames.update(field_renames[parent])
93 field_renames[model] = renames
94
95 # remove abstract classes
96 field_renames = {m: r for m, r in field_renames.items()
97 if not m._meta.abstract}
98
99 return field_renames, model_renames
100
101 # MIGRATION #################################################################
102
103
104 def info(msg):
105 print('INFO: ' + msg)
106
107
108 def warn(msg):
109 print('CUIDADO! ' + msg)
110
111
112 def get_fk_related(field, value, label=None):
113 if value is None and field.null is False:
114 value = 0
115 if value is not None:
116 try:
117 value = field.related_model.objects.get(id=value)
118 except ObjectDoesNotExist:
119 msg = 'FK [%s] não encontrada para valor %s ' \
120 '(em %s %s)' % (
121 field.name, value,
122 field.model.__name__, label or '---')
123 if value == 0:
124 if not field.null:
125 fields_dict = get_fields_dict(field.related_model)
126 # Cria stub ao final da tabela para evitar erros
127 pk = 1
128 if hasattr(field.related_model.objects.last(), 'pk'):
129 pk = field.related_model.objects.last().pk
130 with reversion.create_revision():
131 reversion.set_comment('Stub criado pela migração')
132 value = mommy.make(
133 field.related_model, **fields_dict,
134 pk=(pk + 1 or 1))
135 descricao = 'stub criado para campos não nuláveis!'
136 save_relation(value, [field.name], msg, descricao,
137 eh_stub=True)
138 warn(msg + ' => ' + descricao)
139 else:
140 value = None
141 else:
142 if field.model._meta.label == 'sessao.RegistroVotacao' and \
143 field.name == 'ordem':
144 return value
145 # Caso TipoProposicao não exista, um objeto será criado então
146 # com content_type=13 (ProblemaMigracao)
147 if field.related_model.__name__ == 'TipoProposicao':
148 tipo = TipoProposicao.objects.filter(descricao='Erro')
149 if not tipo:
150 with reversion.create_revision():
151 reversion.set_comment(
152 'TipoProposicao "Erro" criado')
153 ct = ContentType.objects.get(pk=13)
154 value = TipoProposicao.objects.create(
155 id=value, descricao='Erro', content_type=ct)
156 else:
157 value = tipo[0]
158 else:
159 with reversion.create_revision():
160 reversion.set_comment('Stub criado pela migração')
161 value = make_stub(field.related_model, value)
162 descricao = 'stub criado para entrada orfã!'
163 warn(msg + ' => ' + descricao)
164 save_relation(value, [field.name], msg, descricao,
165 eh_stub=True)
166 else:
167 assert value
168 return value
169
170
171 def get_field(model, fieldname):
172 return model._meta.get_field(fieldname)
173
174
175 def exec_sql_file(path, db='default'):
176 cursor = connections[db].cursor()
177 for line in open(path):
178 try:
179 cursor.execute(line)
180 except (OperationalError, ProgrammingError) as e:
181 print("Args: '%s'" % (str(e.args)))
182
183
184 def exec_sql(sql, db='default'):
185 cursor = connections[db].cursor()
186 cursor.execute(sql)
187 return cursor
188
189
190 def iter_sql_records(sql, db):
191 class Record:
192 pass
193 cursor = exec_sql(sql, db)
194 fieldnames = [name[0] for name in cursor.description]
195 for row in cursor.fetchall():
196 record = Record()
197 record.__dict__.update(zip(fieldnames, row))
198 yield record
199
200
201 def delete_constraints(model):
202 # pega nome da unique constraint dado o nome da tabela
203 table = model._meta.db_table
204 cursor = exec_sql("SELECT conname FROM pg_constraint WHERE conrelid = "
205 "(SELECT oid FROM pg_class WHERE relname LIKE "
206 "'%s') and contype = 'u';" % (table))
207 result = ()
208 result = cursor.fetchall()
209 # se existir um resultado, unique constraint será deletado
210 for r in result:
211 if r[0].endswith('key'):
212 words_list = r[0].split('_')
213 one_to_one_constraints.append([table, r[0], words_list, model])
214 else:
215 args = None
216 args_list = []
217 if model._meta.unique_together:
218 args = model._meta.unique_together[0]
219 args_list = list(args)
220 unique_constraints.append([table, r[0], args_list, model])
221 warn('Excluindo unique constraint de nome %s' % r[0])
222 exec_sql("ALTER TABLE %s DROP CONSTRAINT %s;" %
223 (table, r[0]))
224
225
226 def recreate_constraints():
227 if one_to_one_constraints:
228 for constraint in one_to_one_constraints:
229 table, name, args, model = constraint
230 args_string = ''
231 args_string = "(" + "_".join(map(str, args[2:-1])) + ")"
232 exec_sql("ALTER TABLE %s ADD CONSTRAINT %s UNIQUE %s;" %
233 (table, name, args_string))
234 if unique_constraints:
235 for constraint in unique_constraints:
236 table, name, args, model = constraint
237 for i in range(len(args)):
238 if isinstance(model._meta.get_field(args[i]),
239 models.ForeignKey):
240 args[i] = args[i] + '_id'
241 args_string = ''
242 args_string += "(" + ', '.join(map(str, args)) + ")"
243 exec_sql("ALTER TABLE %s ADD CONSTRAINT %s UNIQUE %s;" %
244 (table, name, args_string))
245 one_to_one_constraints.clear()
246 unique_constraints.clear()
247
248
249 def stub_desnecessario(obj):
250 lista_fields = [
251 f for f in obj._meta.get_fields()
252 if (f.one_to_many or f.one_to_one) and f.auto_created
253 ]
254 desnecessario = not any(
255 rr.related_model.objects.filter(**{rr.field.name: obj}).exists()
256 for rr in lista_fields)
257 return desnecessario
258
259
260 def get_last_value(model):
261 last_value = model.objects.all().aggregate(Max('pk'))
262 return last_value['pk__max'] if last_value['pk__max'] else 0
263
264
265 def alter_sequence(model, id):
266 sequence_name = '%s_id_seq' % model._meta.db_table
267 exec_sql('ALTER SEQUENCE %s RESTART WITH %s;' % (sequence_name, id))
268
269
270 def save_with_id(new, id):
271 last_value = get_last_value(type(new))
272 alter_sequence(type(new), id)
273 new.save()
274 alter_sequence(type(new), last_value + 1)
275 assert new.id == id, 'New id is different from provided!'
276
277
278 def save_relation(obj, nome_campo='', problema='', descricao='',
279 eh_stub=False):
280 link = ProblemaMigracao(
281 content_object=obj, nome_campo=nome_campo, problema=problema,
282 descricao=descricao, eh_stub=eh_stub,
283 )
284 link.save()
285
286
287 def make_stub(model, id):
288 fields_dict = get_fields_dict(model)
289 new = mommy.prepare(model, **fields_dict, pk=id)
290 save_with_id(new, id)
291
292 return new
293
294
295 def get_fields_dict(model):
296 all_fields = model._meta.get_fields()
297 fields_dict = {}
298 fields_dict = {f.name: '????????????'[:f.max_length]
299 for f in all_fields
300 if isinstance(f, (CharField, TextField)) and
301 not f.choices and not f.blank}
302 return fields_dict
303
304
305 def fill_vinculo_norma_juridica():
306 lista = [('A', 'Altera a norma'),
307 ('R', 'Revoga integralmente a norma'),
308 ('P', 'Revoga parcialmente a norma'),
309 ('T', 'Revoga integralmente por consolidação'),
310 ('C', 'Norma Correlata'),
311 ('S', 'Ressalva a Norma'),
312 ('E', 'Reedita a Norma'),
313 ('I', 'Reedita a Norma com Alteração'),
314 ('G', 'Regulamenta a Norma'),
315 ('K', 'Suspende parcialmente a norma'),
316 ('L', 'Suspende integralmente a norma'),
317 ('N', 'Julgada integralmente inconstitucional'),
318 ('O', 'Julgada parcialmente inconstitucional')]
319 lista_objs = [TipoVinculoNormaJuridica(sigla=item[0], descricao=item[1])
320 for item in lista]
321 TipoVinculoNormaJuridica.objects.bulk_create(lista_objs)
322
323
324 class DataMigrator:
325
326 def __init__(self):
327 self.field_renames, self.model_renames = get_renames()
328 self.data_mudada = {}
329 self.choice_valida = {}
330
331 def populate_renamed_fields(self, new, old):
332 renames = self.field_renames[type(new)]
333
334 for field in new._meta.fields:
335 old_field_name = renames.get(field.name)
336 field_type = field.get_internal_type()
337 msg = ("O valor do campo %s (%s) da model %s era inválido" %
338 (field.name, field_type, field.model.__name__))
339 if old_field_name:
340 old_value = getattr(old, old_field_name)
341 if isinstance(field, models.ForeignKey):
342 old_type = type(old) # not necessarily a model
343 if hasattr(old_type, '_meta') and \
344 old_type._meta.pk.name != 'id':
345 label = old.pk
346 else:
347 label = '-- SEM PK --'
348 value = get_fk_related(field, old_value, label)
349 else:
350 value = getattr(old, old_field_name)
351 if field_type == 'DateField' and \
352 not field.null and value is None:
353 descricao = 'A data 1111-11-11 foi colocada no lugar'
354 problema = 'O valor da data era nulo ou inválido'
355 warn(msg +
356 ' => ' + descricao)
357 value = date(1111, 11, 11)
358 self.data_mudada['obj'] = new
359 self.data_mudada['descricao'] = descricao
360 self.data_mudada['problema'] = problema
361 self.data_mudada.setdefault('nome_campo', []).\
362 append(field.name)
363 if field_type == 'CharField' or field_type == 'TextField':
364 if value is None or value == 'None':
365 value = ''
366 if field.model._meta.label == 'sessao.RegistroVotacao' and \
367 field.name == 'ordem' and \
368 not isinstance(value, OrdemDia):
369 try:
370 new_value = ExpedienteMateria.objects.get(pk=value)
371 setattr(new, 'expediente', new_value)
372 setattr(new, field.name, None)
373 continue
374 except ObjectDoesNotExist:
375 msg = 'FK [%s] não encontrada para valor %s ' \
376 '(em %s %s)' % (
377 field.name, value,
378 field.model.__name__, label or '---')
379 with reversion.create_revision():
380 value = make_stub(field.related_model, value)
381 descricao = 'stub criado para entrada orfã!'
382 warn(msg + ' => ' + descricao)
383 save_relation(value, [field.name], msg, descricao,
384 eh_stub=True)
385 reversion.set_comment('Stub criado pela migração')
386 setattr(new, field.name, value)
387 elif field.model.__name__ == 'TipoAutor' and \
388 field.name == 'content_type':
389
390 model = normalize(new.descricao.lower()).replace(' ', '')
391 content_types = field.related_model.objects.filter(
392 model=model).exclude(app_label='legacy')
393 assert len(content_types) <= 1
394
395 value = content_types[0] if content_types else None
396 setattr(new, field.name, value)
397
398 def migrate(self, obj=appconfs, interativo=True):
399 # warning: model/app migration order is of utmost importance
400 exec_sql_file(PROJECT_DIR.child(
401 'sapl', 'legacy', 'scripts', 'fix_tables.sql'), 'legacy')
402 self.to_delete = []
403
404 # excluindo database antigo.
405 if interativo:
406 info('Todos os dados do banco serão excluidos. '
407 'Recomendamos que faça backup do banco sapl '
408 'antes de continuar.')
409 info('Deseja continuar? [s/n]')
410 resposta = input()
411 if resposta.lower() in ['s', 'sim', 'y', 'yes']:
412 pass
413 else:
414 info('Migração cancelada.')
415 return 0
416 info('Excluindo entradas antigas do banco.')
417 call([PROJECT_DIR.child('manage.py'), 'flush',
418 '--settings=sapl.settings', '--database=default', '--no-input'],
419 stdout=PIPE)
420
421 info('Começando migração: %s...' % obj)
422 self._do_migrate(obj)
423 # exclude logically deleted in legacy base
424 info('Deletando models com ind_excluido...')
425 while self.to_delete:
426 for obj in self.to_delete:
427 try:
428 obj.delete()
429 self.to_delete.remove(obj)
430 except ProtectedError:
431 msg = 'A entrada de PK %s da model %s não pode ser ' \
432 'excluida' % (obj.pk, obj._meta.model_name)
433 descricao = 'Um ou mais objetos protegidos '
434 warn(msg + ' => ' + descricao)
435 save_relation(obj=obj, problema=msg,
436 descricao=descricao, eh_stub=False)
437
438 info('Deletando stubs desnecessários...')
439 while self.delete_stubs():
440 pass
441 info('Recriando unique constraints...')
442 # recreate_constraints()
443
444 def _do_migrate(self, obj):
445 if isinstance(obj, AppConfig):
446 models_to_migrate = (model for model in obj.models.values()
447 if model in self.field_renames)
448 self._do_migrate(models_to_migrate)
449 elif isinstance(obj, ModelBase):
450 # A migração vai pular TipoProposicao e só vai migrar essa model
451 # antes de migrar Proposicao. Isso deve acontecer por causa da
452 # GenericRelation existente em TipoProposicao.
453 if not obj.__name__ == 'TipoProposicao':
454 if obj.__name__ == 'Proposicao':
455 self.migrate_model(TipoProposicao)
456 self.migrate_model(obj)
457 elif hasattr(obj, '__iter__'):
458 for item in obj:
459 self._do_migrate(item)
460 else:
461 raise TypeError(
462 'Parameter must be a Model, AppConfig or a sequence of them')
463
464 def migrate_model(self, model):
465 print('Migrando %s...' % model.__name__)
466
467 legacy_model_name = self.model_renames.get(model, model.__name__)
468 legacy_model = legacy_app.get_model(legacy_model_name)
469 legacy_pk_name = legacy_model._meta.pk.name
470
471 # Clear all model entries
472 # They may have been created in a previous migration attempt
473 try:
474 model.objects.all().delete()
475 except ProtectedError:
476 Proposicao.objects.all().delete()
477 model.objects.all().delete()
478 delete_constraints(model)
479
480 # setup migration strategy for tables with or without a pk
481 if legacy_pk_name == 'id':
482 # There is no pk in the legacy table
483 def save(new, old):
484 with reversion.create_revision():
485 new.save()
486 reversion.set_comment('Objeto criado pela migração')
487 old_records = iter_sql_records(
488 'select * from ' + legacy_model._meta.db_table, 'legacy')
489 else:
490 def save(new, old):
491 with reversion.create_revision():
492 save_with_id(new, getattr(old, legacy_pk_name))
493 reversion.set_comment('Objeto criado pela migração')
494
495 old_records = legacy_model.objects.all().order_by(legacy_pk_name)
496
497 ajuste_antes_salvar = AJUSTE_ANTES_SALVAR.get(model)
498 ajuste_depois_salvar = AJUSTE_DEPOIS_SALVAR.get(model)
499
500 # convert old records to new ones
501 for old in old_records:
502 new = model()
503 self.populate_renamed_fields(new, old)
504 if ajuste_antes_salvar:
505 ajuste_antes_salvar(new, old)
506 save(new, old)
507 if ajuste_depois_salvar:
508 ajuste_depois_salvar(new, old)
509 if self.data_mudada:
510 with reversion.create_revision():
511 save_relation(**self.data_mudada)
512 self.data_mudada.clear()
513 reversion.set_comment('Ajuste de data pela migração')
514 if getattr(old, 'ind_excluido', False):
515 self.to_delete.append(new)
516
517 def delete_stubs(self):
518 excluidos = 0
519 for obj in ProblemaMigracao.objects.all():
520 if obj.content_object and obj.eh_stub:
521 original = obj.content_type.get_all_objects_for_this_type(
522 id=obj.object_id)
523 if stub_desnecessario(original[0]):
524 qtd_exclusoes, *_ = original.delete()
525 assert qtd_exclusoes == 1
526 qtd_exclusoes, *_ = obj.delete()
527 assert qtd_exclusoes == 1
528 excluidos = excluidos + 1
529 elif not obj.content_object and not obj.eh_stub:
530 qtd_exclusoes, *_ = obj.delete()
531 assert qtd_exclusoes == 1
532 excluidos = excluidos + 1
533 return excluidos
534
535
536 def migrate(obj=appconfs, interativo=True):
537 dm = DataMigrator()
538 dm.migrate(obj, interativo)
539
540
541 # MIGRATION_ADJUSTMENTS #####################################################
542
543 def adjust_ordemdia(new, old):
544 # Prestar atenção
545 if not old.tip_votacao:
546 new.tipo_votacao = 1
547
548
549 def adjust_parlamentar(new, old):
550 if old.ind_unid_deliberativa:
551 value = new.unidade_deliberativa
552 # Field is defined as not null in legacy db,
553 # but data includes null values
554 # => transform None to False
555 if value is None:
556 warn('nulo convertido para falso')
557 new.unidade_deliberativa = False
558
559
560 def adjust_participacao(new, old):
561 composicao = Composicao()
562 composicao.comissao, composicao.periodo = [
563 get_fk_related(Composicao._meta.get_field(name), value)
564 for name, value in (('comissao', old.cod_comissao),
565 ('periodo', old.cod_periodo_comp))]
566 # check if there is already an "equal" one in the db
567 already_created = Composicao.objects.filter(
568 comissao=composicao.comissao, periodo=composicao.periodo)
569 if already_created:
570 assert len(already_created) == 1 # we must never have made 2 copies
571 [composicao] = already_created
572 else:
573 with reversion.create_revision():
574 composicao.save()
575 reversion.set_comment('Objeto criado pela migração')
576 new.composicao = composicao
577
578
579 def adjust_protocolo(new, old):
580 if new.numero is None and not primeira_vez:
581 p = ProtocoloLegado.objects.filter(
582 ano_protocolo=new.ano).aggregate(Max('num_protocolo'))
583 numero_maximo = p['num_protocolo__max']
584 new.numero = 1 if numero_maximo is None else numero_maximo + 1
585 primeira_vez.append(True)
586 if new.numero is None and primeira_vez:
587 p = Protocolo.objects.filter(
588 ano=new.ano).aggregate(Max('numero'))
589 new.numero = p['numero__max'] + 1
590
591
592 def adjust_sessaoplenaria(new, old):
593 assert not old.tip_expediente
594
595
596 def adjust_tipoproposicao(new, old):
597 if old.ind_mat_ou_doc == 'M':
598 new.tipo_conteudo_related = TipoMateriaLegislativa.objects.get(
599 pk=old.tip_mat_ou_doc)
600 elif old.ind_mat_ou_doc == 'D':
601 new.tipo_conteudo_related = TipoDocumento.objects.get(
602 pk=old.tip_mat_ou_doc)
603
604
605 def adjust_statustramitacao(new, old):
606 if old.ind_fim_tramitacao:
607 new.indicador = 'F'
608 elif old.ind_retorno_tramitacao:
609 new.indicador = 'R'
610 else:
611 new.indicador = ''
612
613
614 def adjust_statustramitacaoadm(new, old):
615 adjust_statustramitacao(new, old)
616
617
618 def adjust_tramitacao(new, old):
619 if old.sgl_turno == 'Ú':
620 new.turno = 'U'
621
622
623 def adjust_normajuridica_antes_salvar(new, old):
624 # Ajusta choice de esfera_federacao
625 # O 'S' vem de 'Selecionar'. Na versão antiga do SAPL, quando uma opção do
626 # combobox era selecionada, o sistema pegava a primeira letra da seleção,
627 # sendo F para Federal, E para Estadual, M para Municipal e o S para
628 # Selecionar, que era a primeira opção quando nada era selecionado.
629 if old.tip_esfera_federacao == 'S':
630 new.esfera_federacao = ''
631
632
633 def adjust_normajuridica_depois_salvar(new, old):
634 # Ajusta relação M2M
635 lista_pks_assunto = old.cod_assunto.split(',')
636 for pk_assunto in lista_pks_assunto:
637 new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))
638
639
640 def adjust_protocolo_depois_salvar(new, old):
641 if old.num_protocolo is None:
642 with reversion.create_revision():
643 problema = 'Número do protocolo de PK %s é nulo' % new.pk
644 descricao = 'Número do protocolo alterado para %s!' % new.numero
645 warn(problema + ' => ' + descricao)
646 save_relation(obj=new, problema=problema,
647 descricao=descricao, eh_stub=False)
648 reversion.set_comment('Numero de protocolo teve que ser alterado')
649
650
651 def adjust_autor(new, old):
652 if old.cod_parlamentar:
653 new.autor_related = Parlamentar.objects.get(pk=old.cod_parlamentar)
654 new.nome = new.autor_related.nome_parlamentar
655 elif old.cod_comissao:
656 new.autor_related = Comissao.objects.get(pk=old.cod_comissao)
657
658 if old.col_username:
659 if not get_user_model().objects.filter(
660 username=old.col_username).exists():
661 user = get_user_model()(
662 username=old.col_username, password=12345)
663 with reversion.create_revision():
664 user.save()
665 reversion.set_comment('Objeto criado pela migração')
666 new.user = user
667 else:
668 new.user = get_user_model().objects.filter(
669 username=old.col_username)[0]
670
671
672 def adjust_comissao(new, old):
673 if old.dat_extincao:
674 if date.today() < new.data_extincao:
675 new.ativa = True
676 else:
677 new.ativa = False
678 if not old.dat_extincao:
679 new.ativa = True
680
681
682 AJUSTE_ANTES_SALVAR = {
683 Autor: adjust_autor,
684 Comissao: adjust_comissao,
685 NormaJuridica: adjust_normajuridica_antes_salvar,
686 OrdemDia: adjust_ordemdia,
687 Parlamentar: adjust_parlamentar,
688 Participacao: adjust_participacao,
689 Protocolo: adjust_protocolo,
690 SessaoPlenaria: adjust_sessaoplenaria,
691 TipoProposicao: adjust_tipoproposicao,
692 StatusTramitacao: adjust_statustramitacao,
693 StatusTramitacaoAdministrativo: adjust_statustramitacaoadm,
694 Tramitacao: adjust_tramitacao,
695 }
696
697 AJUSTE_DEPOIS_SALVAR = {
698 NormaJuridica: adjust_normajuridica_depois_salvar,
699 Protocolo: adjust_protocolo_depois_salvar,
700 }
701
702 # CHECKS ####################################################################
703
704
705 def get_ind_excluido(obj):
706 legacy_model = legacy_app.get_model(type(obj).__name__)
707 return getattr(legacy_model.objects.get(
708 **{legacy_model._meta.pk.name: obj.id}), 'ind_excluido', False)
709
710
711 def check_app_no_ind_excluido(app):
712 for model in app.models.values():
713 assert not any(get_ind_excluido(obj) for obj in model.objects.all())
714 print('OK!')
715
716 # MOMMY MAKE WITH LOG ######################################################
717
718
719 def make_with_log(model, _quantity=None, make_m2m=False, **attrs):
720 last_value = get_last_value(model)
721 alter_sequence(model, last_value + 1)
722 fields_dict = get_fields_dict(model)
723 stub = make(model, _quantity, make_m2m, **fields_dict)
724 problema = 'Um stub foi necessário durante a criação de um outro stub'
725 descricao = 'Essa entrada é necessária para um dos stubs criados'
726 ' anteriormente'
727 warn(problema)
728 save_relation(obj=stub, problema=problema,
729 descricao=descricao, eh_stub=True)
730 return stub
731
732 make_with_log.required = foreign_key_required
733
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sapl/legacy/migration.py b/sapl/legacy/migration.py
--- a/sapl/legacy/migration.py
+++ b/sapl/legacy/migration.py
@@ -633,7 +633,9 @@
def adjust_normajuridica_depois_salvar(new, old):
# Ajusta relação M2M
lista_pks_assunto = old.cod_assunto.split(',')
- for pk_assunto in lista_pks_assunto:
+
+ # list(filter(..)) usado para retirar strings vazias da lista
+ for pk_assunto in list(filter(None, lista_pks_assunto)):
new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))
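
A brief illustrative note on the idiom used in the patch above (standalone sketch, not repository code): `filter(None, seq)` keeps only truthy elements, so the empty strings produced by `split(',')` are discarded before the ORM lookup.

```python
# filter(None, ...) drops falsy items such as the '' tokens left by split(',')
assert list(filter(None, ''.split(','))) == []
assert list(filter(None, '1,,2,'.split(','))) == ['1', '2']
```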
| {"golden_diff": "diff --git a/sapl/legacy/migration.py b/sapl/legacy/migration.py\n--- a/sapl/legacy/migration.py\n+++ b/sapl/legacy/migration.py\n@@ -633,7 +633,9 @@\n def adjust_normajuridica_depois_salvar(new, old):\n # Ajusta rela\u00e7\u00e3o M2M\n lista_pks_assunto = old.cod_assunto.split(',')\n- for pk_assunto in lista_pks_assunto:\n+\n+ # list(filter(..)) usado para retirar strings vazias da lista\n+ for pk_assunto in list(filter(None, lista_pks_assunto)):\n new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))\n", "issue": "Erro na fun\u00e7\u00e3o adjust_normajuridica_depois_salvar()\n```\r\nFile \"[...]/sapl/sapl/legacy/migration.py\", line 636, in adjust_normajuridica_depois_salvar\r\n new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))\r\nValueError: invalid literal for int() with base 10: ''\r\n```\r\n\r\n\u00c9 preciso verificar porque esse erro est\u00e1 acontecendo. Aparentemente o script n\u00e3o est\u00e1 conseguindo retornar o valor da query `AssuntoNorma.objects.get(pk=pk_assunto)` porque pk_assunto \u00e9 uma string vazia quando deveria ser um n\u00famero. Pode ser por conta de alguma inconsist\u00eancia na antiga rela\u00e7\u00e3o de many to many do SAPL2.5, que era feito por uma string separada por v\u00edrgulas.\n", "before_files": [{"content": "import re\nfrom datetime import date\nfrom subprocess import PIPE, call\n\nimport pkg_resources\nimport reversion\nimport yaml\nfrom django.apps import apps\nfrom django.apps.config import AppConfig\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import OperationalError, ProgrammingError, connections, models\nfrom django.db.models import CharField, Max, ProtectedError, TextField\nfrom django.db.models.base import ModelBase\nfrom model_mommy import mommy\nfrom model_mommy.mommy import foreign_key_required, make\n\nfrom sapl.base.models import Autor, ProblemaMigracao\nfrom sapl.comissoes.models import Comissao, Composicao, Participacao\nfrom sapl.legacy.models import Protocolo as ProtocoloLegado\nfrom sapl.materia.models import (Proposicao, StatusTramitacao, TipoDocumento,\n TipoMateriaLegislativa, TipoProposicao,\n Tramitacao)\nfrom sapl.norma.models import (AssuntoNorma, NormaJuridica,\n TipoVinculoNormaJuridica)\nfrom sapl.parlamentares.models import Parlamentar\nfrom sapl.protocoloadm.models import Protocolo, StatusTramitacaoAdministrativo\nfrom sapl.sessao.models import ExpedienteMateria, OrdemDia, SessaoPlenaria\nfrom sapl.settings import PROJECT_DIR\nfrom sapl.utils import normalize\n\n# BASE ######################################################################\n# apps to be migrated, in app dependency order (very important)\nappconfs = [apps.get_app_config(n) for n in [\n 'parlamentares',\n 'comissoes',\n 'base',\n 'materia',\n 'norma',\n 'sessao',\n 'lexml',\n 'protocoloadm', ]]\n\nunique_constraints = []\none_to_one_constraints = []\nprimeira_vez = []\n\nname_sets = [set(m.__name__ for m in ac.get_models()) for ac in appconfs]\n\n# apps do not overlap\nfor s1 in name_sets:\n for s2 in name_sets:\n if s1 is not s2:\n assert not s1.intersection(s2)\n\n# apps include all legacy models\nlegacy_app = apps.get_app_config('legacy')\nlegacy_model_names = set(m.__name__ for m in legacy_app.get_models())\n\nmodel_dict = {m.__name__: m for ac in appconfs for m in ac.get_models()}\n\n\n# RENAMES ###################################################################\n\nMODEL_RENAME_PATTERN 
= re.compile('(.+) \\((.+)\\)')\n\n\ndef get_renames():\n field_renames = {}\n model_renames = {}\n for app in appconfs:\n app_rename_data = yaml.load(\n pkg_resources.resource_string(app.module.__name__, 'legacy.yaml'))\n for model_name, renames in app_rename_data.items():\n match = MODEL_RENAME_PATTERN.match(model_name)\n if match:\n model_name, old_name = match.groups()\n else:\n old_name = None\n model = getattr(app.models_module, model_name)\n if old_name:\n model_renames[model] = old_name\n field_renames[model] = renames\n\n # collect renames from parent classes\n for model, renames in field_renames.items():\n if any(parent in field_renames for parent in model.__mro__[1:]):\n renames = {}\n for parent in reversed(model.__mro__):\n if parent in field_renames:\n renames.update(field_renames[parent])\n field_renames[model] = renames\n\n # remove abstract classes\n field_renames = {m: r for m, r in field_renames.items()\n if not m._meta.abstract}\n\n return field_renames, model_renames\n\n# MIGRATION #################################################################\n\n\ndef info(msg):\n print('INFO: ' + msg)\n\n\ndef warn(msg):\n print('CUIDADO! ' + msg)\n\n\ndef get_fk_related(field, value, label=None):\n if value is None and field.null is False:\n value = 0\n if value is not None:\n try:\n value = field.related_model.objects.get(id=value)\n except ObjectDoesNotExist:\n msg = 'FK [%s] n\u00e3o encontrada para valor %s ' \\\n '(em %s %s)' % (\n field.name, value,\n field.model.__name__, label or '---')\n if value == 0:\n if not field.null:\n fields_dict = get_fields_dict(field.related_model)\n # Cria stub ao final da tabela para evitar erros\n pk = 1\n if hasattr(field.related_model.objects.last(), 'pk'):\n pk = field.related_model.objects.last().pk\n with reversion.create_revision():\n reversion.set_comment('Stub criado pela migra\u00e7\u00e3o')\n value = mommy.make(\n field.related_model, **fields_dict,\n pk=(pk + 1 or 1))\n descricao = 'stub criado para campos n\u00e3o nul\u00e1veis!'\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n warn(msg + ' => ' + descricao)\n else:\n value = None\n else:\n if field.model._meta.label == 'sessao.RegistroVotacao' and \\\n field.name == 'ordem':\n return value\n # Caso TipoProposicao n\u00e3o exista, um objeto ser\u00e1 criado ent\u00e3o\n # com content_type=13 (ProblemaMigracao)\n if field.related_model.__name__ == 'TipoProposicao':\n tipo = TipoProposicao.objects.filter(descricao='Erro')\n if not tipo:\n with reversion.create_revision():\n reversion.set_comment(\n 'TipoProposicao \"Erro\" criado')\n ct = ContentType.objects.get(pk=13)\n value = TipoProposicao.objects.create(\n id=value, descricao='Erro', content_type=ct)\n else:\n value = tipo[0]\n else:\n with reversion.create_revision():\n reversion.set_comment('Stub criado pela migra\u00e7\u00e3o')\n value = make_stub(field.related_model, value)\n descricao = 'stub criado para entrada orf\u00e3!'\n warn(msg + ' => ' + descricao)\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n else:\n assert value\n return value\n\n\ndef get_field(model, fieldname):\n return model._meta.get_field(fieldname)\n\n\ndef exec_sql_file(path, db='default'):\n cursor = connections[db].cursor()\n for line in open(path):\n try:\n cursor.execute(line)\n except (OperationalError, ProgrammingError) as e:\n print(\"Args: '%s'\" % (str(e.args)))\n\n\ndef exec_sql(sql, db='default'):\n cursor = connections[db].cursor()\n cursor.execute(sql)\n return cursor\n\n\ndef 
iter_sql_records(sql, db):\n class Record:\n pass\n cursor = exec_sql(sql, db)\n fieldnames = [name[0] for name in cursor.description]\n for row in cursor.fetchall():\n record = Record()\n record.__dict__.update(zip(fieldnames, row))\n yield record\n\n\ndef delete_constraints(model):\n # pega nome da unique constraint dado o nome da tabela\n table = model._meta.db_table\n cursor = exec_sql(\"SELECT conname FROM pg_constraint WHERE conrelid = \"\n \"(SELECT oid FROM pg_class WHERE relname LIKE \"\n \"'%s') and contype = 'u';\" % (table))\n result = ()\n result = cursor.fetchall()\n # se existir um resultado, unique constraint ser\u00e1 deletado\n for r in result:\n if r[0].endswith('key'):\n words_list = r[0].split('_')\n one_to_one_constraints.append([table, r[0], words_list, model])\n else:\n args = None\n args_list = []\n if model._meta.unique_together:\n args = model._meta.unique_together[0]\n args_list = list(args)\n unique_constraints.append([table, r[0], args_list, model])\n warn('Excluindo unique constraint de nome %s' % r[0])\n exec_sql(\"ALTER TABLE %s DROP CONSTRAINT %s;\" %\n (table, r[0]))\n\n\ndef recreate_constraints():\n if one_to_one_constraints:\n for constraint in one_to_one_constraints:\n table, name, args, model = constraint\n args_string = ''\n args_string = \"(\" + \"_\".join(map(str, args[2:-1])) + \")\"\n exec_sql(\"ALTER TABLE %s ADD CONSTRAINT %s UNIQUE %s;\" %\n (table, name, args_string))\n if unique_constraints:\n for constraint in unique_constraints:\n table, name, args, model = constraint\n for i in range(len(args)):\n if isinstance(model._meta.get_field(args[i]),\n models.ForeignKey):\n args[i] = args[i] + '_id'\n args_string = ''\n args_string += \"(\" + ', '.join(map(str, args)) + \")\"\n exec_sql(\"ALTER TABLE %s ADD CONSTRAINT %s UNIQUE %s;\" %\n (table, name, args_string))\n one_to_one_constraints.clear()\n unique_constraints.clear()\n\n\ndef stub_desnecessario(obj):\n lista_fields = [\n f for f in obj._meta.get_fields()\n if (f.one_to_many or f.one_to_one) and f.auto_created\n ]\n desnecessario = not any(\n rr.related_model.objects.filter(**{rr.field.name: obj}).exists()\n for rr in lista_fields)\n return desnecessario\n\n\ndef get_last_value(model):\n last_value = model.objects.all().aggregate(Max('pk'))\n return last_value['pk__max'] if last_value['pk__max'] else 0\n\n\ndef alter_sequence(model, id):\n sequence_name = '%s_id_seq' % model._meta.db_table\n exec_sql('ALTER SEQUENCE %s RESTART WITH %s;' % (sequence_name, id))\n\n\ndef save_with_id(new, id):\n last_value = get_last_value(type(new))\n alter_sequence(type(new), id)\n new.save()\n alter_sequence(type(new), last_value + 1)\n assert new.id == id, 'New id is different from provided!'\n\n\ndef save_relation(obj, nome_campo='', problema='', descricao='',\n eh_stub=False):\n link = ProblemaMigracao(\n content_object=obj, nome_campo=nome_campo, problema=problema,\n descricao=descricao, eh_stub=eh_stub,\n )\n link.save()\n\n\ndef make_stub(model, id):\n fields_dict = get_fields_dict(model)\n new = mommy.prepare(model, **fields_dict, pk=id)\n save_with_id(new, id)\n\n return new\n\n\ndef get_fields_dict(model):\n all_fields = model._meta.get_fields()\n fields_dict = {}\n fields_dict = {f.name: '????????????'[:f.max_length]\n for f in all_fields\n if isinstance(f, (CharField, TextField)) and\n not f.choices and not f.blank}\n return fields_dict\n\n\ndef fill_vinculo_norma_juridica():\n lista = [('A', 'Altera a norma'),\n ('R', 'Revoga integralmente a norma'),\n ('P', 'Revoga parcialmente a norma'),\n 
('T', 'Revoga integralmente por consolida\u00e7\u00e3o'),\n ('C', 'Norma Correlata'),\n ('S', 'Ressalva a Norma'),\n ('E', 'Reedita a Norma'),\n ('I', 'Reedita a Norma com Altera\u00e7\u00e3o'),\n ('G', 'Regulamenta a Norma'),\n ('K', 'Suspende parcialmente a norma'),\n ('L', 'Suspende integralmente a norma'),\n ('N', 'Julgada integralmente inconstitucional'),\n ('O', 'Julgada parcialmente inconstitucional')]\n lista_objs = [TipoVinculoNormaJuridica(sigla=item[0], descricao=item[1])\n for item in lista]\n TipoVinculoNormaJuridica.objects.bulk_create(lista_objs)\n\n\nclass DataMigrator:\n\n def __init__(self):\n self.field_renames, self.model_renames = get_renames()\n self.data_mudada = {}\n self.choice_valida = {}\n\n def populate_renamed_fields(self, new, old):\n renames = self.field_renames[type(new)]\n\n for field in new._meta.fields:\n old_field_name = renames.get(field.name)\n field_type = field.get_internal_type()\n msg = (\"O valor do campo %s (%s) da model %s era inv\u00e1lido\" %\n (field.name, field_type, field.model.__name__))\n if old_field_name:\n old_value = getattr(old, old_field_name)\n if isinstance(field, models.ForeignKey):\n old_type = type(old) # not necessarily a model\n if hasattr(old_type, '_meta') and \\\n old_type._meta.pk.name != 'id':\n label = old.pk\n else:\n label = '-- SEM PK --'\n value = get_fk_related(field, old_value, label)\n else:\n value = getattr(old, old_field_name)\n if field_type == 'DateField' and \\\n not field.null and value is None:\n descricao = 'A data 1111-11-11 foi colocada no lugar'\n problema = 'O valor da data era nulo ou inv\u00e1lido'\n warn(msg +\n ' => ' + descricao)\n value = date(1111, 11, 11)\n self.data_mudada['obj'] = new\n self.data_mudada['descricao'] = descricao\n self.data_mudada['problema'] = problema\n self.data_mudada.setdefault('nome_campo', []).\\\n append(field.name)\n if field_type == 'CharField' or field_type == 'TextField':\n if value is None or value == 'None':\n value = ''\n if field.model._meta.label == 'sessao.RegistroVotacao' and \\\n field.name == 'ordem' and \\\n not isinstance(value, OrdemDia):\n try:\n new_value = ExpedienteMateria.objects.get(pk=value)\n setattr(new, 'expediente', new_value)\n setattr(new, field.name, None)\n continue\n except ObjectDoesNotExist:\n msg = 'FK [%s] n\u00e3o encontrada para valor %s ' \\\n '(em %s %s)' % (\n field.name, value,\n field.model.__name__, label or '---')\n with reversion.create_revision():\n value = make_stub(field.related_model, value)\n descricao = 'stub criado para entrada orf\u00e3!'\n warn(msg + ' => ' + descricao)\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n reversion.set_comment('Stub criado pela migra\u00e7\u00e3o')\n setattr(new, field.name, value)\n elif field.model.__name__ == 'TipoAutor' and \\\n field.name == 'content_type':\n\n model = normalize(new.descricao.lower()).replace(' ', '')\n content_types = field.related_model.objects.filter(\n model=model).exclude(app_label='legacy')\n assert len(content_types) <= 1\n\n value = content_types[0] if content_types else None\n setattr(new, field.name, value)\n\n def migrate(self, obj=appconfs, interativo=True):\n # warning: model/app migration order is of utmost importance\n exec_sql_file(PROJECT_DIR.child(\n 'sapl', 'legacy', 'scripts', 'fix_tables.sql'), 'legacy')\n self.to_delete = []\n\n # excluindo database antigo.\n if interativo:\n info('Todos os dados do banco ser\u00e3o excluidos. 
'\n 'Recomendamos que fa\u00e7a backup do banco sapl '\n 'antes de continuar.')\n info('Deseja continuar? [s/n]')\n resposta = input()\n if resposta.lower() in ['s', 'sim', 'y', 'yes']:\n pass\n else:\n info('Migra\u00e7\u00e3o cancelada.')\n return 0\n info('Excluindo entradas antigas do banco.')\n call([PROJECT_DIR.child('manage.py'), 'flush',\n '--settings=sapl.settings', '--database=default', '--no-input'],\n stdout=PIPE)\n\n info('Come\u00e7ando migra\u00e7\u00e3o: %s...' % obj)\n self._do_migrate(obj)\n # exclude logically deleted in legacy base\n info('Deletando models com ind_excluido...')\n while self.to_delete:\n for obj in self.to_delete:\n try:\n obj.delete()\n self.to_delete.remove(obj)\n except ProtectedError:\n msg = 'A entrada de PK %s da model %s n\u00e3o pode ser ' \\\n 'excluida' % (obj.pk, obj._meta.model_name)\n descricao = 'Um ou mais objetos protegidos '\n warn(msg + ' => ' + descricao)\n save_relation(obj=obj, problema=msg,\n descricao=descricao, eh_stub=False)\n\n info('Deletando stubs desnecess\u00e1rios...')\n while self.delete_stubs():\n pass\n info('Recriando unique constraints...')\n # recreate_constraints()\n\n def _do_migrate(self, obj):\n if isinstance(obj, AppConfig):\n models_to_migrate = (model for model in obj.models.values()\n if model in self.field_renames)\n self._do_migrate(models_to_migrate)\n elif isinstance(obj, ModelBase):\n # A migra\u00e7\u00e3o vai pular TipoProposicao e s\u00f3 vai migrar essa model\n # antes de migrar Proposicao. Isso deve acontecer por causa da\n # GenericRelation existente em TipoProposicao.\n if not obj.__name__ == 'TipoProposicao':\n if obj.__name__ == 'Proposicao':\n self.migrate_model(TipoProposicao)\n self.migrate_model(obj)\n elif hasattr(obj, '__iter__'):\n for item in obj:\n self._do_migrate(item)\n else:\n raise TypeError(\n 'Parameter must be a Model, AppConfig or a sequence of them')\n\n def migrate_model(self, model):\n print('Migrando %s...' 
% model.__name__)\n\n legacy_model_name = self.model_renames.get(model, model.__name__)\n legacy_model = legacy_app.get_model(legacy_model_name)\n legacy_pk_name = legacy_model._meta.pk.name\n\n # Clear all model entries\n # They may have been created in a previous migration attempt\n try:\n model.objects.all().delete()\n except ProtectedError:\n Proposicao.objects.all().delete()\n model.objects.all().delete()\n delete_constraints(model)\n\n # setup migration strategy for tables with or without a pk\n if legacy_pk_name == 'id':\n # There is no pk in the legacy table\n def save(new, old):\n with reversion.create_revision():\n new.save()\n reversion.set_comment('Objeto criado pela migra\u00e7\u00e3o')\n old_records = iter_sql_records(\n 'select * from ' + legacy_model._meta.db_table, 'legacy')\n else:\n def save(new, old):\n with reversion.create_revision():\n save_with_id(new, getattr(old, legacy_pk_name))\n reversion.set_comment('Objeto criado pela migra\u00e7\u00e3o')\n\n old_records = legacy_model.objects.all().order_by(legacy_pk_name)\n\n ajuste_antes_salvar = AJUSTE_ANTES_SALVAR.get(model)\n ajuste_depois_salvar = AJUSTE_DEPOIS_SALVAR.get(model)\n\n # convert old records to new ones\n for old in old_records:\n new = model()\n self.populate_renamed_fields(new, old)\n if ajuste_antes_salvar:\n ajuste_antes_salvar(new, old)\n save(new, old)\n if ajuste_depois_salvar:\n ajuste_depois_salvar(new, old)\n if self.data_mudada:\n with reversion.create_revision():\n save_relation(**self.data_mudada)\n self.data_mudada.clear()\n reversion.set_comment('Ajuste de data pela migra\u00e7\u00e3o')\n if getattr(old, 'ind_excluido', False):\n self.to_delete.append(new)\n\n def delete_stubs(self):\n excluidos = 0\n for obj in ProblemaMigracao.objects.all():\n if obj.content_object and obj.eh_stub:\n original = obj.content_type.get_all_objects_for_this_type(\n id=obj.object_id)\n if stub_desnecessario(original[0]):\n qtd_exclusoes, *_ = original.delete()\n assert qtd_exclusoes == 1\n qtd_exclusoes, *_ = obj.delete()\n assert qtd_exclusoes == 1\n excluidos = excluidos + 1\n elif not obj.content_object and not obj.eh_stub:\n qtd_exclusoes, *_ = obj.delete()\n assert qtd_exclusoes == 1\n excluidos = excluidos + 1\n return excluidos\n\n\ndef migrate(obj=appconfs, interativo=True):\n dm = DataMigrator()\n dm.migrate(obj, interativo)\n\n\n# MIGRATION_ADJUSTMENTS #####################################################\n\ndef adjust_ordemdia(new, old):\n # Prestar aten\u00e7\u00e3o\n if not old.tip_votacao:\n new.tipo_votacao = 1\n\n\ndef adjust_parlamentar(new, old):\n if old.ind_unid_deliberativa:\n value = new.unidade_deliberativa\n # Field is defined as not null in legacy db,\n # but data includes null values\n # => transform None to False\n if value is None:\n warn('nulo convertido para falso')\n new.unidade_deliberativa = False\n\n\ndef adjust_participacao(new, old):\n composicao = Composicao()\n composicao.comissao, composicao.periodo = [\n get_fk_related(Composicao._meta.get_field(name), value)\n for name, value in (('comissao', old.cod_comissao),\n ('periodo', old.cod_periodo_comp))]\n # check if there is already an \"equal\" one in the db\n already_created = Composicao.objects.filter(\n comissao=composicao.comissao, periodo=composicao.periodo)\n if already_created:\n assert len(already_created) == 1 # we must never have made 2 copies\n [composicao] = already_created\n else:\n with reversion.create_revision():\n composicao.save()\n reversion.set_comment('Objeto criado pela migra\u00e7\u00e3o')\n 
new.composicao = composicao\n\n\ndef adjust_protocolo(new, old):\n if new.numero is None and not primeira_vez:\n p = ProtocoloLegado.objects.filter(\n ano_protocolo=new.ano).aggregate(Max('num_protocolo'))\n numero_maximo = p['num_protocolo__max']\n new.numero = 1 if numero_maximo is None else numero_maximo + 1\n primeira_vez.append(True)\n if new.numero is None and primeira_vez:\n p = Protocolo.objects.filter(\n ano=new.ano).aggregate(Max('numero'))\n new.numero = p['numero__max'] + 1\n\n\ndef adjust_sessaoplenaria(new, old):\n assert not old.tip_expediente\n\n\ndef adjust_tipoproposicao(new, old):\n if old.ind_mat_ou_doc == 'M':\n new.tipo_conteudo_related = TipoMateriaLegislativa.objects.get(\n pk=old.tip_mat_ou_doc)\n elif old.ind_mat_ou_doc == 'D':\n new.tipo_conteudo_related = TipoDocumento.objects.get(\n pk=old.tip_mat_ou_doc)\n\n\ndef adjust_statustramitacao(new, old):\n if old.ind_fim_tramitacao:\n new.indicador = 'F'\n elif old.ind_retorno_tramitacao:\n new.indicador = 'R'\n else:\n new.indicador = ''\n\n\ndef adjust_statustramitacaoadm(new, old):\n adjust_statustramitacao(new, old)\n\n\ndef adjust_tramitacao(new, old):\n if old.sgl_turno == '\u00da':\n new.turno = 'U'\n\n\ndef adjust_normajuridica_antes_salvar(new, old):\n # Ajusta choice de esfera_federacao\n # O 'S' vem de 'Selecionar'. Na vers\u00e3o antiga do SAPL, quando uma op\u00e7\u00e3o do\n # combobox era selecionada, o sistema pegava a primeira letra da sele\u00e7\u00e3o,\n # sendo F para Federal, E para Estadual, M para Municipal e o S para\n # Selecionar, que era a primeira op\u00e7\u00e3o quando nada era selecionado.\n if old.tip_esfera_federacao == 'S':\n new.esfera_federacao = ''\n\n\ndef adjust_normajuridica_depois_salvar(new, old):\n # Ajusta rela\u00e7\u00e3o M2M\n lista_pks_assunto = old.cod_assunto.split(',')\n for pk_assunto in lista_pks_assunto:\n new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))\n\n\ndef adjust_protocolo_depois_salvar(new, old):\n if old.num_protocolo is None:\n with reversion.create_revision():\n problema = 'N\u00famero do protocolo de PK %s \u00e9 nulo' % new.pk\n descricao = 'N\u00famero do protocolo alterado para %s!' 
% new.numero\n warn(problema + ' => ' + descricao)\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('Numero de protocolo teve que ser alterado')\n\n\ndef adjust_autor(new, old):\n if old.cod_parlamentar:\n new.autor_related = Parlamentar.objects.get(pk=old.cod_parlamentar)\n new.nome = new.autor_related.nome_parlamentar\n elif old.cod_comissao:\n new.autor_related = Comissao.objects.get(pk=old.cod_comissao)\n\n if old.col_username:\n if not get_user_model().objects.filter(\n username=old.col_username).exists():\n user = get_user_model()(\n username=old.col_username, password=12345)\n with reversion.create_revision():\n user.save()\n reversion.set_comment('Objeto criado pela migra\u00e7\u00e3o')\n new.user = user\n else:\n new.user = get_user_model().objects.filter(\n username=old.col_username)[0]\n\n\ndef adjust_comissao(new, old):\n if old.dat_extincao:\n if date.today() < new.data_extincao:\n new.ativa = True\n else:\n new.ativa = False\n if not old.dat_extincao:\n new.ativa = True\n\n\nAJUSTE_ANTES_SALVAR = {\n Autor: adjust_autor,\n Comissao: adjust_comissao,\n NormaJuridica: adjust_normajuridica_antes_salvar,\n OrdemDia: adjust_ordemdia,\n Parlamentar: adjust_parlamentar,\n Participacao: adjust_participacao,\n Protocolo: adjust_protocolo,\n SessaoPlenaria: adjust_sessaoplenaria,\n TipoProposicao: adjust_tipoproposicao,\n StatusTramitacao: adjust_statustramitacao,\n StatusTramitacaoAdministrativo: adjust_statustramitacaoadm,\n Tramitacao: adjust_tramitacao,\n}\n\nAJUSTE_DEPOIS_SALVAR = {\n NormaJuridica: adjust_normajuridica_depois_salvar,\n Protocolo: adjust_protocolo_depois_salvar,\n}\n\n# CHECKS ####################################################################\n\n\ndef get_ind_excluido(obj):\n legacy_model = legacy_app.get_model(type(obj).__name__)\n return getattr(legacy_model.objects.get(\n **{legacy_model._meta.pk.name: obj.id}), 'ind_excluido', False)\n\n\ndef check_app_no_ind_excluido(app):\n for model in app.models.values():\n assert not any(get_ind_excluido(obj) for obj in model.objects.all())\n print('OK!')\n\n# MOMMY MAKE WITH LOG ######################################################\n\n\ndef make_with_log(model, _quantity=None, make_m2m=False, **attrs):\n last_value = get_last_value(model)\n alter_sequence(model, last_value + 1)\n fields_dict = get_fields_dict(model)\n stub = make(model, _quantity, make_m2m, **fields_dict)\n problema = 'Um stub foi necess\u00e1rio durante a cria\u00e7\u00e3o de um outro stub'\n descricao = 'Essa entrada \u00e9 necess\u00e1ria para um dos stubs criados'\n ' anteriormente'\n warn(problema)\n save_relation(obj=stub, problema=problema,\n descricao=descricao, eh_stub=True)\n return stub\n\nmake_with_log.required = foreign_key_required\n", "path": "sapl/legacy/migration.py"}], "after_files": [{"content": "import re\nfrom datetime import date\nfrom subprocess import PIPE, call\n\nimport pkg_resources\nimport reversion\nimport yaml\nfrom django.apps import apps\nfrom django.apps.config import AppConfig\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import OperationalError, ProgrammingError, connections, models\nfrom django.db.models import CharField, Max, ProtectedError, TextField\nfrom django.db.models.base import ModelBase\nfrom model_mommy import mommy\nfrom model_mommy.mommy import foreign_key_required, make\n\nfrom sapl.base.models import Autor, 
ProblemaMigracao\nfrom sapl.comissoes.models import Comissao, Composicao, Participacao\nfrom sapl.legacy.models import Protocolo as ProtocoloLegado\nfrom sapl.materia.models import (Proposicao, StatusTramitacao, TipoDocumento,\n TipoMateriaLegislativa, TipoProposicao,\n Tramitacao)\nfrom sapl.norma.models import (AssuntoNorma, NormaJuridica,\n TipoVinculoNormaJuridica)\nfrom sapl.parlamentares.models import Parlamentar\nfrom sapl.protocoloadm.models import Protocolo, StatusTramitacaoAdministrativo\nfrom sapl.sessao.models import ExpedienteMateria, OrdemDia, SessaoPlenaria\nfrom sapl.settings import PROJECT_DIR\nfrom sapl.utils import normalize\n\n# BASE ######################################################################\n# apps to be migrated, in app dependency order (very important)\nappconfs = [apps.get_app_config(n) for n in [\n 'parlamentares',\n 'comissoes',\n 'base',\n 'materia',\n 'norma',\n 'sessao',\n 'lexml',\n 'protocoloadm', ]]\n\nunique_constraints = []\none_to_one_constraints = []\nprimeira_vez = []\n\nname_sets = [set(m.__name__ for m in ac.get_models()) for ac in appconfs]\n\n# apps do not overlap\nfor s1 in name_sets:\n for s2 in name_sets:\n if s1 is not s2:\n assert not s1.intersection(s2)\n\n# apps include all legacy models\nlegacy_app = apps.get_app_config('legacy')\nlegacy_model_names = set(m.__name__ for m in legacy_app.get_models())\n\nmodel_dict = {m.__name__: m for ac in appconfs for m in ac.get_models()}\n\n\n# RENAMES ###################################################################\n\nMODEL_RENAME_PATTERN = re.compile('(.+) \\((.+)\\)')\n\n\ndef get_renames():\n field_renames = {}\n model_renames = {}\n for app in appconfs:\n app_rename_data = yaml.load(\n pkg_resources.resource_string(app.module.__name__, 'legacy.yaml'))\n for model_name, renames in app_rename_data.items():\n match = MODEL_RENAME_PATTERN.match(model_name)\n if match:\n model_name, old_name = match.groups()\n else:\n old_name = None\n model = getattr(app.models_module, model_name)\n if old_name:\n model_renames[model] = old_name\n field_renames[model] = renames\n\n # collect renames from parent classes\n for model, renames in field_renames.items():\n if any(parent in field_renames for parent in model.__mro__[1:]):\n renames = {}\n for parent in reversed(model.__mro__):\n if parent in field_renames:\n renames.update(field_renames[parent])\n field_renames[model] = renames\n\n # remove abstract classes\n field_renames = {m: r for m, r in field_renames.items()\n if not m._meta.abstract}\n\n return field_renames, model_renames\n\n# MIGRATION #################################################################\n\n\ndef info(msg):\n print('INFO: ' + msg)\n\n\ndef warn(msg):\n print('CUIDADO! 
' + msg)\n\n\ndef get_fk_related(field, value, label=None):\n if value is None and field.null is False:\n value = 0\n if value is not None:\n try:\n value = field.related_model.objects.get(id=value)\n except ObjectDoesNotExist:\n msg = 'FK [%s] n\u00e3o encontrada para valor %s ' \\\n '(em %s %s)' % (\n field.name, value,\n field.model.__name__, label or '---')\n if value == 0:\n if not field.null:\n fields_dict = get_fields_dict(field.related_model)\n # Cria stub ao final da tabela para evitar erros\n pk = 1\n if hasattr(field.related_model.objects.last(), 'pk'):\n pk = field.related_model.objects.last().pk\n with reversion.create_revision():\n reversion.set_comment('Stub criado pela migra\u00e7\u00e3o')\n value = mommy.make(\n field.related_model, **fields_dict,\n pk=(pk + 1 or 1))\n descricao = 'stub criado para campos n\u00e3o nul\u00e1veis!'\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n warn(msg + ' => ' + descricao)\n else:\n value = None\n else:\n if field.model._meta.label == 'sessao.RegistroVotacao' and \\\n field.name == 'ordem':\n return value\n # Caso TipoProposicao n\u00e3o exista, um objeto ser\u00e1 criado ent\u00e3o\n # com content_type=13 (ProblemaMigracao)\n if field.related_model.__name__ == 'TipoProposicao':\n tipo = TipoProposicao.objects.filter(descricao='Erro')\n if not tipo:\n with reversion.create_revision():\n reversion.set_comment(\n 'TipoProposicao \"Erro\" criado')\n ct = ContentType.objects.get(pk=13)\n value = TipoProposicao.objects.create(\n id=value, descricao='Erro', content_type=ct)\n else:\n value = tipo[0]\n else:\n with reversion.create_revision():\n reversion.set_comment('Stub criado pela migra\u00e7\u00e3o')\n value = make_stub(field.related_model, value)\n descricao = 'stub criado para entrada orf\u00e3!'\n warn(msg + ' => ' + descricao)\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n else:\n assert value\n return value\n\n\ndef get_field(model, fieldname):\n return model._meta.get_field(fieldname)\n\n\ndef exec_sql_file(path, db='default'):\n cursor = connections[db].cursor()\n for line in open(path):\n try:\n cursor.execute(line)\n except (OperationalError, ProgrammingError) as e:\n print(\"Args: '%s'\" % (str(e.args)))\n\n\ndef exec_sql(sql, db='default'):\n cursor = connections[db].cursor()\n cursor.execute(sql)\n return cursor\n\n\ndef iter_sql_records(sql, db):\n class Record:\n pass\n cursor = exec_sql(sql, db)\n fieldnames = [name[0] for name in cursor.description]\n for row in cursor.fetchall():\n record = Record()\n record.__dict__.update(zip(fieldnames, row))\n yield record\n\n\ndef delete_constraints(model):\n # pega nome da unique constraint dado o nome da tabela\n table = model._meta.db_table\n cursor = exec_sql(\"SELECT conname FROM pg_constraint WHERE conrelid = \"\n \"(SELECT oid FROM pg_class WHERE relname LIKE \"\n \"'%s') and contype = 'u';\" % (table))\n result = ()\n result = cursor.fetchall()\n # se existir um resultado, unique constraint ser\u00e1 deletado\n for r in result:\n if r[0].endswith('key'):\n words_list = r[0].split('_')\n one_to_one_constraints.append([table, r[0], words_list, model])\n else:\n args = None\n args_list = []\n if model._meta.unique_together:\n args = model._meta.unique_together[0]\n args_list = list(args)\n unique_constraints.append([table, r[0], args_list, model])\n warn('Excluindo unique constraint de nome %s' % r[0])\n exec_sql(\"ALTER TABLE %s DROP CONSTRAINT %s;\" %\n (table, r[0]))\n\n\ndef recreate_constraints():\n if one_to_one_constraints:\n 
for constraint in one_to_one_constraints:\n table, name, args, model = constraint\n args_string = ''\n args_string = \"(\" + \"_\".join(map(str, args[2:-1])) + \")\"\n exec_sql(\"ALTER TABLE %s ADD CONSTRAINT %s UNIQUE %s;\" %\n (table, name, args_string))\n if unique_constraints:\n for constraint in unique_constraints:\n table, name, args, model = constraint\n for i in range(len(args)):\n if isinstance(model._meta.get_field(args[i]),\n models.ForeignKey):\n args[i] = args[i] + '_id'\n args_string = ''\n args_string += \"(\" + ', '.join(map(str, args)) + \")\"\n exec_sql(\"ALTER TABLE %s ADD CONSTRAINT %s UNIQUE %s;\" %\n (table, name, args_string))\n one_to_one_constraints.clear()\n unique_constraints.clear()\n\n\ndef stub_desnecessario(obj):\n lista_fields = [\n f for f in obj._meta.get_fields()\n if (f.one_to_many or f.one_to_one) and f.auto_created\n ]\n desnecessario = not any(\n rr.related_model.objects.filter(**{rr.field.name: obj}).exists()\n for rr in lista_fields)\n return desnecessario\n\n\ndef get_last_value(model):\n last_value = model.objects.all().aggregate(Max('pk'))\n return last_value['pk__max'] if last_value['pk__max'] else 0\n\n\ndef alter_sequence(model, id):\n sequence_name = '%s_id_seq' % model._meta.db_table\n exec_sql('ALTER SEQUENCE %s RESTART WITH %s;' % (sequence_name, id))\n\n\ndef save_with_id(new, id):\n last_value = get_last_value(type(new))\n alter_sequence(type(new), id)\n new.save()\n alter_sequence(type(new), last_value + 1)\n assert new.id == id, 'New id is different from provided!'\n\n\ndef save_relation(obj, nome_campo='', problema='', descricao='',\n eh_stub=False):\n link = ProblemaMigracao(\n content_object=obj, nome_campo=nome_campo, problema=problema,\n descricao=descricao, eh_stub=eh_stub,\n )\n link.save()\n\n\ndef make_stub(model, id):\n fields_dict = get_fields_dict(model)\n new = mommy.prepare(model, **fields_dict, pk=id)\n save_with_id(new, id)\n\n return new\n\n\ndef get_fields_dict(model):\n all_fields = model._meta.get_fields()\n fields_dict = {}\n fields_dict = {f.name: '????????????'[:f.max_length]\n for f in all_fields\n if isinstance(f, (CharField, TextField)) and\n not f.choices and not f.blank}\n return fields_dict\n\n\ndef fill_vinculo_norma_juridica():\n lista = [('A', 'Altera a norma'),\n ('R', 'Revoga integralmente a norma'),\n ('P', 'Revoga parcialmente a norma'),\n ('T', 'Revoga integralmente por consolida\u00e7\u00e3o'),\n ('C', 'Norma Correlata'),\n ('S', 'Ressalva a Norma'),\n ('E', 'Reedita a Norma'),\n ('I', 'Reedita a Norma com Altera\u00e7\u00e3o'),\n ('G', 'Regulamenta a Norma'),\n ('K', 'Suspende parcialmente a norma'),\n ('L', 'Suspende integralmente a norma'),\n ('N', 'Julgada integralmente inconstitucional'),\n ('O', 'Julgada parcialmente inconstitucional')]\n lista_objs = [TipoVinculoNormaJuridica(sigla=item[0], descricao=item[1])\n for item in lista]\n TipoVinculoNormaJuridica.objects.bulk_create(lista_objs)\n\n\nclass DataMigrator:\n\n def __init__(self):\n self.field_renames, self.model_renames = get_renames()\n self.data_mudada = {}\n self.choice_valida = {}\n\n def populate_renamed_fields(self, new, old):\n renames = self.field_renames[type(new)]\n\n for field in new._meta.fields:\n old_field_name = renames.get(field.name)\n field_type = field.get_internal_type()\n msg = (\"O valor do campo %s (%s) da model %s era inv\u00e1lido\" %\n (field.name, field_type, field.model.__name__))\n if old_field_name:\n old_value = getattr(old, old_field_name)\n if isinstance(field, models.ForeignKey):\n old_type = type(old) 
# not necessarily a model\n if hasattr(old_type, '_meta') and \\\n old_type._meta.pk.name != 'id':\n label = old.pk\n else:\n label = '-- SEM PK --'\n value = get_fk_related(field, old_value, label)\n else:\n value = getattr(old, old_field_name)\n if field_type == 'DateField' and \\\n not field.null and value is None:\n descricao = 'A data 1111-11-11 foi colocada no lugar'\n problema = 'O valor da data era nulo ou inv\u00e1lido'\n warn(msg +\n ' => ' + descricao)\n value = date(1111, 11, 11)\n self.data_mudada['obj'] = new\n self.data_mudada['descricao'] = descricao\n self.data_mudada['problema'] = problema\n self.data_mudada.setdefault('nome_campo', []).\\\n append(field.name)\n if field_type == 'CharField' or field_type == 'TextField':\n if value is None or value == 'None':\n value = ''\n if field.model._meta.label == 'sessao.RegistroVotacao' and \\\n field.name == 'ordem' and \\\n not isinstance(value, OrdemDia):\n try:\n new_value = ExpedienteMateria.objects.get(pk=value)\n setattr(new, 'expediente', new_value)\n setattr(new, field.name, None)\n continue\n except ObjectDoesNotExist:\n msg = 'FK [%s] n\u00e3o encontrada para valor %s ' \\\n '(em %s %s)' % (\n field.name, value,\n field.model.__name__, label or '---')\n with reversion.create_revision():\n value = make_stub(field.related_model, value)\n descricao = 'stub criado para entrada orf\u00e3!'\n warn(msg + ' => ' + descricao)\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n reversion.set_comment('Stub criado pela migra\u00e7\u00e3o')\n setattr(new, field.name, value)\n elif field.model.__name__ == 'TipoAutor' and \\\n field.name == 'content_type':\n\n model = normalize(new.descricao.lower()).replace(' ', '')\n content_types = field.related_model.objects.filter(\n model=model).exclude(app_label='legacy')\n assert len(content_types) <= 1\n\n value = content_types[0] if content_types else None\n setattr(new, field.name, value)\n\n def migrate(self, obj=appconfs, interativo=True):\n # warning: model/app migration order is of utmost importance\n exec_sql_file(PROJECT_DIR.child(\n 'sapl', 'legacy', 'scripts', 'fix_tables.sql'), 'legacy')\n self.to_delete = []\n\n # excluindo database antigo.\n if interativo:\n info('Todos os dados do banco ser\u00e3o excluidos. '\n 'Recomendamos que fa\u00e7a backup do banco sapl '\n 'antes de continuar.')\n info('Deseja continuar? [s/n]')\n resposta = input()\n if resposta.lower() in ['s', 'sim', 'y', 'yes']:\n pass\n else:\n info('Migra\u00e7\u00e3o cancelada.')\n return 0\n info('Excluindo entradas antigas do banco.')\n call([PROJECT_DIR.child('manage.py'), 'flush',\n '--settings=sapl.settings', '--database=default', '--no-input'],\n stdout=PIPE)\n\n info('Come\u00e7ando migra\u00e7\u00e3o: %s...' 
% obj)\n self._do_migrate(obj)\n # exclude logically deleted in legacy base\n info('Deletando models com ind_excluido...')\n while self.to_delete:\n for obj in self.to_delete:\n try:\n obj.delete()\n self.to_delete.remove(obj)\n except ProtectedError:\n msg = 'A entrada de PK %s da model %s n\u00e3o pode ser ' \\\n 'excluida' % (obj.pk, obj._meta.model_name)\n descricao = 'Um ou mais objetos protegidos '\n warn(msg + ' => ' + descricao)\n save_relation(obj=obj, problema=msg,\n descricao=descricao, eh_stub=False)\n\n info('Deletando stubs desnecess\u00e1rios...')\n while self.delete_stubs():\n pass\n info('Recriando unique constraints...')\n # recreate_constraints()\n\n def _do_migrate(self, obj):\n if isinstance(obj, AppConfig):\n models_to_migrate = (model for model in obj.models.values()\n if model in self.field_renames)\n self._do_migrate(models_to_migrate)\n elif isinstance(obj, ModelBase):\n # A migra\u00e7\u00e3o vai pular TipoProposicao e s\u00f3 vai migrar essa model\n # antes de migrar Proposicao. Isso deve acontecer por causa da\n # GenericRelation existente em TipoProposicao.\n if not obj.__name__ == 'TipoProposicao':\n if obj.__name__ == 'Proposicao':\n self.migrate_model(TipoProposicao)\n self.migrate_model(obj)\n elif hasattr(obj, '__iter__'):\n for item in obj:\n self._do_migrate(item)\n else:\n raise TypeError(\n 'Parameter must be a Model, AppConfig or a sequence of them')\n\n def migrate_model(self, model):\n print('Migrando %s...' % model.__name__)\n\n legacy_model_name = self.model_renames.get(model, model.__name__)\n legacy_model = legacy_app.get_model(legacy_model_name)\n legacy_pk_name = legacy_model._meta.pk.name\n\n # Clear all model entries\n # They may have been created in a previous migration attempt\n try:\n model.objects.all().delete()\n except ProtectedError:\n Proposicao.objects.all().delete()\n model.objects.all().delete()\n delete_constraints(model)\n\n # setup migration strategy for tables with or without a pk\n if legacy_pk_name == 'id':\n # There is no pk in the legacy table\n def save(new, old):\n with reversion.create_revision():\n new.save()\n reversion.set_comment('Objeto criado pela migra\u00e7\u00e3o')\n old_records = iter_sql_records(\n 'select * from ' + legacy_model._meta.db_table, 'legacy')\n else:\n def save(new, old):\n with reversion.create_revision():\n save_with_id(new, getattr(old, legacy_pk_name))\n reversion.set_comment('Objeto criado pela migra\u00e7\u00e3o')\n\n old_records = legacy_model.objects.all().order_by(legacy_pk_name)\n\n ajuste_antes_salvar = AJUSTE_ANTES_SALVAR.get(model)\n ajuste_depois_salvar = AJUSTE_DEPOIS_SALVAR.get(model)\n\n # convert old records to new ones\n for old in old_records:\n new = model()\n self.populate_renamed_fields(new, old)\n if ajuste_antes_salvar:\n ajuste_antes_salvar(new, old)\n save(new, old)\n if ajuste_depois_salvar:\n ajuste_depois_salvar(new, old)\n if self.data_mudada:\n with reversion.create_revision():\n save_relation(**self.data_mudada)\n self.data_mudada.clear()\n reversion.set_comment('Ajuste de data pela migra\u00e7\u00e3o')\n if getattr(old, 'ind_excluido', False):\n self.to_delete.append(new)\n\n def delete_stubs(self):\n excluidos = 0\n for obj in ProblemaMigracao.objects.all():\n if obj.content_object and obj.eh_stub:\n original = obj.content_type.get_all_objects_for_this_type(\n id=obj.object_id)\n if stub_desnecessario(original[0]):\n qtd_exclusoes, *_ = original.delete()\n assert qtd_exclusoes == 1\n qtd_exclusoes, *_ = obj.delete()\n assert qtd_exclusoes == 1\n excluidos = 
excluidos + 1\n elif not obj.content_object and not obj.eh_stub:\n qtd_exclusoes, *_ = obj.delete()\n assert qtd_exclusoes == 1\n excluidos = excluidos + 1\n return excluidos\n\n\ndef migrate(obj=appconfs, interativo=True):\n dm = DataMigrator()\n dm.migrate(obj, interativo)\n\n\n# MIGRATION_ADJUSTMENTS #####################################################\n\ndef adjust_ordemdia(new, old):\n # Prestar aten\u00e7\u00e3o\n if not old.tip_votacao:\n new.tipo_votacao = 1\n\n\ndef adjust_parlamentar(new, old):\n if old.ind_unid_deliberativa:\n value = new.unidade_deliberativa\n # Field is defined as not null in legacy db,\n # but data includes null values\n # => transform None to False\n if value is None:\n warn('nulo convertido para falso')\n new.unidade_deliberativa = False\n\n\ndef adjust_participacao(new, old):\n composicao = Composicao()\n composicao.comissao, composicao.periodo = [\n get_fk_related(Composicao._meta.get_field(name), value)\n for name, value in (('comissao', old.cod_comissao),\n ('periodo', old.cod_periodo_comp))]\n # check if there is already an \"equal\" one in the db\n already_created = Composicao.objects.filter(\n comissao=composicao.comissao, periodo=composicao.periodo)\n if already_created:\n assert len(already_created) == 1 # we must never have made 2 copies\n [composicao] = already_created\n else:\n with reversion.create_revision():\n composicao.save()\n reversion.set_comment('Objeto criado pela migra\u00e7\u00e3o')\n new.composicao = composicao\n\n\ndef adjust_protocolo(new, old):\n if new.numero is None and not primeira_vez:\n p = ProtocoloLegado.objects.filter(\n ano_protocolo=new.ano).aggregate(Max('num_protocolo'))\n numero_maximo = p['num_protocolo__max']\n new.numero = 1 if numero_maximo is None else numero_maximo + 1\n primeira_vez.append(True)\n if new.numero is None and primeira_vez:\n p = Protocolo.objects.filter(\n ano=new.ano).aggregate(Max('numero'))\n new.numero = p['numero__max'] + 1\n\n\ndef adjust_sessaoplenaria(new, old):\n assert not old.tip_expediente\n\n\ndef adjust_tipoproposicao(new, old):\n if old.ind_mat_ou_doc == 'M':\n new.tipo_conteudo_related = TipoMateriaLegislativa.objects.get(\n pk=old.tip_mat_ou_doc)\n elif old.ind_mat_ou_doc == 'D':\n new.tipo_conteudo_related = TipoDocumento.objects.get(\n pk=old.tip_mat_ou_doc)\n\n\ndef adjust_statustramitacao(new, old):\n if old.ind_fim_tramitacao:\n new.indicador = 'F'\n elif old.ind_retorno_tramitacao:\n new.indicador = 'R'\n else:\n new.indicador = ''\n\n\ndef adjust_statustramitacaoadm(new, old):\n adjust_statustramitacao(new, old)\n\n\ndef adjust_tramitacao(new, old):\n if old.sgl_turno == '\u00da':\n new.turno = 'U'\n\n\ndef adjust_normajuridica_antes_salvar(new, old):\n # Ajusta choice de esfera_federacao\n # O 'S' vem de 'Selecionar'. 
Na vers\u00e3o antiga do SAPL, quando uma op\u00e7\u00e3o do\n # combobox era selecionada, o sistema pegava a primeira letra da sele\u00e7\u00e3o,\n # sendo F para Federal, E para Estadual, M para Municipal e o S para\n # Selecionar, que era a primeira op\u00e7\u00e3o quando nada era selecionado.\n if old.tip_esfera_federacao == 'S':\n new.esfera_federacao = ''\n\n\ndef adjust_normajuridica_depois_salvar(new, old):\n # Ajusta rela\u00e7\u00e3o M2M\n lista_pks_assunto = old.cod_assunto.split(',')\n\n # list(filter(..)) usado para retirar strings vazias da lista\n for pk_assunto in list(filter(None, lista_pks_assunto)):\n new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))\n\n\ndef adjust_protocolo_depois_salvar(new, old):\n if old.num_protocolo is None:\n with reversion.create_revision():\n problema = 'N\u00famero do protocolo de PK %s \u00e9 nulo' % new.pk\n descricao = 'N\u00famero do protocolo alterado para %s!' % new.numero\n warn(problema + ' => ' + descricao)\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('Numero de protocolo teve que ser alterado')\n\n\ndef adjust_autor(new, old):\n if old.cod_parlamentar:\n new.autor_related = Parlamentar.objects.get(pk=old.cod_parlamentar)\n new.nome = new.autor_related.nome_parlamentar\n elif old.cod_comissao:\n new.autor_related = Comissao.objects.get(pk=old.cod_comissao)\n\n if old.col_username:\n if not get_user_model().objects.filter(\n username=old.col_username).exists():\n user = get_user_model()(\n username=old.col_username, password=12345)\n with reversion.create_revision():\n user.save()\n reversion.set_comment('Objeto criado pela migra\u00e7\u00e3o')\n new.user = user\n else:\n new.user = get_user_model().objects.filter(\n username=old.col_username)[0]\n\n\ndef adjust_comissao(new, old):\n if old.dat_extincao:\n if date.today() < new.data_extincao:\n new.ativa = True\n else:\n new.ativa = False\n if not old.dat_extincao:\n new.ativa = True\n\n\nAJUSTE_ANTES_SALVAR = {\n Autor: adjust_autor,\n Comissao: adjust_comissao,\n NormaJuridica: adjust_normajuridica_antes_salvar,\n OrdemDia: adjust_ordemdia,\n Parlamentar: adjust_parlamentar,\n Participacao: adjust_participacao,\n Protocolo: adjust_protocolo,\n SessaoPlenaria: adjust_sessaoplenaria,\n TipoProposicao: adjust_tipoproposicao,\n StatusTramitacao: adjust_statustramitacao,\n StatusTramitacaoAdministrativo: adjust_statustramitacaoadm,\n Tramitacao: adjust_tramitacao,\n}\n\nAJUSTE_DEPOIS_SALVAR = {\n NormaJuridica: adjust_normajuridica_depois_salvar,\n Protocolo: adjust_protocolo_depois_salvar,\n}\n\n# CHECKS ####################################################################\n\n\ndef get_ind_excluido(obj):\n legacy_model = legacy_app.get_model(type(obj).__name__)\n return getattr(legacy_model.objects.get(\n **{legacy_model._meta.pk.name: obj.id}), 'ind_excluido', False)\n\n\ndef check_app_no_ind_excluido(app):\n for model in app.models.values():\n assert not any(get_ind_excluido(obj) for obj in model.objects.all())\n print('OK!')\n\n# MOMMY MAKE WITH LOG ######################################################\n\n\ndef make_with_log(model, _quantity=None, make_m2m=False, **attrs):\n last_value = get_last_value(model)\n alter_sequence(model, last_value + 1)\n fields_dict = get_fields_dict(model)\n stub = make(model, _quantity, make_m2m, **fields_dict)\n problema = 'Um stub foi necess\u00e1rio durante a cria\u00e7\u00e3o de um outro stub'\n descricao = 'Essa entrada \u00e9 necess\u00e1ria para um dos stubs criados'\n ' 
anteriormente'\n warn(problema)\n save_relation(obj=stub, problema=problema,\n descricao=descricao, eh_stub=True)\n return stub\n\nmake_with_log.required = foreign_key_required\n", "path": "sapl/legacy/migration.py"}]} |
gh_patches_debug_1489 | rasdani/github-patches | git_diff | uccser__cs-unplugged-862 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Only prepend www for production website
It should not be used for the development website.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/config/settings/production.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Django settings for production environment.
4
5 - Load secret values from environment variables.
6 - Set static URL to Google Cloud Storage Bucket.
7 """
8
9 from .base import * # noqa: F403
10
11
12 # SECRET CONFIGURATION
13 # ------------------------------------------------------------------------------
14 # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
15 # Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
16 SECRET_KEY = env("DJANGO_SECRET_KEY") # noqa: F405
17
18 # SECURITY WARNING: App Engine's security features ensure that it is safe to
19 # have ALLOWED_HOSTS = ["*"] when the app is deployed. If you deploy a Django
20 # app not on App Engine, make sure to set an appropriate host here.
21 # See https://docs.djangoproject.com/en/1.10/ref/settings/
22 ALLOWED_HOSTS = ["*"]
23
24 # URL Configuration
25 # ------------------------------------------------------------------------------
26 PREPEND_WWW = True
27
28 # DATABASE CONFIGURATION
29 # ----------------------------------------------------------------------------
30 # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
31 DATABASES = {
32 "default": {
33 "ENGINE": "django.db.backends.postgresql",
34 "NAME": "csunplugged",
35 "USER": env("GOOGLE_CLOUD_SQL_DATABASE_USERNAME"), # noqa: F405
36 "PASSWORD": env("GOOGLE_CLOUD_SQL_DATABASE_PASSWORD"), # noqa: F405
37 "HOST": "/cloudsql/" + env("GOOGLE_CLOUD_SQL_CONNECTION_NAME"), # noqa: F405
38 }
39 }
40 DATABASES["default"]["ATOMIC_REQUESTS"] = True
41
42 # Static files
43 STATIC_URL = "https://storage.googleapis.com/" + env("GOOGLE_CLOUD_STORAGE_BUCKET_NAME") + "/static/" # noqa: F405
44
45 # SECURITY CONFIGURATION
46 # ------------------------------------------------------------------------------
47 # See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
48 # and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
49
50 # set this to 60 seconds and then to 518400 when you can prove it works
51 SECURE_HSTS_SECONDS = 60
52 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
53 SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True) # noqa: F405
54 SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True) # noqa: F405
55 SECURE_BROWSER_XSS_FILTER = True
56 SESSION_COOKIE_SECURE = True
57 SESSION_COOKIE_HTTPONLY = True
58 SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True) # noqa: F405
59 CSRF_COOKIE_SECURE = True
60 CSRF_COOKIE_HTTPONLY = True
61 X_FRAME_OPTIONS = "DENY"
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/config/settings/production.py b/csunplugged/config/settings/production.py
--- a/csunplugged/config/settings/production.py
+++ b/csunplugged/config/settings/production.py
@@ -23,7 +23,10 @@
# URL Configuration
# ------------------------------------------------------------------------------
-PREPEND_WWW = True
+if env("DEPLOYMENT", default=None) == "prod": # noqa: F405
+ PREPEND_WWW = True
+else:
+ PREPEND_WWW = False
# DATABASE CONFIGURATION
# ----------------------------------------------------------------------------
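
Read on its own, the patched toggle amounts to the following standalone sketch; it assumes a `DEPLOYMENT` environment variable that only the production deployment sets to `prod` (in the repository the lookup goes through the `env()` helper imported from `base.py`):

```python
# Minimal sketch of the intended behaviour, independent of Django:
# PREPEND_WWW should be enabled only when DEPLOYMENT is exactly "prod".
import os

def should_prepend_www():
    # True only for the production deployment; any other value, or an
    # unset variable, leaves the www redirect off.
    return os.environ.get("DEPLOYMENT") == "prod"

PREPEND_WWW = should_prepend_www()
```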
| {"golden_diff": "diff --git a/csunplugged/config/settings/production.py b/csunplugged/config/settings/production.py\n--- a/csunplugged/config/settings/production.py\n+++ b/csunplugged/config/settings/production.py\n@@ -23,7 +23,10 @@\n \n # URL Configuration\n # ------------------------------------------------------------------------------\n-PREPEND_WWW = True\n+if env(\"DEPLOYMENT\", default=None) == \"prod\": # noqa: F405\n+ PREPEND_WWW = True\n+else:\n+ PREPEND_WWW = False\n \n # DATABASE CONFIGURATION\n # ----------------------------------------------------------------------------\n", "issue": "Only prepend www for production website\nIt should not be used for development website.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for production environment.\n\n- Load secret values from environment variables.\n- Set static URL to Google Cloud Storage Bucket.\n\"\"\"\n\nfrom .base import * # noqa: F403\n\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n\n# SECURITY WARNING: App Engine\"s security features ensure that it is safe to\n# have ALLOWED_HOSTS = [\"*\"] when the app is deployed. If you deploy a Django\n# app not on App Engine, make sure to set an appropriate host here.\n# See https://docs.djangoproject.com/en/1.10/ref/settings/\nALLOWED_HOSTS = [\"*\"]\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nPREPEND_WWW = True\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n \"HOST\": \"/cloudsql/\" + env(\"GOOGLE_CLOUD_SQL_CONNECTION_NAME\"), # noqa: F405\n }\n}\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True\n\n# Static files\nSTATIC_URL = \"https://storage.googleapis.com/\" + env(\"GOOGLE_CLOUD_STORAGE_BUCKET_NAME\") + \"/static/\" # noqa: F405\n\n# SECURITY CONFIGURATION\n# ------------------------------------------------------------------------------\n# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security\n# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy\n\n# set this to 60 seconds and then to 518400 when you can prove it works\nSECURE_HSTS_SECONDS = 60\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True) # noqa: F405\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF\", default=True) # noqa: F405\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True) # noqa: F405\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nX_FRAME_OPTIONS = \"DENY\"\n", "path": "csunplugged/config/settings/production.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for production 
environment.\n\n- Load secret values from environment variables.\n- Set static URL to Google Cloud Storage Bucket.\n\"\"\"\n\nfrom .base import * # noqa: F403\n\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n\n# SECURITY WARNING: App Engine\"s security features ensure that it is safe to\n# have ALLOWED_HOSTS = [\"*\"] when the app is deployed. If you deploy a Django\n# app not on App Engine, make sure to set an appropriate host here.\n# See https://docs.djangoproject.com/en/1.10/ref/settings/\nALLOWED_HOSTS = [\"*\"]\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nif env(\"DEPLOYMENT\", default=None) == \"prod\": # noqa: F405\n PREPEND_WWW = True\nelse:\n PREPEND_WWW = False\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n \"HOST\": \"/cloudsql/\" + env(\"GOOGLE_CLOUD_SQL_CONNECTION_NAME\"), # noqa: F405\n }\n}\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True\n\n# Static files\nSTATIC_URL = \"https://storage.googleapis.com/\" + env(\"GOOGLE_CLOUD_STORAGE_BUCKET_NAME\") + \"/static/\" # noqa: F405\n\n# SECURITY CONFIGURATION\n# ------------------------------------------------------------------------------\n# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security\n# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy\n\n# set this to 60 seconds and then to 518400 when you can prove it works\nSECURE_HSTS_SECONDS = 60\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True) # noqa: F405\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF\", default=True) # noqa: F405\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True) # noqa: F405\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nX_FRAME_OPTIONS = \"DENY\"\n", "path": "csunplugged/config/settings/production.py"}]} |
gh_patches_debug_1490 | rasdani/github-patches | git_diff | kivy__kivy-2526 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inspector property list scrolling selects an underlying widget
While scrolling with the mouse wheel through the property list of a selected widget, a different widget (one underneath the property list) is selected when the scroll reaches the top or the bottom of the list.
The same happens while trying to drag the view with the scrollbar.
--- END ISSUE ---
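For context, the property list described here belongs to the Inspector module. A minimal way to set up a reproduction, assuming the usual `kivy.modules.inspector` helper (press Ctrl+E to open the inspector once the app is running):

```python
# Hedged reproduction sketch: attach the inspector to a trivial app, open it
# with Ctrl+E, select a widget and scroll its property list past either end.
from kivy.app import App
from kivy.core.window import Window
from kivy.modules import inspector
from kivy.uix.button import Button


class InspectorDemo(App):
    def build(self):
        root = Button(text="inspect me")
        inspector.create_inspector(Window, root)  # assumed helper from the docs
        return root


if __name__ == "__main__":
    InspectorDemo().run()
```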
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/uix/scrollview.py`
Content:
```
1 '''Scroll View
2 ===========
3
4 .. versionadded:: 1.0.4
5
6 The :class:`ScrollView` widget provides a scrollable/pannable viewport that is
7 clipped at the scrollview's bounding box.
8
9
10 Scrolling Behavior
11 ------------------
12
13 The ScrollView accepts only one child and applies a viewport/window to
14 it according to the :attr:`ScrollView.scroll_x` and
15 :attr:`ScrollView.scroll_y` properties. Touches are analyzed to
16 determine if the user wants to scroll or control the child in some
17 other manner - you cannot do both at the same time. To determine if
18 interaction is a scrolling gesture, these properties are used:
19
20 - :attr:`ScrollView.scroll_distance`: the minimum distance to travel,
21 defaults to 20 pixels.
22 - :attr:`ScrollView.scroll_timeout`: the maximum time period, defaults
23 to 250 milliseconds.
24
25 If a touch travels :attr:`~ScrollView.scroll_distance` pixels within the
26 :attr:`~ScrollView.scroll_timeout` period, it is recognized as a scrolling
27 gesture and translation (scroll/pan) will begin. If the timeout occurs, the
28 touch down event is dispatched to the child instead (no translation).
29
30 The default value for those settings can be changed in the configuration file::
31
32 [widgets]
33 scroll_timeout = 250
34 scroll_distance = 20
35
36 .. versionadded:: 1.1.1
37
38 ScrollView now animates scrolling in Y when a mousewheel is used.
39
40
41 Limiting to the X or Y Axis
42 ---------------------------
43
44 By default, the ScrollView allows scrolling in both the X and Y axes. You can
45 explicitly disable scrolling on an axis by setting
46 :attr:`ScrollView.do_scroll_x` or :attr:`ScrollView.do_scroll_y` to False.
47
48
49 Managing the Content Size and Position
50 --------------------------------------
51
52 ScrollView manages the position of its children similarly to a
53 RelativeLayout (see :mod:`~kivy.uix.relativelayout`) but not the size. You must
54 carefully specify the `size_hint` of your content to get the desired
55 scroll/pan effect.
56
57 By default, size_hint is (1, 1), so the content size will fit your ScrollView
58 exactly (you will have nothing to scroll). You must deactivate at least one of
59 the size_hint instructions (x or y) of the child to enable scrolling.
60
61 To scroll a :class:`GridLayout` on Y-axis/vertically, set the child's width
62 identical to that of the ScrollView (size_hint_x=1, default), and set the
63 size_hint_y property to None::
64
65 layout = GridLayout(cols=1, spacing=10, size_hint_y=None)
66 # Make sure the height is such that there is something to scroll.
67 layout.bind(minimum_height=layout.setter('height'))
68 for i in range(30):
69 btn = Button(text=str(i), size_hint_y=None, height=40)
70 layout.add_widget(btn)
71 root = ScrollView(size_hint=(None, None), size=(400, 400))
72 root.add_widget(layout)
73
74
75 Overscroll Effects
76 ------------------
77
78 .. versionadded:: 1.7.0
79
80 When scrolling would exceed the bounds of the :class:`ScrollView`, it
81 uses a :class:`~kivy.effects.scroll.ScrollEffect` to handle the
82 overscroll. These effects can perform actions like bouncing back,
83 changing opacity, or simply preventing scrolling beyond the normal
84 boundaries. Note that complex effects may perform many computations,
85 which can be slow on weaker hardware.
86
87 You can change what effect is being used by setting
88 :attr:`ScrollView.effect_cls` to any effect class. Current options
89 include:
90
91 - :class:`~kivy.effects.scroll.ScrollEffect`: Does not allow
92 scrolling beyond the :class:`ScrollView` boundaries.
93 - :class:`~kivy.effects.dampedscroll.DampedScrollEffect`: The
94 current default. Allows the user to scroll beyond the normal
95 boundaries, but has the content spring back once the
96 touch/click is released.
97 - :class:`~kivy.effects.opacityscroll.OpacityScrollEffect`: Similar
98 to the :class:`~kivy.effect.dampedscroll.DampedScrollEffect`, but
99 also reduces opacity during overscroll.
100
101 You can also create your own scroll effect by subclassing one of these,
102 then pass it as the :attr:`~ScrollView.effect_cls` in the same way.
103
104 Alternatively, you can set :attr:`ScrollView.effect_x` and/or
105 :attr:`ScrollView.effect_y` to an *instance* of the effect you want to
106 use. This will override the default effect set in
107 :attr:`ScrollView.effect_cls`.
108
109 All the effects are located in the :mod:`kivy.effects`.
110
111 '''
112
113 __all__ = ('ScrollView', )
114
115 from functools import partial
116 from kivy.animation import Animation
117 from kivy.compat import string_types
118 from kivy.config import Config
119 from kivy.clock import Clock
120 from kivy.factory import Factory
121 from kivy.uix.stencilview import StencilView
122 from kivy.metrics import sp
123 from kivy.effects.dampedscroll import DampedScrollEffect
124 from kivy.properties import NumericProperty, BooleanProperty, AliasProperty, \
125 ObjectProperty, ListProperty, ReferenceListProperty, OptionProperty
126
127
128 # When we are generating documentation, Config doesn't exist
129 _scroll_timeout = _scroll_distance = 0
130 if Config:
131 _scroll_timeout = Config.getint('widgets', 'scroll_timeout')
132 _scroll_distance = sp(Config.getint('widgets', 'scroll_distance'))
133
134
135 class ScrollView(StencilView):
136 '''ScrollView class. See module documentation for more information.
137
138 .. versionchanged:: 1.7.0
139 `auto_scroll`, `scroll_friction`, `scroll_moves`, `scroll_stoptime` has
140 been deprecated, use :attr:`effect_cls` instead.
141 '''
142
143 scroll_distance = NumericProperty(_scroll_distance)
144 '''Distance to move before scrolling the :class:`ScrollView`, in pixels. As
145 soon as the distance has been traveled, the :class:`ScrollView` will start
146 to scroll, and no touch event will go to children.
147 It is advisable that you base this value on the dpi of your target device's
148 screen.
149
150 :attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and
151 defaults to 20 (pixels), according to the default value in user
152 configuration.
153 '''
154
155 scroll_wheel_distance = NumericProperty(20)
156 '''Distance to move when scrolling with a mouse wheel.
157 It is advisable that you base this value on the dpi of your target device's
158 screen.
159
160 .. versionadded:: 1.8.0
161
162 :attr:`scroll_wheel_distance` is a
163 :class:`~kivy.properties.NumericProperty` , defaults to 20 pixels.
164 '''
165
166 scroll_timeout = NumericProperty(_scroll_timeout)
167 '''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.
168 If the user has not moved :attr:`scroll_distance` within the timeout,
169 the scrolling will be disabled, and the touch event will go to the
170 children.
171
172 :attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and
173 defaults to 55 (milliseconds) according to the default value in user
174 configuration.
175
176 .. versionchanged:: 1.5.0
177 Default value changed from 250 to 55.
178 '''
179
180 scroll_x = NumericProperty(0.)
181 '''X scrolling value, between 0 and 1. If 0, the content's left side will
182 touch the left side of the ScrollView. If 1, the content's right side will
183 touch the right side.
184
186 This property is controlled by :class:`ScrollView` only if
186 :attr:`do_scroll_x` is True.
187
188 :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and
189 defaults to 0.
190 '''
191
192 scroll_y = NumericProperty(1.)
193 '''Y scrolling value, between 0 and 1. If 0, the content's bottom side will
194 touch the bottom side of the ScrollView. If 1, the content's top side will
195 touch the top side.
196
198 This property is controlled by :class:`ScrollView` only if
198 :attr:`do_scroll_y` is True.
199
200 :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and
201 defaults to 1.
202 '''
203
204 do_scroll_x = BooleanProperty(True)
205 '''Allow scroll on X axis.
206
207 :attr:`do_scroll_x` is a :class:`~kivy.properties.BooleanProperty` and
208 defaults to True.
209 '''
210
211 do_scroll_y = BooleanProperty(True)
212 '''Allow scroll on Y axis.
213
214 :attr:`do_scroll_y` is a :class:`~kivy.properties.BooleanProperty` and
215 defaults to True.
216 '''
217
218 def _get_do_scroll(self):
219 return (self.do_scroll_x, self.do_scroll_y)
220
221 def _set_do_scroll(self, value):
222 if type(value) in (list, tuple):
223 self.do_scroll_x, self.do_scroll_y = value
224 else:
225 self.do_scroll_x = self.do_scroll_y = bool(value)
226 do_scroll = AliasProperty(_get_do_scroll, _set_do_scroll,
227 bind=('do_scroll_x', 'do_scroll_y'))
228 '''Allow scroll on X or Y axis.
229
230 :attr:`do_scroll` is a :class:`~kivy.properties.AliasProperty` of
231 (:attr:`do_scroll_x` + :attr:`do_scroll_y`)
232 '''
233
234 def _get_vbar(self):
235 # must return (y, height) in %
236 # calculate the viewport size / scrollview size %
237 if self._viewport is None:
238 return 0, 1.
239 vh = self._viewport.height
240 h = self.height
241 if vh < h or vh == 0:
242 return 0, 1.
243 ph = max(0.01, h / float(vh))
244 sy = min(1.0, max(0.0, self.scroll_y))
245 py = (1. - ph) * sy
246 return (py, ph)
247
248 vbar = AliasProperty(_get_vbar, None, bind=(
249 'scroll_y', '_viewport', 'viewport_size'))
250 '''Return a tuple of (position, size) of the vertical scrolling bar.
251
252 .. versionadded:: 1.2.0
253
254 The position and size are normalized between 0-1, and represent a
255 percentage of the current scrollview height. This property is used
256 internally for drawing the little vertical bar when you're scrolling.
257
258 :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.
259 '''
260
261 def _get_hbar(self):
262 # must return (x, width) in %
263 # calculate the viewport size / scrollview size %
264 if self._viewport is None:
265 return 0, 1.
266 vw = self._viewport.width
267 w = self.width
268 if vw < w or vw == 0:
269 return 0, 1.
270 pw = max(0.01, w / float(vw))
271 sx = min(1.0, max(0.0, self.scroll_x))
272 px = (1. - pw) * sx
273 return (px, pw)
274
275 hbar = AliasProperty(_get_hbar, None, bind=(
276 'scroll_x', '_viewport', 'viewport_size'))
277 '''Return a tuple of (position, size) of the horizontal scrolling bar.
278
279 .. versionadded:: 1.2.0
280
281 The position and size are normalized between 0-1, and represent a
282 percentage of the current scrollview height. This property is used
283 internally for drawing the little horizontal bar when you're scrolling.
284
285 :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.
286 '''
287
288 bar_color = ListProperty([.7, .7, .7, .9])
289 '''Color of horizontal / vertical scroll bar, in RGBA format.
290
291 .. versionadded:: 1.2.0
292
293 :attr:`bar_color` is a :class:`~kivy.properties.ListProperty` and defaults
294 to [.7, .7, .7, .9].
295 '''
296
297 bar_inactive_color = ListProperty([.7, .7, .7, .2])
298 '''Color of horizontal / vertical scroll bar (in RGBA format), when no
299 scroll is happening.
300
301 .. versionadded:: 1.9.0
302
303 :attr:`bar_inactive_color` is a
304 :class:`~kivy.properties.ListProperty` and defaults to [.7, .7, .7, .2].
305 '''
306
307 bar_width = NumericProperty('2dp')
308 '''Width of the horizontal / vertical scroll bar. The width is interpreted
309 as a height for the horizontal bar.
310
311 .. versionadded:: 1.2.0
312
313 :attr:`bar_width` is a :class:`~kivy.properties.NumericProperty` and
314 defaults to 2.
315 '''
316
317 bar_pos_x = OptionProperty('bottom', options=('top', 'bottom'))
318 '''Which side of the ScrollView the horizontal scroll bar should go
319 on. Possible values are 'top' and 'bottom'.
320
321 .. versionadded:: 1.8.0
322
323 :attr:`bar_pos_x` is an :class:`~kivy.properties.OptionProperty`,
324 default to 'bottom'
325
326 '''
327
328 bar_pos_y = OptionProperty('right', options=('left', 'right'))
329 '''Which side of the ScrollView the vertical scroll bar should go
330 on. Possible values are 'left' and 'right'.
331
332 .. versionadded:: 1.8.0
333
334 :attr:`bar_pos_y` is an :class:`~kivy.properties.OptionProperty`,
335 default to 'right'
336
337 '''
338
339 bar_pos = ReferenceListProperty(bar_pos_x, bar_pos_y)
340 '''Which side of the scroll view to place each of the bars on.
341
342 :attr:`bar_pos` is a :class:`~kivy.properties.ReferenceListProperty` of
343 (:attr:`bar_pos_x`, :attr:`bar_pos_y`)
344 '''
345
346 bar_margin = NumericProperty(0)
347 '''Margin between the bottom / right side of the scrollview when drawing
348 the horizontal / vertical scroll bar.
349
350 .. versionadded:: 1.2.0
351
352 :attr:`bar_margin` is a :class:`~kivy.properties.NumericProperty`, default
353 to 0
354 '''
355
356 effect_cls = ObjectProperty(DampedScrollEffect, allownone=True)
357 '''Class effect to instantiate for X and Y axis.
358
359 .. versionadded:: 1.7.0
360
361 :attr:`effect_cls` is an :class:`~kivy.properties.ObjectProperty` and
362 defaults to :class:`DampedScrollEffect`.
363
364 .. versionchanged:: 1.8.0
365 If you set a string, the :class:`~kivy.factory.Factory` will be used to
366 resolve the class.
367
368 '''
369
370 effect_x = ObjectProperty(None, allownone=True)
371 '''Effect to apply for the X axis. If None is set, an instance of
372 :attr:`effect_cls` will be created.
373
374 .. versionadded:: 1.7.0
375
376 :attr:`effect_x` is an :class:`~kivy.properties.ObjectProperty` and
377 defaults to None.
378 '''
379
380 effect_y = ObjectProperty(None, allownone=True)
381 '''Effect to apply for the Y axis. If None is set, an instance of
382 :attr:`effect_cls` will be created.
383
384 .. versionadded:: 1.7.0
385
386 :attr:`effect_y` is an :class:`~kivy.properties.ObjectProperty` and
387 defaults to None, read-only.
388 '''
389
390 viewport_size = ListProperty([0, 0])
391 '''(internal) Size of the internal viewport. This is the size of your only
392 child in the scrollview.
393 '''
394
395 scroll_type = OptionProperty(['content'], options=(['content'], ['bars'],
396 ['bars', 'content'], ['content', 'bars']))
397 '''Sets the type of scrolling to use for the content of the scrollview.
398 Available options are: ['content'], ['bars'], ['bars', 'content'].
399
400 .. versionadded:: 1.8.0
401
402 :attr:`scroll_type` is a :class:`~kivy.properties.OptionProperty`, defaults
403 to ['content'].
404 '''
405
406 # private, for internal use only
407
408 _viewport = ObjectProperty(None, allownone=True)
409 _bar_color = ListProperty([0, 0, 0, 0])
410
411 def _set_viewport_size(self, instance, value):
412 self.viewport_size = value
413
414 def on__viewport(self, instance, value):
415 if value:
416 value.bind(size=self._set_viewport_size)
417 self.viewport_size = value.size
418
419 def __init__(self, **kwargs):
420 self._touch = None
421 self._trigger_update_from_scroll = Clock.create_trigger(
422 self.update_from_scroll, -1)
423 # create a specific canvas for the viewport
424 from kivy.graphics import PushMatrix, Translate, PopMatrix, Canvas
425 self.canvas_viewport = Canvas()
426 self.canvas = Canvas()
427 with self.canvas_viewport.before:
428 PushMatrix()
429 self.g_translate = Translate(0, 0)
430 with self.canvas_viewport.after:
431 PopMatrix()
432
433 super(ScrollView, self).__init__(**kwargs)
434
435 self.register_event_type('on_scroll_start')
436 self.register_event_type('on_scroll_move')
437 self.register_event_type('on_scroll_stop')
438
439 # now add the viewport canvas to our canvas
440 self.canvas.add(self.canvas_viewport)
441
442 effect_cls = self.effect_cls
443 if isinstance(effect_cls, string_types):
444 effect_cls = Factory.get(effect_cls)
445 if self.effect_x is None and effect_cls is not None:
446 self.effect_x = effect_cls(target_widget=self._viewport)
447 if self.effect_y is None and effect_cls is not None:
448 self.effect_y = effect_cls(target_widget=self._viewport)
449 self.bind(
450 width=self._update_effect_x_bounds,
451 height=self._update_effect_y_bounds,
452 viewport_size=self._update_effect_bounds,
453 _viewport=self._update_effect_widget,
454 scroll_x=self._trigger_update_from_scroll,
455 scroll_y=self._trigger_update_from_scroll,
456 pos=self._trigger_update_from_scroll,
457 size=self._trigger_update_from_scroll)
458
459 self._update_effect_widget()
460 self._update_effect_x_bounds()
461 self._update_effect_y_bounds()
462
463 def on_effect_x(self, instance, value):
464 if value:
465 value.bind(scroll=self._update_effect_x)
466 value.target_widget = self._viewport
467
468 def on_effect_y(self, instance, value):
469 if value:
470 value.bind(scroll=self._update_effect_y)
471 value.target_widget = self._viewport
472
473 def on_effect_cls(self, instance, cls):
474 if isinstance(cls, string_types):
475 cls = Factory.get(cls)
476 self.effect_x = cls(target_widget=self._viewport)
477 self.effect_x.bind(scroll=self._update_effect_x)
478 self.effect_y = cls(target_widget=self._viewport)
479 self.effect_y.bind(scroll=self._update_effect_y)
480
481 def _update_effect_widget(self, *args):
482 if self.effect_x:
483 self.effect_x.target_widget = self._viewport
484 if self.effect_y:
485 self.effect_y.target_widget = self._viewport
486
487 def _update_effect_x_bounds(self, *args):
488 if not self._viewport or not self.effect_x:
489 return
490 self.effect_x.min = -(self.viewport_size[0] - self.width)
491 self.effect_x.max = 0
492 self.effect_x.value = self.effect_x.min * self.scroll_x
493
494 def _update_effect_y_bounds(self, *args):
495 if not self._viewport or not self.effect_y:
496 return
497 self.effect_y.min = -(self.viewport_size[1] - self.height)
498 self.effect_y.max = 0
499 self.effect_y.value = self.effect_y.min * self.scroll_y
500
501 def _update_effect_bounds(self, *args):
502 if not self._viewport:
503 return
504 if self.effect_x:
505 self._update_effect_x_bounds()
506 if self.effect_y:
507 self._update_effect_y_bounds()
508
509 def _update_effect_x(self, *args):
510 vp = self._viewport
511 if not vp or not self.effect_x:
512 return
513 sw = vp.width - self.width
514 if sw < 1:
515 return
516 sx = self.effect_x.scroll / float(sw)
517 self.scroll_x = -sx
518 self._trigger_update_from_scroll()
519
520 def _update_effect_y(self, *args):
521 vp = self._viewport
522 if not vp or not self.effect_y:
523 return
524 sh = vp.height - self.height
525 if sh < 1:
526 return
527 sy = self.effect_y.scroll / float(sh)
528 self.scroll_y = -sy
529 self._trigger_update_from_scroll()
530
531 def to_local(self, x, y, **k):
532 tx, ty = self.g_translate.xy
533 return x - tx, y - ty
534
535 def to_parent(self, x, y, **k):
536 tx, ty = self.g_translate.xy
537 return x + tx, y + ty
538
539 def simulate_touch_down(self, touch):
540 # at this point the touch is in parent coords
541 touch.push()
542 touch.apply_transform_2d(self.to_local)
543 ret = super(ScrollView, self).on_touch_down(touch)
544 touch.pop()
545 return ret
546
547 def on_touch_down(self, touch):
548 if self.dispatch('on_scroll_start', touch):
549 self._touch = touch
550 touch.grab(self)
551 return True
552
553 def on_scroll_start(self, touch, check_children=True):
554 if check_children:
555 touch.push()
556 touch.apply_transform_2d(self.to_local)
557 if self.dispatch_children('on_scroll_start', touch):
558 return True
559 touch.pop()
560
561 if not self.collide_point(*touch.pos):
562 touch.ud[self._get_uid('svavoid')] = True
563 return
564 if self.disabled:
565 return True
566 if self._touch or (not (self.do_scroll_x or self.do_scroll_y)):
567 return self.simulate_touch_down(touch)
568
569 # handle mouse scrolling, only if the viewport size is bigger than the
570 # scrollview size, and if the user allowed to do it
571 vp = self._viewport
572 if not vp:
573 return True
574 scroll_type = self.scroll_type
575 ud = touch.ud
576 scroll_bar = 'bars' in scroll_type
577
578 # check if touch is in bar_x (horizontal) or bar_y (vertical)
579 ud['in_bar_x'] = ud['in_bar_y'] = False
580 width_scrollable = vp.width > self.width
581 height_scrollable = vp.height > self.height
582 bar_pos_x = self.bar_pos_x[0]
583 bar_pos_y = self.bar_pos_y[0]
584
585 d = {'b': True if touch.y < self.y + self.bar_width else False,
586 't': True if touch.y > self.top - self.bar_width else False,
587 'l': True if touch.x < self.x + self.bar_width else False,
588 'r': True if touch.x > self.right - self.bar_width else False}
589 if scroll_bar:
590 if (width_scrollable and d[bar_pos_x]):
591 ud['in_bar_x'] = True
592 if (height_scrollable and d[bar_pos_y]):
593 ud['in_bar_y'] = True
594
595 if vp and 'button' in touch.profile and \
596 touch.button.startswith('scroll'):
597 btn = touch.button
598 m = sp(self.scroll_wheel_distance)
599 e = None
600
601 if ((btn == 'scrolldown' and self.scroll_y >= 1) or
602 (btn == 'scrollup' and self.scroll_y <= 0) or
603 (btn == 'scrollleft' and self.scroll_x <= 0) or
604 (btn == 'scrollright' and self.scroll_x >= 1)):
605 return False
606
607 if (self.effect_x and self.do_scroll_y and height_scrollable
608 and btn in ('scrolldown', 'scrollup')):
609 e = self.effect_x if ud['in_bar_x'] else self.effect_y
610
611 elif (self.effect_y and self.do_scroll_x and width_scrollable
612 and btn in ('scrollleft', 'scrollright')):
613 e = self.effect_y if ud['in_bar_y'] else self.effect_x
614
615 if e:
616 if btn in ('scrolldown', 'scrollleft'):
617 e.value = max(e.value - m, e.min)
618 e.velocity = 0
619 elif btn in ('scrollup', 'scrollright'):
620 e.value = min(e.value + m, e.max)
621 e.velocity = 0
622 touch.ud[self._get_uid('svavoid')] = True
623 e.trigger_velocity_update()
624 return True
625
626 # no mouse scrolling, so the user is going to drag the scrollview with
627 # this touch.
628 self._touch = touch
629 uid = self._get_uid()
630
631 ud[uid] = {
632 'mode': 'unknown',
633 'dx': 0,
634 'dy': 0,
635 'user_stopped': False,
636 'frames': Clock.frames,
637 'time': touch.time_start}
638
639 if self.do_scroll_x and self.effect_x and not ud['in_bar_x']:
640 self.effect_x.start(touch.x)
641 self._scroll_x_mouse = self.scroll_x
642 if self.do_scroll_y and self.effect_y and not ud['in_bar_y']:
643 self.effect_y.start(touch.y)
644 self._scroll_y_mouse = self.scroll_y
645
646 if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):
647 return
648 if scroll_type == ['bars']:
649 # touch is in parent, but _change_touch_mode expects window coords
650 touch.push()
651 touch.apply_transform_2d(self.to_local)
652 touch.apply_transform_2d(self.to_window)
653 self._change_touch_mode()
654 touch.pop()
655 return False
656 else:
657 Clock.schedule_once(self._change_touch_mode,
658 self.scroll_timeout / 1000.)
659 return True
660
661 def on_touch_move(self, touch):
662 if self._touch is not touch:
663 # touch is in parent
664 touch.push()
665 touch.apply_transform_2d(self.to_local)
666 super(ScrollView, self).on_touch_move(touch)
667 touch.pop()
668 return self._get_uid() in touch.ud
669 if touch.grab_current is not self:
670 return True
671
672 touch.ud['sv.handled'] = {'x': False, 'y': False}
673 if self.dispatch('on_scroll_move', touch):
674 return True
675
676 def on_scroll_move(self, touch):
677 if self._get_uid('svavoid') in touch.ud:
678 return False
679
680 touch.push()
681 touch.apply_transform_2d(self.to_local)
682 if self.dispatch_children('on_scroll_move', touch):
683 return True
684 touch.pop()
685
686 rv = True
687
688 uid = self._get_uid()
689 if not uid in touch.ud:
690 self._touch = False
691 return self.on_scroll_start(touch, False)
692 ud = touch.ud[uid]
693 mode = ud['mode']
694
695 # check if the minimum distance has been travelled
696 if mode == 'unknown' or mode == 'scroll':
697 if not touch.ud['sv.handled']['x'] and self.do_scroll_x \
698 and self.effect_x:
699 width = self.width
700 if touch.ud.get('in_bar_x', False):
701 dx = touch.dx / float(width - width * self.hbar[1])
702 self.scroll_x = min(max(self.scroll_x + dx, 0.), 1.)
703 self._trigger_update_from_scroll()
704 else:
705 if self.scroll_type != ['bars']:
706 self.effect_x.update(touch.x)
707 if self.scroll_x < 0 or self.scroll_x > 1:
708 rv = False
709 else:
710 touch.ud['sv.handled']['x'] = True
711 if not touch.ud['sv.handled']['y'] and self.do_scroll_y \
712 and self.effect_y:
713 height = self.height
714 if touch.ud.get('in_bar_y', False):
715 dy = touch.dy / float(height - height * self.vbar[1])
716 self.scroll_y = min(max(self.scroll_y + dy, 0.), 1.)
717 self._trigger_update_from_scroll()
718 else:
719 if self.scroll_type != ['bars']:
720 self.effect_y.update(touch.y)
721 if self.scroll_y < 0 or self.scroll_y > 1:
722 rv = False
723 else:
724 touch.ud['sv.handled']['y'] = True
725
726 if mode == 'unknown':
727 ud['dx'] += abs(touch.dx)
728 ud['dy'] += abs(touch.dy)
729 if ud['dx'] > self.scroll_distance:
730 if not self.do_scroll_x:
731 # touch is in parent, but _change expects window coords
732 touch.push()
733 touch.apply_transform_2d(self.to_local)
734 touch.apply_transform_2d(self.to_window)
735 self._change_touch_mode()
736 touch.pop()
737 return
738 mode = 'scroll'
739
740 if ud['dy'] > self.scroll_distance:
741 if not self.do_scroll_y:
742 # touch is in parent, but _change expects window coords
743 touch.push()
744 touch.apply_transform_2d(self.to_local)
745 touch.apply_transform_2d(self.to_window)
746 self._change_touch_mode()
747 touch.pop()
748 return
749 mode = 'scroll'
750 ud['mode'] = mode
751
752 if mode == 'scroll':
753 ud['dt'] = touch.time_update - ud['time']
754 ud['time'] = touch.time_update
755 ud['user_stopped'] = True
756
757 return rv
758
759 def on_touch_up(self, touch):
760 if self._touch is not touch and self.uid not in touch.ud:
761 # touch is in parents
762 touch.push()
763 touch.apply_transform_2d(self.to_local)
764 if super(ScrollView, self).on_touch_up(touch):
765 return True
766 touch.pop()
767 return False
768
769 if self.dispatch('on_scroll_stop', touch):
770 touch.ungrab(self)
771 return True
772
773 def on_scroll_stop(self, touch, check_children=True):
774 self._touch = None
775
776 if check_children:
777 touch.push()
778 touch.apply_transform_2d(self.to_local)
779 if self.dispatch_children('on_scroll_stop', touch):
780 return True
781 touch.pop()
782
783 if self._get_uid('svavoid') in touch.ud:
784 return
785 if self._get_uid() not in touch.ud:
786 return False
787
788 self._touch = None
789 uid = self._get_uid()
790 ud = touch.ud[uid]
791 if self.do_scroll_x and self.effect_x:
792 if not touch.ud.get('in_bar_x', False) and\
793 self.scroll_type != ['bars']:
794 self.effect_x.stop(touch.x)
795 if self.do_scroll_y and self.effect_y and\
796 self.scroll_type != ['bars']:
797 if not touch.ud.get('in_bar_y', False):
798 self.effect_y.stop(touch.y)
799 if ud['mode'] == 'unknown':
800 # we must do the click at least..
801 # only send the click if it was not a click to stop
802 # autoscrolling
803 if not ud['user_stopped']:
804 self.simulate_touch_down(touch)
805 Clock.schedule_once(partial(self._do_touch_up, touch), .2)
806 Clock.unschedule(self._update_effect_bounds)
807 Clock.schedule_once(self._update_effect_bounds)
808
809 # if we do mouse scrolling, always accept it
810 if 'button' in touch.profile and touch.button.startswith('scroll'):
811 return True
812
813 return self._get_uid() in touch.ud
814
815 def convert_distance_to_scroll(self, dx, dy):
816 '''Convert a distance in pixels to a scroll distance, depending on the
817 content size and the scrollview size.
818
819 The result will be a tuple of scroll distance that can be added to
820 :data:`scroll_x` and :data:`scroll_y`
821 '''
822 if not self._viewport:
823 return 0, 0
824 vp = self._viewport
825 if vp.width > self.width:
826 sw = vp.width - self.width
827 sx = dx / float(sw)
828 else:
829 sx = 0
830 if vp.height > self.height:
831 sh = vp.height - self.height
832 sy = dy / float(sh)
833 else:
834 sy = 1
835 return sx, sy
836
837 def update_from_scroll(self, *largs):
838 '''Force the reposition of the content, according to current value of
839 :attr:`scroll_x` and :attr:`scroll_y`.
840
841 This method is automatically called when one of the :attr:`scroll_x`,
842 :attr:`scroll_y`, :attr:`pos` or :attr:`size` properties change, or
843 if the size of the content changes.
844 '''
845 if not self._viewport:
846 return
847 vp = self._viewport
848
849 # update from size_hint
850 if vp.size_hint_x is not None:
851 vp.width = vp.size_hint_x * self.width
852 if vp.size_hint_y is not None:
853 vp.height = vp.size_hint_y * self.height
854
855 if vp.width > self.width:
856 sw = vp.width - self.width
857 x = self.x - self.scroll_x * sw
858 else:
859 x = self.x
860 if vp.height > self.height:
861 sh = vp.height - self.height
862 y = self.y - self.scroll_y * sh
863 else:
864 y = self.top - vp.height
865
866 # from 1.8.0, we now use a matrix by default, instead of moving the
867 # widget position behind. We set it here, but it will be a no-op most of
868 # the time.
869 vp.pos = 0, 0
870 self.g_translate.xy = x, y
871
872 # New in 1.2.0, show bar when scrolling happens and (changed in 1.9.0)
873 # fade to bar_inactive_color when no scroll is happening.
874 Clock.unschedule(self._bind_inactive_bar_color)
875 self.unbind(bar_inactive_color=self._change_bar_color)
876 Animation.stop_all(self, '_bar_color')
877 self.bind(bar_color=self._change_bar_color)
878 self._bar_color = self.bar_color
879 Clock.schedule_once(self._bind_inactive_bar_color, .5)
880
881 def _bind_inactive_bar_color(self, *l):
882 self.unbind(bar_color=self._change_bar_color)
883 self.bind(bar_inactive_color=self._change_bar_color)
884 Animation(
885 _bar_color=self.bar_inactive_color, d=.5, t='out_quart').start(self)
886
887 def _change_bar_color(self, inst, value):
888 self._bar_color = value
889
890 #
891 # Private
892 #
893 def add_widget(self, widget, index=0):
894 if self._viewport:
895 raise Exception('ScrollView accept only one widget')
896 canvas = self.canvas
897 self.canvas = self.canvas_viewport
898 super(ScrollView, self).add_widget(widget, index)
899 self.canvas = canvas
900 self._viewport = widget
901 widget.bind(size=self._trigger_update_from_scroll)
902 self._trigger_update_from_scroll()
903
904 def remove_widget(self, widget):
905 canvas = self.canvas
906 self.canvas = self.canvas_viewport
907 super(ScrollView, self).remove_widget(widget)
908 self.canvas = canvas
909 if widget is self._viewport:
910 self._viewport = None
911
912 def _get_uid(self, prefix='sv'):
913 return '{0}.{1}'.format(prefix, self.uid)
914
915 def _change_touch_mode(self, *largs):
916 if not self._touch:
917 return
918 uid = self._get_uid()
919 touch = self._touch
920 ud = touch.ud[uid]
921 if ud['mode'] != 'unknown' or ud['user_stopped']:
922 return
923 diff_frames = Clock.frames - ud['frames']
924
925 # in order to be able to scroll on very slow devices, let at least 3
926 # frames be displayed to accumulate some velocity, and then change the
927 # touch mode. Otherwise, we might never be able to compute velocity and
928 # would have no way to scroll. See #1464 and #1499
929 if diff_frames < 3:
930 Clock.schedule_once(self._change_touch_mode, 0)
931 return
932
933 if self.do_scroll_x and self.effect_x:
934 self.effect_x.cancel()
935 if self.do_scroll_y and self.effect_y:
936 self.effect_y.cancel()
937 # XXX the next line was in the condition. But this stops
938 # the possibility to "drag" an object out of the scrollview in the
939 # unused direction: if you have a horizontal scrollview, a
940 # vertical gesture will not "stop" the scroll view from looking for a
941 # horizontal gesture until the timeout is done.
942 # and touch.dx + touch.dy == 0:
943 touch.ungrab(self)
944 self._touch = None
945 # touch is in window coords
946 touch.push()
947 touch.apply_transform_2d(self.to_widget)
948 touch.apply_transform_2d(self.to_parent)
949 self.simulate_touch_down(touch)
950 touch.pop()
951 return
952
953 def _do_touch_up(self, touch, *largs):
954 # touch is in window coords
955 touch.push()
956 touch.apply_transform_2d(self.to_widget)
957 super(ScrollView, self).on_touch_up(touch)
958 touch.pop()
959 # don't forget about grab event!
960 for x in touch.grab_list[:]:
961 touch.grab_list.remove(x)
962 x = x()
963 if not x:
964 continue
965 touch.grab_current = x
966 # touch is in window coords
967 touch.push()
968 touch.apply_transform_2d(self.to_widget)
969 super(ScrollView, self).on_touch_up(touch)
970 touch.pop()
971 touch.grab_current = None
972
973
974 if __name__ == '__main__':
975 from kivy.app import App
976
977 from kivy.uix.gridlayout import GridLayout
978 from kivy.uix.button import Button
979
980 class ScrollViewApp(App):
981
982 def build(self):
983 layout1 = GridLayout(cols=4, spacing=10, size_hint=(None, None))
984 layout1.bind(minimum_height=layout1.setter('height'),
985 minimum_width=layout1.setter('width'))
986 for i in range(40):
987 btn = Button(text=str(i), size_hint=(None, None),
988 size=(200, 100))
989 layout1.add_widget(btn)
990 scrollview1 = ScrollView(bar_width='2dp')
991 scrollview1.add_widget(layout1)
992
993 layout2 = GridLayout(cols=4, spacing=10, size_hint=(None, None))
994 layout2.bind(minimum_height=layout2.setter('height'),
995 minimum_width=layout2.setter('width'))
996 for i in range(40):
997 btn = Button(text=str(i), size_hint=(None, None),
998 size=(200, 100))
999 layout2.add_widget(btn)
1000 scrollview2 = ScrollView(scroll_type=['bars'],
1001 bar_width='9dp',
1002 scroll_wheel_distance=100)
1003 scrollview2.add_widget(layout2)
1004
1005 root = GridLayout(cols=2)
1006 root.add_widget(scrollview1)
1007 root.add_widget(scrollview2)
1008 return root
1009
1010 ScrollViewApp().run()
1011
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/uix/scrollview.py b/kivy/uix/scrollview.py
--- a/kivy/uix/scrollview.py
+++ b/kivy/uix/scrollview.py
@@ -644,7 +644,7 @@
self._scroll_y_mouse = self.scroll_y
if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):
- return
+ return True
if scroll_type == ['bars']:
# touch is in parent, but _change_touch_mode expects window coords
touch.push()
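
The one-line change above works because of how Kivy propagates touches down the widget tree: a widget underneath only receives a touch if nothing drawn above it returned a truthy value from `on_touch_down`. Returning `True` from `on_scroll_start` when the touch lands on a scroll bar lets the ScrollView consume the touch instead of letting it fall through to the widget below the bar, which is the mis-selection described in the issue. The following is a rough, self-contained sketch of that dispatch behaviour; the class names and the dict-based touch are illustrative stand-ins, not Kivy's real API:

```python
# Simplified stand-ins for Kivy's touch dispatch; these names do not exist in Kivy.

class UnderlyingWidget:
    def on_touch_down(self, touch):
        print("underlying widget selected")  # the unwanted behaviour from the issue
        return True


class ScrollViewLike:
    def __init__(self, patched):
        self.patched = patched

    def on_scroll_start(self, touch):
        if touch.get("in_bar"):
            # unpatched code ends with a bare `return` (None); the fix returns True
            return True if self.patched else None
        return True

    def on_touch_down(self, touch):
        # mirrors ScrollView.on_touch_down: only a truthy dispatch result
        # consumes the touch
        if self.on_scroll_start(touch):
            return True
        return False


def dispatch(widgets_top_to_bottom, touch):
    # a parent offers the touch to each child, topmost first, and stops at the
    # first child that returns True
    for widget in widgets_top_to_bottom:
        if widget.on_touch_down(touch):
            return True
    return False


touch_on_bar = {"in_bar": True}
dispatch([ScrollViewLike(patched=False), UnderlyingWidget()], touch_on_bar)
# -> prints "underlying widget selected" (the reported bug)
dispatch([ScrollViewLike(patched=True), UnderlyingWidget()], touch_on_bar)
# -> the scroll view consumes the touch; nothing is printed
```

In the real widget tree, the "underlying widget" is whatever sits beneath the Inspector's property list, which is why scrolling past the top or bottom of the list used to select it.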
| {"golden_diff": "diff --git a/kivy/uix/scrollview.py b/kivy/uix/scrollview.py\n--- a/kivy/uix/scrollview.py\n+++ b/kivy/uix/scrollview.py\n@@ -644,7 +644,7 @@\n self._scroll_y_mouse = self.scroll_y\n \n if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):\n- return\n+ return True\n if scroll_type == ['bars']:\n # touch is in parent, but _change_touch_mode expects window coords\n touch.push()\n", "issue": "Inspector property list scrolling selects an underlying widget\nWhile scrolling with the mouse wheel through the property list of a selected widget, a different widget (one underneath the property list) is selected, when reaching the top or the bottom of the list.\nThe same happens while trying to drag the view with the scrollbar.\n\n", "before_files": [{"content": "'''Scroll View\n===========\n\n.. versionadded:: 1.0.4\n\nThe :class:`ScrollView` widget provides a scrollable/pannable viewport that is\nclipped at the scrollview's bounding box.\n\n\nScrolling Behavior\n------------------\n\nThe ScrollView accepts only one child and applies a viewport/window to\nit according to the :attr:`ScrollView.scroll_x` and\n:attr:`ScrollView.scroll_y` properties. Touches are analyzed to\ndetermine if the user wants to scroll or control the child in some\nother manner - you cannot do both at the same time. To determine if\ninteraction is a scrolling gesture, these properties are used:\n\n - :attr:`ScrollView.scroll_distance`: the minimum distance to travel,\n defaults to 20 pixels.\n - :attr:`ScrollView.scroll_timeout`: the maximum time period, defaults\n to 250 milliseconds.\n\nIf a touch travels :attr:`~ScrollView.scroll_distance` pixels within the\n:attr:`~ScrollView.scroll_timeout` period, it is recognized as a scrolling\ngesture and translation (scroll/pan) will begin. If the timeout occurs, the\ntouch down event is dispatched to the child instead (no translation).\n\nThe default value for those settings can be changed in the configuration file::\n\n [widgets]\n scroll_timeout = 250\n scroll_distance = 20\n\n.. versionadded:: 1.1.1\n\n ScrollView now animates scrolling in Y when a mousewheel is used.\n\n\nLimiting to the X or Y Axis\n---------------------------\n\nBy default, the ScrollView allows scrolling in both the X and Y axes. You can\nexplicitly disable scrolling on an axis by setting\n:attr:`ScrollView.do_scroll_x` or :attr:`ScrollView.do_scroll_y` to False.\n\n\nManaging the Content Size and Position\n--------------------------------------\n\nScrollView manages the position of its children similarly to a\nRelativeLayout (see :mod:`~kivy.uix.relativelayout`) but not the size. You must\ncarefully specify the `size_hint` of your content to get the desired\nscroll/pan effect.\n\nBy default, size_hint is (1, 1), so the content size will fit your ScrollView\nexactly (you will have nothing to scroll). 
You must deactivate at least one of\nthe size_hint instructions (x or y) of the child to enable scrolling.\n\nTo scroll a :class:`GridLayout` on Y-axis/vertically, set the child's width\nidentical to that of the ScrollView (size_hint_x=1, default), and set the\nsize_hint_y property to None::\n\n layout = GridLayout(cols=1, spacing=10, size_hint_y=None)\n # Make sure the height is such that there is something to scroll.\n layout.bind(minimum_height=layout.setter('height'))\n for i in range(30):\n btn = Button(text=str(i), size_hint_y=None, height=40)\n layout.add_widget(btn)\n root = ScrollView(size_hint=(None, None), size=(400, 400))\n root.add_widget(layout)\n\n\nOverscroll Effects\n------------------\n\n.. versionadded:: 1.7.0\n\nWhen scrolling would exceed the bounds of the :class:`ScrollView`, it\nuses a :class:`~kivy.effects.scroll.ScrollEffect` to handle the\noverscroll. These effects can perform actions like bouncing back,\nchanging opacity, or simply preventing scrolling beyond the normal\nboundaries. Note that complex effects may perform many computations,\nwhich can be slow on weaker hardware.\n\nYou can change what effect is being used by setting\n:attr:`ScrollView.effect_cls` to any effect class. Current options\ninclude:\n\n - :class:`~kivy.effects.scroll.ScrollEffect`: Does not allow\n scrolling beyond the :class:`ScrollView` boundaries.\n - :class:`~kivy.effects.dampedscroll.DampedScrollEffect`: The\n current default. Allows the user to scroll beyond the normal\n boundaries, but has the content spring back once the\n touch/click is released.\n - :class:`~kivy.effects.opacityscroll.OpacityScrollEffect`: Similar\n to the :class:`~kivy.effect.dampedscroll.DampedScrollEffect`, but\n also reduces opacity during overscroll.\n\nYou can also create your own scroll effect by subclassing one of these,\nthen pass it as the :attr:`~ScrollView.effect_cls` in the same way.\n\nAlternatively, you can set :attr:`ScrollView.effect_x` and/or\n:attr:`ScrollView.effect_y` to an *instance* of the effect you want to\nuse. This will override the default effect set in\n:attr:`ScrollView.effect_cls`.\n\nAll the effects are located in the :mod:`kivy.effects`.\n\n'''\n\n__all__ = ('ScrollView', )\n\nfrom functools import partial\nfrom kivy.animation import Animation\nfrom kivy.compat import string_types\nfrom kivy.config import Config\nfrom kivy.clock import Clock\nfrom kivy.factory import Factory\nfrom kivy.uix.stencilview import StencilView\nfrom kivy.metrics import sp\nfrom kivy.effects.dampedscroll import DampedScrollEffect\nfrom kivy.properties import NumericProperty, BooleanProperty, AliasProperty, \\\n ObjectProperty, ListProperty, ReferenceListProperty, OptionProperty\n\n\n# When we are generating documentation, Config doesn't exist\n_scroll_timeout = _scroll_distance = 0\nif Config:\n _scroll_timeout = Config.getint('widgets', 'scroll_timeout')\n _scroll_distance = sp(Config.getint('widgets', 'scroll_distance'))\n\n\nclass ScrollView(StencilView):\n '''ScrollView class. See module documentation for more information.\n\n .. versionchanged:: 1.7.0\n `auto_scroll`, `scroll_friction`, `scroll_moves`, `scroll_stoptime' has\n been deprecated, use :attr:`effect_cls` instead.\n '''\n\n scroll_distance = NumericProperty(_scroll_distance)\n '''Distance to move before scrolling the :class:`ScrollView`, in pixels. 
As\n soon as the distance has been traveled, the :class:`ScrollView` will start\n to scroll, and no touch event will go to children.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n :attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 20 (pixels), according to the default value in user\n configuration.\n '''\n\n scroll_wheel_distance = NumericProperty(20)\n '''Distance to move when scrolling with a mouse wheel.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n .. versionadded:: 1.8.0\n\n :attr:`scroll_wheel_distance` is a\n :class:`~kivy.properties.NumericProperty` , defaults to 20 pixels.\n '''\n\n scroll_timeout = NumericProperty(_scroll_timeout)\n '''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.\n If the user has not moved :attr:`scroll_distance` within the timeout,\n the scrolling will be disabled, and the touch event will go to the\n children.\n\n :attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 55 (milliseconds) according to the default value in user\n configuration.\n\n .. versionchanged:: 1.5.0\n Default value changed from 250 to 55.\n '''\n\n scroll_x = NumericProperty(0.)\n '''X scrolling value, between 0 and 1. If 0, the content's left side will\n touch the left side of the ScrollView. If 1, the content's right side will\n touch the right side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_x` is True.\n\n :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 0.\n '''\n\n scroll_y = NumericProperty(1.)\n '''Y scrolling value, between 0 and 1. If 0, the content's bottom side will\n touch the bottom side of the ScrollView. If 1, the content's top side will\n touch the top side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_y` is True.\n\n :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n '''\n\n do_scroll_x = BooleanProperty(True)\n '''Allow scroll on X axis.\n\n :attr:`do_scroll_x` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_scroll_y = BooleanProperty(True)\n '''Allow scroll on Y axis.\n\n :attr:`do_scroll_y` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n def _get_do_scroll(self):\n return (self.do_scroll_x, self.do_scroll_y)\n\n def _set_do_scroll(self, value):\n if type(value) in (list, tuple):\n self.do_scroll_x, self.do_scroll_y = value\n else:\n self.do_scroll_x = self.do_scroll_y = bool(value)\n do_scroll = AliasProperty(_get_do_scroll, _set_do_scroll,\n bind=('do_scroll_x', 'do_scroll_y'))\n '''Allow scroll on X or Y axis.\n\n :attr:`do_scroll` is a :class:`~kivy.properties.AliasProperty` of\n (:attr:`do_scroll_x` + :attr:`do_scroll_y`)\n '''\n\n def _get_vbar(self):\n # must return (y, height) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vh = self._viewport.height\n h = self.height\n if vh < h or vh == 0:\n return 0, 1.\n ph = max(0.01, h / float(vh))\n sy = min(1.0, max(0.0, self.scroll_y))\n py = (1. - ph) * sy\n return (py, ph)\n\n vbar = AliasProperty(_get_vbar, None, bind=(\n 'scroll_y', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the vertical scrolling bar.\n\n .. 
versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little vertical bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n def _get_hbar(self):\n # must return (x, width) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vw = self._viewport.width\n w = self.width\n if vw < w or vw == 0:\n return 0, 1.\n pw = max(0.01, w / float(vw))\n sx = min(1.0, max(0.0, self.scroll_x))\n px = (1. - pw) * sx\n return (px, pw)\n\n hbar = AliasProperty(_get_hbar, None, bind=(\n 'scroll_x', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the horizontal scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little horizontal bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n bar_color = ListProperty([.7, .7, .7, .9])\n '''Color of horizontal / vertical scroll bar, in RGBA format.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_color` is a :class:`~kivy.properties.ListProperty` and defaults\n to [.7, .7, .7, .9].\n '''\n\n bar_inactive_color = ListProperty([.7, .7, .7, .2])\n '''Color of horizontal / vertical scroll bar (in RGBA format), when no\n scroll is happening.\n\n .. versionadded:: 1.9.0\n\n :attr:`bar_inactive_color` is a\n :class:`~kivy.properties.ListProperty` and defaults to [.7, .7, .7, .2].\n '''\n\n bar_width = NumericProperty('2dp')\n '''Width of the horizontal / vertical scroll bar. The width is interpreted\n as a height for the horizontal bar.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_width` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 2.\n '''\n\n bar_pos_x = OptionProperty('bottom', options=('top', 'bottom'))\n '''Which side of the ScrollView the horizontal scroll bar should go\n on. Possible values are 'top' and 'bottom'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_x` is an :class:`~kivy.properties.OptionProperty`,\n default to 'bottom'\n\n '''\n\n bar_pos_y = OptionProperty('right', options=('left', 'right'))\n '''Which side of the ScrollView the vertical scroll bar should go\n on. Possible values are 'left' and 'right'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_y` is an :class:`~kivy.properties.OptionProperty`,\n default to 'right'\n\n '''\n\n bar_pos = ReferenceListProperty(bar_pos_x, bar_pos_y)\n '''Which side of the scroll view to place each of the bars on.\n\n :attr:`bar_pos` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`bar_pos_x`, :attr:`bar_pos_y`)\n '''\n\n bar_margin = NumericProperty(0)\n '''Margin between the bottom / right side of the scrollview when drawing\n the horizontal / vertical scroll bar.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_margin` is a :class:`~kivy.properties.NumericProperty`, default\n to 0\n '''\n\n effect_cls = ObjectProperty(DampedScrollEffect, allownone=True)\n '''Class effect to instanciate for X and Y axis.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_cls` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to :class:`DampedScrollEffect`.\n\n .. 
versionchanged:: 1.8.0\n If you set a string, the :class:`~kivy.factory.Factory` will be used to\n resolve the class.\n\n '''\n\n effect_x = ObjectProperty(None, allownone=True)\n '''Effect to apply for the X axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_x` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n effect_y = ObjectProperty(None, allownone=True)\n '''Effect to apply for the Y axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_y` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None, read-only.\n '''\n\n viewport_size = ListProperty([0, 0])\n '''(internal) Size of the internal viewport. This is the size of your only\n child in the scrollview.\n '''\n\n scroll_type = OptionProperty(['content'], options=(['content'], ['bars'],\n ['bars', 'content'], ['content', 'bars']))\n '''Sets the type of scrolling to use for the content of the scrollview.\n Available options are: ['content'], ['bars'], ['bars', 'content'].\n\n .. versionadded:: 1.8.0\n\n :attr:`scroll_type` is a :class:`~kivy.properties.OptionProperty`, defaults\n to ['content'].\n '''\n\n # private, for internal use only\n\n _viewport = ObjectProperty(None, allownone=True)\n _bar_color = ListProperty([0, 0, 0, 0])\n\n def _set_viewport_size(self, instance, value):\n self.viewport_size = value\n\n def on__viewport(self, instance, value):\n if value:\n value.bind(size=self._set_viewport_size)\n self.viewport_size = value.size\n\n def __init__(self, **kwargs):\n self._touch = None\n self._trigger_update_from_scroll = Clock.create_trigger(\n self.update_from_scroll, -1)\n # create a specific canvas for the viewport\n from kivy.graphics import PushMatrix, Translate, PopMatrix, Canvas\n self.canvas_viewport = Canvas()\n self.canvas = Canvas()\n with self.canvas_viewport.before:\n PushMatrix()\n self.g_translate = Translate(0, 0)\n with self.canvas_viewport.after:\n PopMatrix()\n\n super(ScrollView, self).__init__(**kwargs)\n\n self.register_event_type('on_scroll_start')\n self.register_event_type('on_scroll_move')\n self.register_event_type('on_scroll_stop')\n\n # now add the viewport canvas to our canvas\n self.canvas.add(self.canvas_viewport)\n\n effect_cls = self.effect_cls\n if isinstance(effect_cls, string_types):\n effect_cls = Factory.get(effect_cls)\n if self.effect_x is None and effect_cls is not None:\n self.effect_x = effect_cls(target_widget=self._viewport)\n if self.effect_y is None and effect_cls is not None:\n self.effect_y = effect_cls(target_widget=self._viewport)\n self.bind(\n width=self._update_effect_x_bounds,\n height=self._update_effect_y_bounds,\n viewport_size=self._update_effect_bounds,\n _viewport=self._update_effect_widget,\n scroll_x=self._trigger_update_from_scroll,\n scroll_y=self._trigger_update_from_scroll,\n pos=self._trigger_update_from_scroll,\n size=self._trigger_update_from_scroll)\n\n self._update_effect_widget()\n self._update_effect_x_bounds()\n self._update_effect_y_bounds()\n\n def on_effect_x(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_x)\n value.target_widget = self._viewport\n\n def on_effect_y(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_y)\n value.target_widget = self._viewport\n\n def on_effect_cls(self, instance, cls):\n if isinstance(cls, string_types):\n cls = Factory.get(cls)\n self.effect_x = cls(target_widget=self._viewport)\n 
self.effect_x.bind(scroll=self._update_effect_x)\n self.effect_y = cls(target_widget=self._viewport)\n self.effect_y.bind(scroll=self._update_effect_y)\n\n def _update_effect_widget(self, *args):\n if self.effect_x:\n self.effect_x.target_widget = self._viewport\n if self.effect_y:\n self.effect_y.target_widget = self._viewport\n\n def _update_effect_x_bounds(self, *args):\n if not self._viewport or not self.effect_x:\n return\n self.effect_x.min = -(self.viewport_size[0] - self.width)\n self.effect_x.max = 0\n self.effect_x.value = self.effect_x.min * self.scroll_x\n\n def _update_effect_y_bounds(self, *args):\n if not self._viewport or not self.effect_y:\n return\n self.effect_y.min = -(self.viewport_size[1] - self.height)\n self.effect_y.max = 0\n self.effect_y.value = self.effect_y.min * self.scroll_y\n\n def _update_effect_bounds(self, *args):\n if not self._viewport:\n return\n if self.effect_x:\n self._update_effect_x_bounds()\n if self.effect_y:\n self._update_effect_y_bounds()\n\n def _update_effect_x(self, *args):\n vp = self._viewport\n if not vp or not self.effect_x:\n return\n sw = vp.width - self.width\n if sw < 1:\n return\n sx = self.effect_x.scroll / float(sw)\n self.scroll_x = -sx\n self._trigger_update_from_scroll()\n\n def _update_effect_y(self, *args):\n vp = self._viewport\n if not vp or not self.effect_y:\n return\n sh = vp.height - self.height\n if sh < 1:\n return\n sy = self.effect_y.scroll / float(sh)\n self.scroll_y = -sy\n self._trigger_update_from_scroll()\n\n def to_local(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x - tx, y - ty\n\n def to_parent(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x + tx, y + ty\n\n def simulate_touch_down(self, touch):\n # at this point the touch is in parent coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n ret = super(ScrollView, self).on_touch_down(touch)\n touch.pop()\n return ret\n\n def on_touch_down(self, touch):\n if self.dispatch('on_scroll_start', touch):\n self._touch = touch\n touch.grab(self)\n return True\n\n def on_scroll_start(self, touch, check_children=True):\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_start', touch):\n return True\n touch.pop()\n\n if not self.collide_point(*touch.pos):\n touch.ud[self._get_uid('svavoid')] = True\n return\n if self.disabled:\n return True\n if self._touch or (not (self.do_scroll_x or self.do_scroll_y)):\n return self.simulate_touch_down(touch)\n\n # handle mouse scrolling, only if the viewport size is bigger than the\n # scrollview size, and if the user allowed to do it\n vp = self._viewport\n if not vp:\n return True\n scroll_type = self.scroll_type\n ud = touch.ud\n scroll_bar = 'bars' in scroll_type\n\n # check if touch is in bar_x(horizontal) or bay_y(bertical)\n ud['in_bar_x'] = ud['in_bar_y'] = False\n width_scrollable = vp.width > self.width\n height_scrollable = vp.height > self.height\n bar_pos_x = self.bar_pos_x[0]\n bar_pos_y = self.bar_pos_y[0]\n\n d = {'b': True if touch.y < self.y + self.bar_width else False,\n 't': True if touch.y > self.top - self.bar_width else False,\n 'l': True if touch.x < self.x + self.bar_width else False,\n 'r': True if touch.x > self.right - self.bar_width else False}\n if scroll_bar:\n if (width_scrollable and d[bar_pos_x]):\n ud['in_bar_x'] = True\n if (height_scrollable and d[bar_pos_y]):\n ud['in_bar_y'] = True\n\n if vp and 'button' in touch.profile and \\\n touch.button.startswith('scroll'):\n btn = touch.button\n m = 
sp(self.scroll_wheel_distance)\n e = None\n\n if ((btn == 'scrolldown' and self.scroll_y >= 1) or\n (btn == 'scrollup' and self.scroll_y <= 0) or\n (btn == 'scrollleft' and self.scroll_x <= 0) or\n (btn == 'scrollright' and self.scroll_x >= 1)):\n return False\n\n if (self.effect_x and self.do_scroll_y and height_scrollable\n and btn in ('scrolldown', 'scrollup')):\n e = self.effect_x if ud['in_bar_x'] else self.effect_y\n\n elif (self.effect_y and self.do_scroll_x and width_scrollable\n and btn in ('scrollleft', 'scrollright')):\n e = self.effect_y if ud['in_bar_y'] else self.effect_x\n\n if e:\n if btn in ('scrolldown', 'scrollleft'):\n e.value = max(e.value - m, e.min)\n e.velocity = 0\n elif btn in ('scrollup', 'scrollright'):\n e.value = min(e.value + m, e.max)\n e.velocity = 0\n touch.ud[self._get_uid('svavoid')] = True\n e.trigger_velocity_update()\n return True\n\n # no mouse scrolling, so the user is going to drag the scrollview with\n # this touch.\n self._touch = touch\n uid = self._get_uid()\n\n ud[uid] = {\n 'mode': 'unknown',\n 'dx': 0,\n 'dy': 0,\n 'user_stopped': False,\n 'frames': Clock.frames,\n 'time': touch.time_start}\n\n if self.do_scroll_x and self.effect_x and not ud['in_bar_x']:\n self.effect_x.start(touch.x)\n self._scroll_x_mouse = self.scroll_x\n if self.do_scroll_y and self.effect_y and not ud['in_bar_y']:\n self.effect_y.start(touch.y)\n self._scroll_y_mouse = self.scroll_y\n\n if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):\n return\n if scroll_type == ['bars']:\n # touch is in parent, but _change_touch_mode expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return False\n else:\n Clock.schedule_once(self._change_touch_mode,\n self.scroll_timeout / 1000.)\n return True\n\n def on_touch_move(self, touch):\n if self._touch is not touch:\n # touch is in parent\n touch.push()\n touch.apply_transform_2d(self.to_local)\n super(ScrollView, self).on_touch_move(touch)\n touch.pop()\n return self._get_uid() in touch.ud\n if touch.grab_current is not self:\n return True\n\n touch.ud['sv.handled'] = {'x': False, 'y': False}\n if self.dispatch('on_scroll_move', touch):\n return True\n\n def on_scroll_move(self, touch):\n if self._get_uid('svavoid') in touch.ud:\n return False\n\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_move', touch):\n return True\n touch.pop()\n\n rv = True\n\n uid = self._get_uid()\n if not uid in touch.ud:\n self._touch = False\n return self.on_scroll_start(touch, False)\n ud = touch.ud[uid]\n mode = ud['mode']\n\n # check if the minimum distance has been travelled\n if mode == 'unknown' or mode == 'scroll':\n if not touch.ud['sv.handled']['x'] and self.do_scroll_x \\\n and self.effect_x:\n width = self.width\n if touch.ud.get('in_bar_x', False):\n dx = touch.dx / float(width - width * self.hbar[1])\n self.scroll_x = min(max(self.scroll_x + dx, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_x.update(touch.x)\n if self.scroll_x < 0 or self.scroll_x > 1:\n rv = False\n else:\n touch.ud['sv.handled']['x'] = True\n if not touch.ud['sv.handled']['y'] and self.do_scroll_y \\\n and self.effect_y:\n height = self.height\n if touch.ud.get('in_bar_y', False):\n dy = touch.dy / float(height - height * self.vbar[1])\n self.scroll_y = min(max(self.scroll_y + dy, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if 
self.scroll_type != ['bars']:\n self.effect_y.update(touch.y)\n if self.scroll_y < 0 or self.scroll_y > 1:\n rv = False\n else:\n touch.ud['sv.handled']['y'] = True\n\n if mode == 'unknown':\n ud['dx'] += abs(touch.dx)\n ud['dy'] += abs(touch.dy)\n if ud['dx'] > self.scroll_distance:\n if not self.do_scroll_x:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n\n if ud['dy'] > self.scroll_distance:\n if not self.do_scroll_y:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n ud['mode'] = mode\n\n if mode == 'scroll':\n ud['dt'] = touch.time_update - ud['time']\n ud['time'] = touch.time_update\n ud['user_stopped'] = True\n\n return rv\n\n def on_touch_up(self, touch):\n if self._touch is not touch and self.uid not in touch.ud:\n # touch is in parents\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(ScrollView, self).on_touch_up(touch):\n return True\n touch.pop()\n return False\n\n if self.dispatch('on_scroll_stop', touch):\n touch.ungrab(self)\n return True\n\n def on_scroll_stop(self, touch, check_children=True):\n self._touch = None\n\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_stop', touch):\n return True\n touch.pop()\n\n if self._get_uid('svavoid') in touch.ud:\n return\n if self._get_uid() not in touch.ud:\n return False\n\n self._touch = None\n uid = self._get_uid()\n ud = touch.ud[uid]\n if self.do_scroll_x and self.effect_x:\n if not touch.ud.get('in_bar_x', False) and\\\n self.scroll_type != ['bars']:\n self.effect_x.stop(touch.x)\n if self.do_scroll_y and self.effect_y and\\\n self.scroll_type != ['bars']:\n if not touch.ud.get('in_bar_y', False):\n self.effect_y.stop(touch.y)\n if ud['mode'] == 'unknown':\n # we must do the click at least..\n # only send the click if it was not a click to stop\n # autoscrolling\n if not ud['user_stopped']:\n self.simulate_touch_down(touch)\n Clock.schedule_once(partial(self._do_touch_up, touch), .2)\n Clock.unschedule(self._update_effect_bounds)\n Clock.schedule_once(self._update_effect_bounds)\n\n # if we do mouse scrolling, always accept it\n if 'button' in touch.profile and touch.button.startswith('scroll'):\n return True\n\n return self._get_uid() in touch.ud\n\n def convert_distance_to_scroll(self, dx, dy):\n '''Convert a distance in pixels to a scroll distance, depending on the\n content size and the scrollview size.\n\n The result will be a tuple of scroll distance that can be added to\n :data:`scroll_x` and :data:`scroll_y`\n '''\n if not self._viewport:\n return 0, 0\n vp = self._viewport\n if vp.width > self.width:\n sw = vp.width - self.width\n sx = dx / float(sw)\n else:\n sx = 0\n if vp.height > self.height:\n sh = vp.height - self.height\n sy = dy / float(sh)\n else:\n sy = 1\n return sx, sy\n\n def update_from_scroll(self, *largs):\n '''Force the reposition of the content, according to current value of\n :attr:`scroll_x` and :attr:`scroll_y`.\n\n This method is automatically called when one of the :attr:`scroll_x`,\n :attr:`scroll_y`, :attr:`pos` or :attr:`size` properties change, or\n if the size of the content changes.\n '''\n if not self._viewport:\n return\n vp = self._viewport\n\n # 
update from size_hint\n if vp.size_hint_x is not None:\n vp.width = vp.size_hint_x * self.width\n if vp.size_hint_y is not None:\n vp.height = vp.size_hint_y * self.height\n\n if vp.width > self.width:\n sw = vp.width - self.width\n x = self.x - self.scroll_x * sw\n else:\n x = self.x\n if vp.height > self.height:\n sh = vp.height - self.height\n y = self.y - self.scroll_y * sh\n else:\n y = self.top - vp.height\n\n # from 1.8.0, we now use a matrix by default, instead of moving the\n # widget position behind. We set it here, but it will be a no-op most of\n # the time.\n vp.pos = 0, 0\n self.g_translate.xy = x, y\n\n # New in 1.2.0, show bar when scrolling happens and (changed in 1.9.0)\n # fade to bar_inactive_color when no scroll is happening.\n Clock.unschedule(self._bind_inactive_bar_color)\n self.unbind(bar_inactive_color=self._change_bar_color)\n Animation.stop_all(self, '_bar_color')\n self.bind(bar_color=self._change_bar_color)\n self._bar_color = self.bar_color\n Clock.schedule_once(self._bind_inactive_bar_color, .5)\n\n def _bind_inactive_bar_color(self, *l):\n self.unbind(bar_color=self._change_bar_color)\n self.bind(bar_inactive_color=self._change_bar_color)\n Animation(\n _bar_color=self.bar_inactive_color, d=.5, t='out_quart').start(self)\n\n def _change_bar_color(self, inst, value):\n self._bar_color = value\n\n #\n # Private\n #\n def add_widget(self, widget, index=0):\n if self._viewport:\n raise Exception('ScrollView accept only one widget')\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).add_widget(widget, index)\n self.canvas = canvas\n self._viewport = widget\n widget.bind(size=self._trigger_update_from_scroll)\n self._trigger_update_from_scroll()\n\n def remove_widget(self, widget):\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).remove_widget(widget)\n self.canvas = canvas\n if widget is self._viewport:\n self._viewport = None\n\n def _get_uid(self, prefix='sv'):\n return '{0}.{1}'.format(prefix, self.uid)\n\n def _change_touch_mode(self, *largs):\n if not self._touch:\n return\n uid = self._get_uid()\n touch = self._touch\n ud = touch.ud[uid]\n if ud['mode'] != 'unknown' or ud['user_stopped']:\n return\n diff_frames = Clock.frames - ud['frames']\n\n # in order to be able to scroll on very slow devices, let at least 3\n # frames displayed to accumulate some velocity. And then, change the\n # touch mode. Otherwise, we might never be able to compute velocity, and\n # no way to scroll it. See #1464 and #1499\n if diff_frames < 3:\n Clock.schedule_once(self._change_touch_mode, 0)\n return\n\n if self.do_scroll_x and self.effect_x:\n self.effect_x.cancel()\n if self.do_scroll_y and self.effect_y:\n self.effect_y.cancel()\n # XXX the next line was in the condition. 
But this stop\n # the possibily to \"drag\" an object out of the scrollview in the\n # non-used direction: if you have an horizontal scrollview, a\n # vertical gesture will not \"stop\" the scroll view to look for an\n # horizontal gesture, until the timeout is done.\n # and touch.dx + touch.dy == 0:\n touch.ungrab(self)\n self._touch = None\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n touch.apply_transform_2d(self.to_parent)\n self.simulate_touch_down(touch)\n touch.pop()\n return\n\n def _do_touch_up(self, touch, *largs):\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n # don't forget about grab event!\n for x in touch.grab_list[:]:\n touch.grab_list.remove(x)\n x = x()\n if not x:\n continue\n touch.grab_current = x\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n touch.grab_current = None\n\n\nif __name__ == '__main__':\n from kivy.app import App\n\n from kivy.uix.gridlayout import GridLayout\n from kivy.uix.button import Button\n\n class ScrollViewApp(App):\n\n def build(self):\n layout1 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout1.bind(minimum_height=layout1.setter('height'),\n minimum_width=layout1.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout1.add_widget(btn)\n scrollview1 = ScrollView(bar_width='2dp')\n scrollview1.add_widget(layout1)\n\n layout2 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout2.bind(minimum_height=layout2.setter('height'),\n minimum_width=layout2.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout2.add_widget(btn)\n scrollview2 = ScrollView(scroll_type=['bars'],\n bar_width='9dp',\n scroll_wheel_distance=100)\n scrollview2.add_widget(layout2)\n\n root = GridLayout(cols=2)\n root.add_widget(scrollview1)\n root.add_widget(scrollview2)\n return root\n\n ScrollViewApp().run()\n", "path": "kivy/uix/scrollview.py"}], "after_files": [{"content": "'''Scroll View\n===========\n\n.. versionadded:: 1.0.4\n\nThe :class:`ScrollView` widget provides a scrollable/pannable viewport that is\nclipped at the scrollview's bounding box.\n\n\nScrolling Behavior\n------------------\n\nThe ScrollView accepts only one child and applies a viewport/window to\nit according to the :attr:`ScrollView.scroll_x` and\n:attr:`ScrollView.scroll_y` properties. Touches are analyzed to\ndetermine if the user wants to scroll or control the child in some\nother manner - you cannot do both at the same time. To determine if\ninteraction is a scrolling gesture, these properties are used:\n\n - :attr:`ScrollView.scroll_distance`: the minimum distance to travel,\n defaults to 20 pixels.\n - :attr:`ScrollView.scroll_timeout`: the maximum time period, defaults\n to 250 milliseconds.\n\nIf a touch travels :attr:`~ScrollView.scroll_distance` pixels within the\n:attr:`~ScrollView.scroll_timeout` period, it is recognized as a scrolling\ngesture and translation (scroll/pan) will begin. If the timeout occurs, the\ntouch down event is dispatched to the child instead (no translation).\n\nThe default value for those settings can be changed in the configuration file::\n\n [widgets]\n scroll_timeout = 250\n scroll_distance = 20\n\n.. 
versionadded:: 1.1.1\n\n ScrollView now animates scrolling in Y when a mousewheel is used.\n\n\nLimiting to the X or Y Axis\n---------------------------\n\nBy default, the ScrollView allows scrolling in both the X and Y axes. You can\nexplicitly disable scrolling on an axis by setting\n:attr:`ScrollView.do_scroll_x` or :attr:`ScrollView.do_scroll_y` to False.\n\n\nManaging the Content Size and Position\n--------------------------------------\n\nScrollView manages the position of its children similarly to a\nRelativeLayout (see :mod:`~kivy.uix.relativelayout`) but not the size. You must\ncarefully specify the `size_hint` of your content to get the desired\nscroll/pan effect.\n\nBy default, size_hint is (1, 1), so the content size will fit your ScrollView\nexactly (you will have nothing to scroll). You must deactivate at least one of\nthe size_hint instructions (x or y) of the child to enable scrolling.\n\nTo scroll a :class:`GridLayout` on Y-axis/vertically, set the child's width\nidentical to that of the ScrollView (size_hint_x=1, default), and set the\nsize_hint_y property to None::\n\n layout = GridLayout(cols=1, spacing=10, size_hint_y=None)\n # Make sure the height is such that there is something to scroll.\n layout.bind(minimum_height=layout.setter('height'))\n for i in range(30):\n btn = Button(text=str(i), size_hint_y=None, height=40)\n layout.add_widget(btn)\n root = ScrollView(size_hint=(None, None), size=(400, 400))\n root.add_widget(layout)\n\n\nOverscroll Effects\n------------------\n\n.. versionadded:: 1.7.0\n\nWhen scrolling would exceed the bounds of the :class:`ScrollView`, it\nuses a :class:`~kivy.effects.scroll.ScrollEffect` to handle the\noverscroll. These effects can perform actions like bouncing back,\nchanging opacity, or simply preventing scrolling beyond the normal\nboundaries. Note that complex effects may perform many computations,\nwhich can be slow on weaker hardware.\n\nYou can change what effect is being used by setting\n:attr:`ScrollView.effect_cls` to any effect class. Current options\ninclude:\n\n - :class:`~kivy.effects.scroll.ScrollEffect`: Does not allow\n scrolling beyond the :class:`ScrollView` boundaries.\n - :class:`~kivy.effects.dampedscroll.DampedScrollEffect`: The\n current default. Allows the user to scroll beyond the normal\n boundaries, but has the content spring back once the\n touch/click is released.\n - :class:`~kivy.effects.opacityscroll.OpacityScrollEffect`: Similar\n to the :class:`~kivy.effect.dampedscroll.DampedScrollEffect`, but\n also reduces opacity during overscroll.\n\nYou can also create your own scroll effect by subclassing one of these,\nthen pass it as the :attr:`~ScrollView.effect_cls` in the same way.\n\nAlternatively, you can set :attr:`ScrollView.effect_x` and/or\n:attr:`ScrollView.effect_y` to an *instance* of the effect you want to\nuse. 
This will override the default effect set in\n:attr:`ScrollView.effect_cls`.\n\nAll the effects are located in the :mod:`kivy.effects`.\n\n'''\n\n__all__ = ('ScrollView', )\n\nfrom functools import partial\nfrom kivy.animation import Animation\nfrom kivy.compat import string_types\nfrom kivy.config import Config\nfrom kivy.clock import Clock\nfrom kivy.factory import Factory\nfrom kivy.uix.stencilview import StencilView\nfrom kivy.metrics import sp\nfrom kivy.effects.dampedscroll import DampedScrollEffect\nfrom kivy.properties import NumericProperty, BooleanProperty, AliasProperty, \\\n ObjectProperty, ListProperty, ReferenceListProperty, OptionProperty\n\n\n# When we are generating documentation, Config doesn't exist\n_scroll_timeout = _scroll_distance = 0\nif Config:\n _scroll_timeout = Config.getint('widgets', 'scroll_timeout')\n _scroll_distance = sp(Config.getint('widgets', 'scroll_distance'))\n\n\nclass ScrollView(StencilView):\n '''ScrollView class. See module documentation for more information.\n\n .. versionchanged:: 1.7.0\n `auto_scroll`, `scroll_friction`, `scroll_moves`, `scroll_stoptime' has\n been deprecated, use :attr:`effect_cls` instead.\n '''\n\n scroll_distance = NumericProperty(_scroll_distance)\n '''Distance to move before scrolling the :class:`ScrollView`, in pixels. As\n soon as the distance has been traveled, the :class:`ScrollView` will start\n to scroll, and no touch event will go to children.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n :attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 20 (pixels), according to the default value in user\n configuration.\n '''\n\n scroll_wheel_distance = NumericProperty(20)\n '''Distance to move when scrolling with a mouse wheel.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n .. versionadded:: 1.8.0\n\n :attr:`scroll_wheel_distance` is a\n :class:`~kivy.properties.NumericProperty` , defaults to 20 pixels.\n '''\n\n scroll_timeout = NumericProperty(_scroll_timeout)\n '''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.\n If the user has not moved :attr:`scroll_distance` within the timeout,\n the scrolling will be disabled, and the touch event will go to the\n children.\n\n :attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 55 (milliseconds) according to the default value in user\n configuration.\n\n .. versionchanged:: 1.5.0\n Default value changed from 250 to 55.\n '''\n\n scroll_x = NumericProperty(0.)\n '''X scrolling value, between 0 and 1. If 0, the content's left side will\n touch the left side of the ScrollView. If 1, the content's right side will\n touch the right side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_x` is True.\n\n :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 0.\n '''\n\n scroll_y = NumericProperty(1.)\n '''Y scrolling value, between 0 and 1. If 0, the content's bottom side will\n touch the bottom side of the ScrollView. 
If 1, the content's top side will\n touch the top side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_y` is True.\n\n :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n '''\n\n do_scroll_x = BooleanProperty(True)\n '''Allow scroll on X axis.\n\n :attr:`do_scroll_x` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_scroll_y = BooleanProperty(True)\n '''Allow scroll on Y axis.\n\n :attr:`do_scroll_y` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n def _get_do_scroll(self):\n return (self.do_scroll_x, self.do_scroll_y)\n\n def _set_do_scroll(self, value):\n if type(value) in (list, tuple):\n self.do_scroll_x, self.do_scroll_y = value\n else:\n self.do_scroll_x = self.do_scroll_y = bool(value)\n do_scroll = AliasProperty(_get_do_scroll, _set_do_scroll,\n bind=('do_scroll_x', 'do_scroll_y'))\n '''Allow scroll on X or Y axis.\n\n :attr:`do_scroll` is a :class:`~kivy.properties.AliasProperty` of\n (:attr:`do_scroll_x` + :attr:`do_scroll_y`)\n '''\n\n def _get_vbar(self):\n # must return (y, height) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vh = self._viewport.height\n h = self.height\n if vh < h or vh == 0:\n return 0, 1.\n ph = max(0.01, h / float(vh))\n sy = min(1.0, max(0.0, self.scroll_y))\n py = (1. - ph) * sy\n return (py, ph)\n\n vbar = AliasProperty(_get_vbar, None, bind=(\n 'scroll_y', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the vertical scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little vertical bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n def _get_hbar(self):\n # must return (x, width) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vw = self._viewport.width\n w = self.width\n if vw < w or vw == 0:\n return 0, 1.\n pw = max(0.01, w / float(vw))\n sx = min(1.0, max(0.0, self.scroll_x))\n px = (1. - pw) * sx\n return (px, pw)\n\n hbar = AliasProperty(_get_hbar, None, bind=(\n 'scroll_x', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the horizontal scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little horizontal bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n bar_color = ListProperty([.7, .7, .7, .9])\n '''Color of horizontal / vertical scroll bar, in RGBA format.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_color` is a :class:`~kivy.properties.ListProperty` and defaults\n to [.7, .7, .7, .9].\n '''\n\n bar_inactive_color = ListProperty([.7, .7, .7, .2])\n '''Color of horizontal / vertical scroll bar (in RGBA format), when no\n scroll is happening.\n\n .. versionadded:: 1.9.0\n\n :attr:`bar_inactive_color` is a\n :class:`~kivy.properties.ListProperty` and defaults to [.7, .7, .7, .2].\n '''\n\n bar_width = NumericProperty('2dp')\n '''Width of the horizontal / vertical scroll bar. The width is interpreted\n as a height for the horizontal bar.\n\n .. 
versionadded:: 1.2.0\n\n :attr:`bar_width` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 2.\n '''\n\n bar_pos_x = OptionProperty('bottom', options=('top', 'bottom'))\n '''Which side of the ScrollView the horizontal scroll bar should go\n on. Possible values are 'top' and 'bottom'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_x` is an :class:`~kivy.properties.OptionProperty`,\n default to 'bottom'\n\n '''\n\n bar_pos_y = OptionProperty('right', options=('left', 'right'))\n '''Which side of the ScrollView the vertical scroll bar should go\n on. Possible values are 'left' and 'right'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_y` is an :class:`~kivy.properties.OptionProperty`,\n default to 'right'\n\n '''\n\n bar_pos = ReferenceListProperty(bar_pos_x, bar_pos_y)\n '''Which side of the scroll view to place each of the bars on.\n\n :attr:`bar_pos` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`bar_pos_x`, :attr:`bar_pos_y`)\n '''\n\n bar_margin = NumericProperty(0)\n '''Margin between the bottom / right side of the scrollview when drawing\n the horizontal / vertical scroll bar.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_margin` is a :class:`~kivy.properties.NumericProperty`, default\n to 0\n '''\n\n effect_cls = ObjectProperty(DampedScrollEffect, allownone=True)\n '''Class effect to instanciate for X and Y axis.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_cls` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to :class:`DampedScrollEffect`.\n\n .. versionchanged:: 1.8.0\n If you set a string, the :class:`~kivy.factory.Factory` will be used to\n resolve the class.\n\n '''\n\n effect_x = ObjectProperty(None, allownone=True)\n '''Effect to apply for the X axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_x` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n effect_y = ObjectProperty(None, allownone=True)\n '''Effect to apply for the Y axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_y` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None, read-only.\n '''\n\n viewport_size = ListProperty([0, 0])\n '''(internal) Size of the internal viewport. This is the size of your only\n child in the scrollview.\n '''\n\n scroll_type = OptionProperty(['content'], options=(['content'], ['bars'],\n ['bars', 'content'], ['content', 'bars']))\n '''Sets the type of scrolling to use for the content of the scrollview.\n Available options are: ['content'], ['bars'], ['bars', 'content'].\n\n .. 
versionadded:: 1.8.0\n\n :attr:`scroll_type` is a :class:`~kivy.properties.OptionProperty`, defaults\n to ['content'].\n '''\n\n # private, for internal use only\n\n _viewport = ObjectProperty(None, allownone=True)\n _bar_color = ListProperty([0, 0, 0, 0])\n\n def _set_viewport_size(self, instance, value):\n self.viewport_size = value\n\n def on__viewport(self, instance, value):\n if value:\n value.bind(size=self._set_viewport_size)\n self.viewport_size = value.size\n\n def __init__(self, **kwargs):\n self._touch = None\n self._trigger_update_from_scroll = Clock.create_trigger(\n self.update_from_scroll, -1)\n # create a specific canvas for the viewport\n from kivy.graphics import PushMatrix, Translate, PopMatrix, Canvas\n self.canvas_viewport = Canvas()\n self.canvas = Canvas()\n with self.canvas_viewport.before:\n PushMatrix()\n self.g_translate = Translate(0, 0)\n with self.canvas_viewport.after:\n PopMatrix()\n\n super(ScrollView, self).__init__(**kwargs)\n\n self.register_event_type('on_scroll_start')\n self.register_event_type('on_scroll_move')\n self.register_event_type('on_scroll_stop')\n\n # now add the viewport canvas to our canvas\n self.canvas.add(self.canvas_viewport)\n\n effect_cls = self.effect_cls\n if isinstance(effect_cls, string_types):\n effect_cls = Factory.get(effect_cls)\n if self.effect_x is None and effect_cls is not None:\n self.effect_x = effect_cls(target_widget=self._viewport)\n if self.effect_y is None and effect_cls is not None:\n self.effect_y = effect_cls(target_widget=self._viewport)\n self.bind(\n width=self._update_effect_x_bounds,\n height=self._update_effect_y_bounds,\n viewport_size=self._update_effect_bounds,\n _viewport=self._update_effect_widget,\n scroll_x=self._trigger_update_from_scroll,\n scroll_y=self._trigger_update_from_scroll,\n pos=self._trigger_update_from_scroll,\n size=self._trigger_update_from_scroll)\n\n self._update_effect_widget()\n self._update_effect_x_bounds()\n self._update_effect_y_bounds()\n\n def on_effect_x(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_x)\n value.target_widget = self._viewport\n\n def on_effect_y(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_y)\n value.target_widget = self._viewport\n\n def on_effect_cls(self, instance, cls):\n if isinstance(cls, string_types):\n cls = Factory.get(cls)\n self.effect_x = cls(target_widget=self._viewport)\n self.effect_x.bind(scroll=self._update_effect_x)\n self.effect_y = cls(target_widget=self._viewport)\n self.effect_y.bind(scroll=self._update_effect_y)\n\n def _update_effect_widget(self, *args):\n if self.effect_x:\n self.effect_x.target_widget = self._viewport\n if self.effect_y:\n self.effect_y.target_widget = self._viewport\n\n def _update_effect_x_bounds(self, *args):\n if not self._viewport or not self.effect_x:\n return\n self.effect_x.min = -(self.viewport_size[0] - self.width)\n self.effect_x.max = 0\n self.effect_x.value = self.effect_x.min * self.scroll_x\n\n def _update_effect_y_bounds(self, *args):\n if not self._viewport or not self.effect_y:\n return\n self.effect_y.min = -(self.viewport_size[1] - self.height)\n self.effect_y.max = 0\n self.effect_y.value = self.effect_y.min * self.scroll_y\n\n def _update_effect_bounds(self, *args):\n if not self._viewport:\n return\n if self.effect_x:\n self._update_effect_x_bounds()\n if self.effect_y:\n self._update_effect_y_bounds()\n\n def _update_effect_x(self, *args):\n vp = self._viewport\n if not vp or not self.effect_x:\n return\n sw = vp.width - 
self.width\n if sw < 1:\n return\n sx = self.effect_x.scroll / float(sw)\n self.scroll_x = -sx\n self._trigger_update_from_scroll()\n\n def _update_effect_y(self, *args):\n vp = self._viewport\n if not vp or not self.effect_y:\n return\n sh = vp.height - self.height\n if sh < 1:\n return\n sy = self.effect_y.scroll / float(sh)\n self.scroll_y = -sy\n self._trigger_update_from_scroll()\n\n def to_local(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x - tx, y - ty\n\n def to_parent(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x + tx, y + ty\n\n def simulate_touch_down(self, touch):\n # at this point the touch is in parent coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n ret = super(ScrollView, self).on_touch_down(touch)\n touch.pop()\n return ret\n\n def on_touch_down(self, touch):\n if self.dispatch('on_scroll_start', touch):\n self._touch = touch\n touch.grab(self)\n return True\n\n def on_scroll_start(self, touch, check_children=True):\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_start', touch):\n return True\n touch.pop()\n\n if not self.collide_point(*touch.pos):\n touch.ud[self._get_uid('svavoid')] = True\n return\n if self.disabled:\n return True\n if self._touch or (not (self.do_scroll_x or self.do_scroll_y)):\n return self.simulate_touch_down(touch)\n\n # handle mouse scrolling, only if the viewport size is bigger than the\n # scrollview size, and if the user allowed to do it\n vp = self._viewport\n if not vp:\n return True\n scroll_type = self.scroll_type\n ud = touch.ud\n scroll_bar = 'bars' in scroll_type\n\n # check if touch is in bar_x(horizontal) or bay_y(bertical)\n ud['in_bar_x'] = ud['in_bar_y'] = False\n width_scrollable = vp.width > self.width\n height_scrollable = vp.height > self.height\n bar_pos_x = self.bar_pos_x[0]\n bar_pos_y = self.bar_pos_y[0]\n\n d = {'b': True if touch.y < self.y + self.bar_width else False,\n 't': True if touch.y > self.top - self.bar_width else False,\n 'l': True if touch.x < self.x + self.bar_width else False,\n 'r': True if touch.x > self.right - self.bar_width else False}\n if scroll_bar:\n if (width_scrollable and d[bar_pos_x]):\n ud['in_bar_x'] = True\n if (height_scrollable and d[bar_pos_y]):\n ud['in_bar_y'] = True\n\n if vp and 'button' in touch.profile and \\\n touch.button.startswith('scroll'):\n btn = touch.button\n m = sp(self.scroll_wheel_distance)\n e = None\n\n if ((btn == 'scrolldown' and self.scroll_y >= 1) or\n (btn == 'scrollup' and self.scroll_y <= 0) or\n (btn == 'scrollleft' and self.scroll_x <= 0) or\n (btn == 'scrollright' and self.scroll_x >= 1)):\n return False\n\n if (self.effect_x and self.do_scroll_y and height_scrollable\n and btn in ('scrolldown', 'scrollup')):\n e = self.effect_x if ud['in_bar_x'] else self.effect_y\n\n elif (self.effect_y and self.do_scroll_x and width_scrollable\n and btn in ('scrollleft', 'scrollright')):\n e = self.effect_y if ud['in_bar_y'] else self.effect_x\n\n if e:\n if btn in ('scrolldown', 'scrollleft'):\n e.value = max(e.value - m, e.min)\n e.velocity = 0\n elif btn in ('scrollup', 'scrollright'):\n e.value = min(e.value + m, e.max)\n e.velocity = 0\n touch.ud[self._get_uid('svavoid')] = True\n e.trigger_velocity_update()\n return True\n\n # no mouse scrolling, so the user is going to drag the scrollview with\n # this touch.\n self._touch = touch\n uid = self._get_uid()\n\n ud[uid] = {\n 'mode': 'unknown',\n 'dx': 0,\n 'dy': 0,\n 'user_stopped': False,\n 'frames': 
Clock.frames,\n 'time': touch.time_start}\n\n if self.do_scroll_x and self.effect_x and not ud['in_bar_x']:\n self.effect_x.start(touch.x)\n self._scroll_x_mouse = self.scroll_x\n if self.do_scroll_y and self.effect_y and not ud['in_bar_y']:\n self.effect_y.start(touch.y)\n self._scroll_y_mouse = self.scroll_y\n\n if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):\n return True\n if scroll_type == ['bars']:\n # touch is in parent, but _change_touch_mode expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return False\n else:\n Clock.schedule_once(self._change_touch_mode,\n self.scroll_timeout / 1000.)\n return True\n\n def on_touch_move(self, touch):\n if self._touch is not touch:\n # touch is in parent\n touch.push()\n touch.apply_transform_2d(self.to_local)\n super(ScrollView, self).on_touch_move(touch)\n touch.pop()\n return self._get_uid() in touch.ud\n if touch.grab_current is not self:\n return True\n\n touch.ud['sv.handled'] = {'x': False, 'y': False}\n if self.dispatch('on_scroll_move', touch):\n return True\n\n def on_scroll_move(self, touch):\n if self._get_uid('svavoid') in touch.ud:\n return False\n\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_move', touch):\n return True\n touch.pop()\n\n rv = True\n\n uid = self._get_uid()\n if not uid in touch.ud:\n self._touch = False\n return self.on_scroll_start(touch, False)\n ud = touch.ud[uid]\n mode = ud['mode']\n\n # check if the minimum distance has been travelled\n if mode == 'unknown' or mode == 'scroll':\n if not touch.ud['sv.handled']['x'] and self.do_scroll_x \\\n and self.effect_x:\n width = self.width\n if touch.ud.get('in_bar_x', False):\n dx = touch.dx / float(width - width * self.hbar[1])\n self.scroll_x = min(max(self.scroll_x + dx, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_x.update(touch.x)\n if self.scroll_x < 0 or self.scroll_x > 1:\n rv = False\n else:\n touch.ud['sv.handled']['x'] = True\n if not touch.ud['sv.handled']['y'] and self.do_scroll_y \\\n and self.effect_y:\n height = self.height\n if touch.ud.get('in_bar_y', False):\n dy = touch.dy / float(height - height * self.vbar[1])\n self.scroll_y = min(max(self.scroll_y + dy, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_y.update(touch.y)\n if self.scroll_y < 0 or self.scroll_y > 1:\n rv = False\n else:\n touch.ud['sv.handled']['y'] = True\n\n if mode == 'unknown':\n ud['dx'] += abs(touch.dx)\n ud['dy'] += abs(touch.dy)\n if ud['dx'] > self.scroll_distance:\n if not self.do_scroll_x:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n\n if ud['dy'] > self.scroll_distance:\n if not self.do_scroll_y:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n ud['mode'] = mode\n\n if mode == 'scroll':\n ud['dt'] = touch.time_update - ud['time']\n ud['time'] = touch.time_update\n ud['user_stopped'] = True\n\n return rv\n\n def on_touch_up(self, touch):\n if self._touch is not touch and self.uid not in touch.ud:\n # touch is in parents\n 
touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(ScrollView, self).on_touch_up(touch):\n return True\n touch.pop()\n return False\n\n if self.dispatch('on_scroll_stop', touch):\n touch.ungrab(self)\n return True\n\n def on_scroll_stop(self, touch, check_children=True):\n self._touch = None\n\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_stop', touch):\n return True\n touch.pop()\n\n if self._get_uid('svavoid') in touch.ud:\n return\n if self._get_uid() not in touch.ud:\n return False\n\n self._touch = None\n uid = self._get_uid()\n ud = touch.ud[uid]\n if self.do_scroll_x and self.effect_x:\n if not touch.ud.get('in_bar_x', False) and\\\n self.scroll_type != ['bars']:\n self.effect_x.stop(touch.x)\n if self.do_scroll_y and self.effect_y and\\\n self.scroll_type != ['bars']:\n if not touch.ud.get('in_bar_y', False):\n self.effect_y.stop(touch.y)\n if ud['mode'] == 'unknown':\n # we must do the click at least..\n # only send the click if it was not a click to stop\n # autoscrolling\n if not ud['user_stopped']:\n self.simulate_touch_down(touch)\n Clock.schedule_once(partial(self._do_touch_up, touch), .2)\n Clock.unschedule(self._update_effect_bounds)\n Clock.schedule_once(self._update_effect_bounds)\n\n # if we do mouse scrolling, always accept it\n if 'button' in touch.profile and touch.button.startswith('scroll'):\n return True\n\n return self._get_uid() in touch.ud\n\n def convert_distance_to_scroll(self, dx, dy):\n '''Convert a distance in pixels to a scroll distance, depending on the\n content size and the scrollview size.\n\n The result will be a tuple of scroll distance that can be added to\n :data:`scroll_x` and :data:`scroll_y`\n '''\n if not self._viewport:\n return 0, 0\n vp = self._viewport\n if vp.width > self.width:\n sw = vp.width - self.width\n sx = dx / float(sw)\n else:\n sx = 0\n if vp.height > self.height:\n sh = vp.height - self.height\n sy = dy / float(sh)\n else:\n sy = 1\n return sx, sy\n\n def update_from_scroll(self, *largs):\n '''Force the reposition of the content, according to current value of\n :attr:`scroll_x` and :attr:`scroll_y`.\n\n This method is automatically called when one of the :attr:`scroll_x`,\n :attr:`scroll_y`, :attr:`pos` or :attr:`size` properties change, or\n if the size of the content changes.\n '''\n if not self._viewport:\n return\n vp = self._viewport\n\n # update from size_hint\n if vp.size_hint_x is not None:\n vp.width = vp.size_hint_x * self.width\n if vp.size_hint_y is not None:\n vp.height = vp.size_hint_y * self.height\n\n if vp.width > self.width:\n sw = vp.width - self.width\n x = self.x - self.scroll_x * sw\n else:\n x = self.x\n if vp.height > self.height:\n sh = vp.height - self.height\n y = self.y - self.scroll_y * sh\n else:\n y = self.top - vp.height\n\n # from 1.8.0, we now use a matrix by default, instead of moving the\n # widget position behind. 
We set it here, but it will be a no-op most of\n # the time.\n vp.pos = 0, 0\n self.g_translate.xy = x, y\n\n # New in 1.2.0, show bar when scrolling happens and (changed in 1.9.0)\n # fade to bar_inactive_color when no scroll is happening.\n Clock.unschedule(self._bind_inactive_bar_color)\n self.unbind(bar_inactive_color=self._change_bar_color)\n Animation.stop_all(self, '_bar_color')\n self.bind(bar_color=self._change_bar_color)\n self._bar_color = self.bar_color\n Clock.schedule_once(self._bind_inactive_bar_color, .5)\n\n def _bind_inactive_bar_color(self, *l):\n self.unbind(bar_color=self._change_bar_color)\n self.bind(bar_inactive_color=self._change_bar_color)\n Animation(\n _bar_color=self.bar_inactive_color, d=.5, t='out_quart').start(self)\n\n def _change_bar_color(self, inst, value):\n self._bar_color = value\n\n #\n # Private\n #\n def add_widget(self, widget, index=0):\n if self._viewport:\n raise Exception('ScrollView accept only one widget')\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).add_widget(widget, index)\n self.canvas = canvas\n self._viewport = widget\n widget.bind(size=self._trigger_update_from_scroll)\n self._trigger_update_from_scroll()\n\n def remove_widget(self, widget):\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).remove_widget(widget)\n self.canvas = canvas\n if widget is self._viewport:\n self._viewport = None\n\n def _get_uid(self, prefix='sv'):\n return '{0}.{1}'.format(prefix, self.uid)\n\n def _change_touch_mode(self, *largs):\n if not self._touch:\n return\n uid = self._get_uid()\n touch = self._touch\n ud = touch.ud[uid]\n if ud['mode'] != 'unknown' or ud['user_stopped']:\n return\n diff_frames = Clock.frames - ud['frames']\n\n # in order to be able to scroll on very slow devices, let at least 3\n # frames displayed to accumulate some velocity. And then, change the\n # touch mode. Otherwise, we might never be able to compute velocity, and\n # no way to scroll it. See #1464 and #1499\n if diff_frames < 3:\n Clock.schedule_once(self._change_touch_mode, 0)\n return\n\n if self.do_scroll_x and self.effect_x:\n self.effect_x.cancel()\n if self.do_scroll_y and self.effect_y:\n self.effect_y.cancel()\n # XXX the next line was in the condition. 
But this stop\n # the possibily to \"drag\" an object out of the scrollview in the\n # non-used direction: if you have an horizontal scrollview, a\n # vertical gesture will not \"stop\" the scroll view to look for an\n # horizontal gesture, until the timeout is done.\n # and touch.dx + touch.dy == 0:\n touch.ungrab(self)\n self._touch = None\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n touch.apply_transform_2d(self.to_parent)\n self.simulate_touch_down(touch)\n touch.pop()\n return\n\n def _do_touch_up(self, touch, *largs):\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n # don't forget about grab event!\n for x in touch.grab_list[:]:\n touch.grab_list.remove(x)\n x = x()\n if not x:\n continue\n touch.grab_current = x\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n touch.grab_current = None\n\n\nif __name__ == '__main__':\n from kivy.app import App\n\n from kivy.uix.gridlayout import GridLayout\n from kivy.uix.button import Button\n\n class ScrollViewApp(App):\n\n def build(self):\n layout1 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout1.bind(minimum_height=layout1.setter('height'),\n minimum_width=layout1.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout1.add_widget(btn)\n scrollview1 = ScrollView(bar_width='2dp')\n scrollview1.add_widget(layout1)\n\n layout2 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout2.bind(minimum_height=layout2.setter('height'),\n minimum_width=layout2.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout2.add_widget(btn)\n scrollview2 = ScrollView(scroll_type=['bars'],\n bar_width='9dp',\n scroll_wheel_distance=100)\n scrollview2.add_widget(layout2)\n\n root = GridLayout(cols=2)\n root.add_widget(scrollview1)\n root.add_widget(scrollview2)\n return root\n\n ScrollViewApp().run()\n", "path": "kivy/uix/scrollview.py"}]} |
gh_patches_debug_1491 | rasdani/github-patches | git_diff | encode__httpx-721 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keepalive connections aren't released when closing the ConnectionPool
Hello. I am having an issue where it looks like connections aren't being closed correctly, and after I reach a number of requests equal to the "hard_limit" of pool_limits, I get a PoolTimeout exception.
I tried upgrading to httpx==0.10.1, with no success.
Minimal example:
```
import httpx, asyncio, logging
from httpx import PoolLimits
from random import randint
queue = asyncio.Queue()
clients = [
httpx.AsyncClient(
http2=True,
pool_limits=PoolLimits(soft_limit=2, hard_limit=10),
cookies={'a': '123456789', 'b': '987654321'},
)
]
async def worker_loop(cid, client, queue):
while 1:
sub_id = await queue.get()
async with client as c:
r = await c.get(f'https://mywebsite.dummy/submission.php?id={sub_id}')
if r.status_code != 200:
print(cid, f'Got status code {r.status_code} while parsing {sub_id}')
return
async def main():
for i in range(2500):
await queue.put(randint(1, 80000000))
for k, v in enumerate(clients):
asyncio.create_task(worker_loop(k, v, queue))
while 1:
if queue.qsize() == 0:
await queue.put(randint(1, 80000000))
await asyncio.sleep(2)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.stop()
```
I checked with netstat, and only one actual connection is opened to the IP address, so pooling seems to work fine.
I really cannot understand why. I even tried calling "aclose()" explicitly, without the "async with" block, but it made no difference.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/dispatch/connection_pool.py`
Content:
```
1 import typing
2
3 from ..backends.base import BaseSemaphore, ConcurrencyBackend, lookup_backend
4 from ..config import (
5 DEFAULT_POOL_LIMITS,
6 CertTypes,
7 PoolLimits,
8 SSLConfig,
9 Timeout,
10 VerifyTypes,
11 )
12 from ..exceptions import PoolTimeout
13 from ..models import Origin, Request, Response
14 from ..utils import get_logger
15 from .base import Dispatcher
16 from .connection import HTTPConnection
17
18 CONNECTIONS_DICT = typing.Dict[Origin, typing.List[HTTPConnection]]
19
20
21 logger = get_logger(__name__)
22
23
24 class NullSemaphore(BaseSemaphore):
25 async def acquire(self, timeout: float = None) -> None:
26 return
27
28 def release(self) -> None:
29 return
30
31
32 class ConnectionStore:
33 """
34 We need to maintain collections of connections in a way that allows us to:
35
36 * Lookup connections by origin.
37 * Iterate over connections by insertion time.
38 * Return the total number of connections.
39 """
40
41 def __init__(self) -> None:
42 self.all: typing.Dict[HTTPConnection, float] = {}
43 self.by_origin: typing.Dict[Origin, typing.Dict[HTTPConnection, float]] = {}
44
45 def pop_by_origin(
46 self, origin: Origin, http2_only: bool = False
47 ) -> typing.Optional[HTTPConnection]:
48 try:
49 connections = self.by_origin[origin]
50 except KeyError:
51 return None
52
53 connection = next(reversed(list(connections.keys())))
54 if http2_only and not connection.is_http2:
55 return None
56
57 del connections[connection]
58 if not connections:
59 del self.by_origin[origin]
60 del self.all[connection]
61
62 return connection
63
64 def add(self, connection: HTTPConnection) -> None:
65 self.all[connection] = 0.0
66 try:
67 self.by_origin[connection.origin][connection] = 0.0
68 except KeyError:
69 self.by_origin[connection.origin] = {connection: 0.0}
70
71 def remove(self, connection: HTTPConnection) -> None:
72 del self.all[connection]
73 del self.by_origin[connection.origin][connection]
74 if not self.by_origin[connection.origin]:
75 del self.by_origin[connection.origin]
76
77 def clear(self) -> None:
78 self.all.clear()
79 self.by_origin.clear()
80
81 def __iter__(self) -> typing.Iterator[HTTPConnection]:
82 return iter(self.all.keys())
83
84 def __len__(self) -> int:
85 return len(self.all)
86
87
88 class ConnectionPool(Dispatcher):
89 KEEP_ALIVE_EXPIRY = 5.0
90
91 def __init__(
92 self,
93 *,
94 verify: VerifyTypes = True,
95 cert: CertTypes = None,
96 trust_env: bool = None,
97 pool_limits: PoolLimits = DEFAULT_POOL_LIMITS,
98 http2: bool = False,
99 backend: typing.Union[str, ConcurrencyBackend] = "auto",
100 uds: typing.Optional[str] = None,
101 ):
102 self.ssl = SSLConfig(verify=verify, cert=cert, trust_env=trust_env, http2=http2)
103 self.pool_limits = pool_limits
104 self.is_closed = False
105 self.uds = uds
106
107 self.keepalive_connections = ConnectionStore()
108 self.active_connections = ConnectionStore()
109
110 self.backend = lookup_backend(backend)
111 self.next_keepalive_check = 0.0
112
113 @property
114 def max_connections(self) -> BaseSemaphore:
115 # We do this lazily, to make sure backend autodetection always
116 # runs within an async context.
117 if not hasattr(self, "_max_connections"):
118 limit = self.pool_limits.hard_limit
119 if limit:
120 self._max_connections = self.backend.create_semaphore(
121 limit, exc_class=PoolTimeout
122 )
123 else:
124 self._max_connections = NullSemaphore()
125
126 return self._max_connections
127
128 @property
129 def num_connections(self) -> int:
130 return len(self.keepalive_connections) + len(self.active_connections)
131
132 async def check_keepalive_expiry(self) -> None:
133 now = self.backend.time()
134 if now < self.next_keepalive_check:
135 return
136 self.next_keepalive_check = now + 1.0
137
138 # Iterate through all the keep alive connections.
139 # We create a list here to avoid any 'changed during iteration' errors.
140 keepalives = list(self.keepalive_connections.all.keys())
141 for connection in keepalives:
142 if connection.expires_at is not None and now > connection.expires_at:
143 self.keepalive_connections.remove(connection)
144 self.max_connections.release()
145 await connection.close()
146
147 async def send(self, request: Request, timeout: Timeout = None) -> Response:
148 await self.check_keepalive_expiry()
149 connection = await self.acquire_connection(
150 origin=request.url.origin, timeout=timeout
151 )
152 try:
153 response = await connection.send(request, timeout=timeout)
154 except BaseException as exc:
155 self.active_connections.remove(connection)
156 self.max_connections.release()
157 raise exc
158
159 return response
160
161 async def acquire_connection(
162 self, origin: Origin, timeout: Timeout = None
163 ) -> HTTPConnection:
164 logger.trace(f"acquire_connection origin={origin!r}")
165 connection = self.pop_connection(origin)
166
167 if connection is None:
168 pool_timeout = None if timeout is None else timeout.pool_timeout
169
170 await self.max_connections.acquire(timeout=pool_timeout)
171 connection = HTTPConnection(
172 origin,
173 ssl=self.ssl,
174 backend=self.backend,
175 release_func=self.release_connection,
176 uds=self.uds,
177 )
178 logger.trace(f"new_connection connection={connection!r}")
179 else:
180 logger.trace(f"reuse_connection connection={connection!r}")
181
182 self.active_connections.add(connection)
183
184 return connection
185
186 async def release_connection(self, connection: HTTPConnection) -> None:
187 logger.trace(f"release_connection connection={connection!r}")
188 if connection.is_closed:
189 self.active_connections.remove(connection)
190 self.max_connections.release()
191 elif (
192 self.pool_limits.soft_limit is not None
193 and self.num_connections > self.pool_limits.soft_limit
194 ):
195 self.active_connections.remove(connection)
196 self.max_connections.release()
197 await connection.close()
198 else:
199 now = self.backend.time()
200 connection.expires_at = now + self.KEEP_ALIVE_EXPIRY
201 self.active_connections.remove(connection)
202 self.keepalive_connections.add(connection)
203
204 async def close(self) -> None:
205 self.is_closed = True
206 connections = list(self.keepalive_connections)
207 self.keepalive_connections.clear()
208 for connection in connections:
209 await connection.close()
210
211 def pop_connection(self, origin: Origin) -> typing.Optional[HTTPConnection]:
212 connection = self.active_connections.pop_by_origin(origin, http2_only=True)
213 if connection is None:
214 connection = self.keepalive_connections.pop_by_origin(origin)
215
216 if connection is not None and connection.is_connection_dropped():
217 self.max_connections.release()
218 connection = None
219
220 return connection
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/dispatch/connection_pool.py b/httpx/dispatch/connection_pool.py
--- a/httpx/dispatch/connection_pool.py
+++ b/httpx/dispatch/connection_pool.py
@@ -206,6 +206,7 @@
connections = list(self.keepalive_connections)
self.keepalive_connections.clear()
for connection in connections:
+ self.max_connections.release()
await connection.close()
def pop_connection(self, origin: Origin) -> typing.Optional[HTTPConnection]:
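As a side note for readers of this record: the one-line fix above restores the semaphore bookkeeping that keep-alive connections rely on. A minimal, self-contained sketch of that accounting (using plain `asyncio`; `ToyPool` and its method names are invented for illustration and are not httpx APIs) looks roughly like this:

```python
import asyncio


class ToyPool:
    """Toy stand-in for ConnectionPool's hard-limit bookkeeping."""

    def __init__(self, hard_limit):
        self._slots = asyncio.Semaphore(hard_limit)  # one slot per live connection
        self._keepalive = []                         # connections parked for reuse

    async def acquire(self):
        # Mirrors acquire_connection(): every new connection takes a slot.
        await asyncio.wait_for(self._slots.acquire(), timeout=0.1)
        return object()

    def park(self, conn):
        # Mirrors release_connection() for a healthy connection: it stays open,
        # so its slot is deliberately *not* released here.
        self._keepalive.append(conn)

    async def close(self):
        # Mirrors the patched close(): each keep-alive connection gives back its slot.
        for _ in self._keepalive:
            self._slots.release()
        self._keepalive.clear()


async def main():
    pool = ToyPool(hard_limit=2)
    for _ in range(2):
        pool.park(await pool.acquire())
    await pool.close()
    # Without the release() added by the patch, this acquire would time out,
    # the analogue of the PoolTimeout reported in the issue.
    await pool.acquire()
    print("slot reclaimed after close()")


asyncio.run(main())
```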
| {"golden_diff": "diff --git a/httpx/dispatch/connection_pool.py b/httpx/dispatch/connection_pool.py\n--- a/httpx/dispatch/connection_pool.py\n+++ b/httpx/dispatch/connection_pool.py\n@@ -206,6 +206,7 @@\n connections = list(self.keepalive_connections)\n self.keepalive_connections.clear()\n for connection in connections:\n+ self.max_connections.release()\n await connection.close()\n \n def pop_connection(self, origin: Origin) -> typing.Optional[HTTPConnection]:\n", "issue": "Keepalive connections aren't released when closing the ConnectionPool\nHello. I am having an issue where it looks like connections aren't being closed correctly, and after i reach a number of requests equivalent to \"hard_limit\" of pool_limits, i get a PoolTimeout exception.\r\n\r\nI tried upgrading to httpx==0.10.1, with no success.\r\n\r\nMinimal example:\r\n```\r\nimport httpx, asyncio, logging\r\nfrom httpx import PoolLimits\r\nfrom random import randint\r\n\r\nqueue = asyncio.Queue()\r\n\r\nclients = [\r\n\thttpx.AsyncClient(\r\n\t\thttp2=True,\r\n\t\tpool_limits=PoolLimits(soft_limit=2, hard_limit=10),\r\n\t\tcookies={'a': '123456789', 'b': '987654321'},\r\n\t)\r\n]\r\n\r\nasync def worker_loop(cid, client, queue):\r\n\twhile 1:\r\n\t\tsub_id = await queue.get()\r\n\r\n\t\tasync with client as c:\r\n\t\t\tr = await c.get(f'https://mywebsite.dummy/submission.php?id={sub_id}')\r\n\r\n\t\tif r.status_code != 200:\r\n\t\t\tprint(cid, f'Got status code {r.status_code} while parsing {sub_id}')\r\n\t\t\treturn\r\n\r\nasync def main():\r\n\tfor i in range(2500):\r\n\t\tawait queue.put(randint(1, 80000000))\r\n\r\n\tfor k, v in enumerate(clients):\r\n\t\tasyncio.create_task(worker_loop(k, v, queue))\r\n\r\n\twhile 1:\r\n\t\tif queue.qsize() == 0:\r\n\t\t\tawait queue.put(randint(1, 80000000))\r\n\t\tawait asyncio.sleep(2)\r\n\r\nloop = asyncio.get_event_loop()\r\nloop.run_until_complete(main())\r\nloop.stop()\r\n```\r\n\r\n\r\nI checked with netstat, and only one actual connection is opened to the IP address, so pooling seems to work fine.\r\nI really cannot understand why. 
I even tried using the \"aclose()\" syntax, without the \"async with\" block, but no difference at all.\n", "before_files": [{"content": "import typing\n\nfrom ..backends.base import BaseSemaphore, ConcurrencyBackend, lookup_backend\nfrom ..config import (\n DEFAULT_POOL_LIMITS,\n CertTypes,\n PoolLimits,\n SSLConfig,\n Timeout,\n VerifyTypes,\n)\nfrom ..exceptions import PoolTimeout\nfrom ..models import Origin, Request, Response\nfrom ..utils import get_logger\nfrom .base import Dispatcher\nfrom .connection import HTTPConnection\n\nCONNECTIONS_DICT = typing.Dict[Origin, typing.List[HTTPConnection]]\n\n\nlogger = get_logger(__name__)\n\n\nclass NullSemaphore(BaseSemaphore):\n async def acquire(self, timeout: float = None) -> None:\n return\n\n def release(self) -> None:\n return\n\n\nclass ConnectionStore:\n \"\"\"\n We need to maintain collections of connections in a way that allows us to:\n\n * Lookup connections by origin.\n * Iterate over connections by insertion time.\n * Return the total number of connections.\n \"\"\"\n\n def __init__(self) -> None:\n self.all: typing.Dict[HTTPConnection, float] = {}\n self.by_origin: typing.Dict[Origin, typing.Dict[HTTPConnection, float]] = {}\n\n def pop_by_origin(\n self, origin: Origin, http2_only: bool = False\n ) -> typing.Optional[HTTPConnection]:\n try:\n connections = self.by_origin[origin]\n except KeyError:\n return None\n\n connection = next(reversed(list(connections.keys())))\n if http2_only and not connection.is_http2:\n return None\n\n del connections[connection]\n if not connections:\n del self.by_origin[origin]\n del self.all[connection]\n\n return connection\n\n def add(self, connection: HTTPConnection) -> None:\n self.all[connection] = 0.0\n try:\n self.by_origin[connection.origin][connection] = 0.0\n except KeyError:\n self.by_origin[connection.origin] = {connection: 0.0}\n\n def remove(self, connection: HTTPConnection) -> None:\n del self.all[connection]\n del self.by_origin[connection.origin][connection]\n if not self.by_origin[connection.origin]:\n del self.by_origin[connection.origin]\n\n def clear(self) -> None:\n self.all.clear()\n self.by_origin.clear()\n\n def __iter__(self) -> typing.Iterator[HTTPConnection]:\n return iter(self.all.keys())\n\n def __len__(self) -> int:\n return len(self.all)\n\n\nclass ConnectionPool(Dispatcher):\n KEEP_ALIVE_EXPIRY = 5.0\n\n def __init__(\n self,\n *,\n verify: VerifyTypes = True,\n cert: CertTypes = None,\n trust_env: bool = None,\n pool_limits: PoolLimits = DEFAULT_POOL_LIMITS,\n http2: bool = False,\n backend: typing.Union[str, ConcurrencyBackend] = \"auto\",\n uds: typing.Optional[str] = None,\n ):\n self.ssl = SSLConfig(verify=verify, cert=cert, trust_env=trust_env, http2=http2)\n self.pool_limits = pool_limits\n self.is_closed = False\n self.uds = uds\n\n self.keepalive_connections = ConnectionStore()\n self.active_connections = ConnectionStore()\n\n self.backend = lookup_backend(backend)\n self.next_keepalive_check = 0.0\n\n @property\n def max_connections(self) -> BaseSemaphore:\n # We do this lazily, to make sure backend autodetection always\n # runs within an async context.\n if not hasattr(self, \"_max_connections\"):\n limit = self.pool_limits.hard_limit\n if limit:\n self._max_connections = self.backend.create_semaphore(\n limit, exc_class=PoolTimeout\n )\n else:\n self._max_connections = NullSemaphore()\n\n return self._max_connections\n\n @property\n def num_connections(self) -> int:\n return len(self.keepalive_connections) + len(self.active_connections)\n\n async def 
check_keepalive_expiry(self) -> None:\n now = self.backend.time()\n if now < self.next_keepalive_check:\n return\n self.next_keepalive_check = now + 1.0\n\n # Iterate through all the keep alive connections.\n # We create a list here to avoid any 'changed during iteration' errors.\n keepalives = list(self.keepalive_connections.all.keys())\n for connection in keepalives:\n if connection.expires_at is not None and now > connection.expires_at:\n self.keepalive_connections.remove(connection)\n self.max_connections.release()\n await connection.close()\n\n async def send(self, request: Request, timeout: Timeout = None) -> Response:\n await self.check_keepalive_expiry()\n connection = await self.acquire_connection(\n origin=request.url.origin, timeout=timeout\n )\n try:\n response = await connection.send(request, timeout=timeout)\n except BaseException as exc:\n self.active_connections.remove(connection)\n self.max_connections.release()\n raise exc\n\n return response\n\n async def acquire_connection(\n self, origin: Origin, timeout: Timeout = None\n ) -> HTTPConnection:\n logger.trace(f\"acquire_connection origin={origin!r}\")\n connection = self.pop_connection(origin)\n\n if connection is None:\n pool_timeout = None if timeout is None else timeout.pool_timeout\n\n await self.max_connections.acquire(timeout=pool_timeout)\n connection = HTTPConnection(\n origin,\n ssl=self.ssl,\n backend=self.backend,\n release_func=self.release_connection,\n uds=self.uds,\n )\n logger.trace(f\"new_connection connection={connection!r}\")\n else:\n logger.trace(f\"reuse_connection connection={connection!r}\")\n\n self.active_connections.add(connection)\n\n return connection\n\n async def release_connection(self, connection: HTTPConnection) -> None:\n logger.trace(f\"release_connection connection={connection!r}\")\n if connection.is_closed:\n self.active_connections.remove(connection)\n self.max_connections.release()\n elif (\n self.pool_limits.soft_limit is not None\n and self.num_connections > self.pool_limits.soft_limit\n ):\n self.active_connections.remove(connection)\n self.max_connections.release()\n await connection.close()\n else:\n now = self.backend.time()\n connection.expires_at = now + self.KEEP_ALIVE_EXPIRY\n self.active_connections.remove(connection)\n self.keepalive_connections.add(connection)\n\n async def close(self) -> None:\n self.is_closed = True\n connections = list(self.keepalive_connections)\n self.keepalive_connections.clear()\n for connection in connections:\n await connection.close()\n\n def pop_connection(self, origin: Origin) -> typing.Optional[HTTPConnection]:\n connection = self.active_connections.pop_by_origin(origin, http2_only=True)\n if connection is None:\n connection = self.keepalive_connections.pop_by_origin(origin)\n\n if connection is not None and connection.is_connection_dropped():\n self.max_connections.release()\n connection = None\n\n return connection\n", "path": "httpx/dispatch/connection_pool.py"}], "after_files": [{"content": "import typing\n\nfrom ..backends.base import BaseSemaphore, ConcurrencyBackend, lookup_backend\nfrom ..config import (\n DEFAULT_POOL_LIMITS,\n CertTypes,\n PoolLimits,\n SSLConfig,\n Timeout,\n VerifyTypes,\n)\nfrom ..exceptions import PoolTimeout\nfrom ..models import Origin, Request, Response\nfrom ..utils import get_logger\nfrom .base import Dispatcher\nfrom .connection import HTTPConnection\n\nCONNECTIONS_DICT = typing.Dict[Origin, typing.List[HTTPConnection]]\n\n\nlogger = get_logger(__name__)\n\n\nclass NullSemaphore(BaseSemaphore):\n async 
def acquire(self, timeout: float = None) -> None:\n return\n\n def release(self) -> None:\n return\n\n\nclass ConnectionStore:\n \"\"\"\n We need to maintain collections of connections in a way that allows us to:\n\n * Lookup connections by origin.\n * Iterate over connections by insertion time.\n * Return the total number of connections.\n \"\"\"\n\n def __init__(self) -> None:\n self.all: typing.Dict[HTTPConnection, float] = {}\n self.by_origin: typing.Dict[Origin, typing.Dict[HTTPConnection, float]] = {}\n\n def pop_by_origin(\n self, origin: Origin, http2_only: bool = False\n ) -> typing.Optional[HTTPConnection]:\n try:\n connections = self.by_origin[origin]\n except KeyError:\n return None\n\n connection = next(reversed(list(connections.keys())))\n if http2_only and not connection.is_http2:\n return None\n\n del connections[connection]\n if not connections:\n del self.by_origin[origin]\n del self.all[connection]\n\n return connection\n\n def add(self, connection: HTTPConnection) -> None:\n self.all[connection] = 0.0\n try:\n self.by_origin[connection.origin][connection] = 0.0\n except KeyError:\n self.by_origin[connection.origin] = {connection: 0.0}\n\n def remove(self, connection: HTTPConnection) -> None:\n del self.all[connection]\n del self.by_origin[connection.origin][connection]\n if not self.by_origin[connection.origin]:\n del self.by_origin[connection.origin]\n\n def clear(self) -> None:\n self.all.clear()\n self.by_origin.clear()\n\n def __iter__(self) -> typing.Iterator[HTTPConnection]:\n return iter(self.all.keys())\n\n def __len__(self) -> int:\n return len(self.all)\n\n\nclass ConnectionPool(Dispatcher):\n KEEP_ALIVE_EXPIRY = 5.0\n\n def __init__(\n self,\n *,\n verify: VerifyTypes = True,\n cert: CertTypes = None,\n trust_env: bool = None,\n pool_limits: PoolLimits = DEFAULT_POOL_LIMITS,\n http2: bool = False,\n backend: typing.Union[str, ConcurrencyBackend] = \"auto\",\n uds: typing.Optional[str] = None,\n ):\n self.ssl = SSLConfig(verify=verify, cert=cert, trust_env=trust_env, http2=http2)\n self.pool_limits = pool_limits\n self.is_closed = False\n self.uds = uds\n\n self.keepalive_connections = ConnectionStore()\n self.active_connections = ConnectionStore()\n\n self.backend = lookup_backend(backend)\n self.next_keepalive_check = 0.0\n\n @property\n def max_connections(self) -> BaseSemaphore:\n # We do this lazily, to make sure backend autodetection always\n # runs within an async context.\n if not hasattr(self, \"_max_connections\"):\n limit = self.pool_limits.hard_limit\n if limit:\n self._max_connections = self.backend.create_semaphore(\n limit, exc_class=PoolTimeout\n )\n else:\n self._max_connections = NullSemaphore()\n\n return self._max_connections\n\n @property\n def num_connections(self) -> int:\n return len(self.keepalive_connections) + len(self.active_connections)\n\n async def check_keepalive_expiry(self) -> None:\n now = self.backend.time()\n if now < self.next_keepalive_check:\n return\n self.next_keepalive_check = now + 1.0\n\n # Iterate through all the keep alive connections.\n # We create a list here to avoid any 'changed during iteration' errors.\n keepalives = list(self.keepalive_connections.all.keys())\n for connection in keepalives:\n if connection.expires_at is not None and now > connection.expires_at:\n self.keepalive_connections.remove(connection)\n self.max_connections.release()\n await connection.close()\n\n async def send(self, request: Request, timeout: Timeout = None) -> Response:\n await self.check_keepalive_expiry()\n connection = await 
self.acquire_connection(\n origin=request.url.origin, timeout=timeout\n )\n try:\n response = await connection.send(request, timeout=timeout)\n except BaseException as exc:\n self.active_connections.remove(connection)\n self.max_connections.release()\n raise exc\n\n return response\n\n async def acquire_connection(\n self, origin: Origin, timeout: Timeout = None\n ) -> HTTPConnection:\n logger.trace(f\"acquire_connection origin={origin!r}\")\n connection = self.pop_connection(origin)\n\n if connection is None:\n pool_timeout = None if timeout is None else timeout.pool_timeout\n\n await self.max_connections.acquire(timeout=pool_timeout)\n connection = HTTPConnection(\n origin,\n ssl=self.ssl,\n backend=self.backend,\n release_func=self.release_connection,\n uds=self.uds,\n )\n logger.trace(f\"new_connection connection={connection!r}\")\n else:\n logger.trace(f\"reuse_connection connection={connection!r}\")\n\n self.active_connections.add(connection)\n\n return connection\n\n async def release_connection(self, connection: HTTPConnection) -> None:\n logger.trace(f\"release_connection connection={connection!r}\")\n if connection.is_closed:\n self.active_connections.remove(connection)\n self.max_connections.release()\n elif (\n self.pool_limits.soft_limit is not None\n and self.num_connections > self.pool_limits.soft_limit\n ):\n self.active_connections.remove(connection)\n self.max_connections.release()\n await connection.close()\n else:\n now = self.backend.time()\n connection.expires_at = now + self.KEEP_ALIVE_EXPIRY\n self.active_connections.remove(connection)\n self.keepalive_connections.add(connection)\n\n async def close(self) -> None:\n self.is_closed = True\n connections = list(self.keepalive_connections)\n self.keepalive_connections.clear()\n for connection in connections:\n self.max_connections.release()\n await connection.close()\n\n def pop_connection(self, origin: Origin) -> typing.Optional[HTTPConnection]:\n connection = self.active_connections.pop_by_origin(origin, http2_only=True)\n if connection is None:\n connection = self.keepalive_connections.pop_by_origin(origin)\n\n if connection is not None and connection.is_connection_dropped():\n self.max_connections.release()\n connection = None\n\n return connection\n", "path": "httpx/dispatch/connection_pool.py"}]} |
gh_patches_debug_1492 | rasdani/github-patches | git_diff | wright-group__WrightTools-899 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
print_tree should print value and units for variables with size 1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/_dataset.py`
Content:
```
1 """Dataset base class."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import collections
8
9 import numpy as np
10
11 import h5py
12
13 from . import exceptions as wt_exceptions
14 from . import kit as wt_kit
15 from . import units as wt_units
16
17
18 # --- class ---------------------------------------------------------------------------------------
19
20
21 class Dataset(h5py.Dataset):
22 """Array-like data container."""
23
24 _instances = {}
25 class_name = "Dataset"
26
27 def __getitem__(self, index):
28 if not hasattr(index, "__iter__"):
29 index = [index]
30 index = wt_kit.valid_index(index, self.shape)
31 return super().__getitem__(index)
32
33 def __iadd__(self, value):
34 def f(dataset, s, value):
35 if hasattr(value, "shape"):
36 dataset[s] += value[wt_kit.valid_index(s, value.shape)]
37 else:
38 dataset[s] += value
39
40 self.chunkwise(f, value=value)
41 return self
42
43 def __imul__(self, value):
44 def f(dataset, s, value):
45 if hasattr(value, "shape"):
46 dataset[s] *= value[wt_kit.valid_index(s, value.shape)]
47 else:
48 dataset[s] *= value
49
50 self.chunkwise(f, value=value)
51 return self
52
53 def __ipow__(self, value):
54 def f(dataset, s, value):
55 if hasattr(value, "shape"):
56 dataset[s] **= value[wt_kit.valid_index(s, value.shape)]
57 else:
58 dataset[s] **= value
59
60 self.chunkwise(f, value=value)
61 return self
62
63 def __isub__(self, value):
64 def f(dataset, s, value):
65 if hasattr(value, "shape"):
66 dataset[s] -= value[wt_kit.valid_index(s, value.shape)]
67 else:
68 dataset[s] -= value
69
70 self.chunkwise(f, value=value)
71 return self
72
73 def __itruediv__(self, value):
74 def f(dataset, s, value):
75 if hasattr(value, "shape"):
76 dataset[s] /= value[wt_kit.valid_index(s, value.shape)]
77 else:
78 dataset[s] /= value
79
80 self.chunkwise(f, value=value)
81 return self
82
83 def __init__(self, *args, **kwargs):
84 super().__init__(*args, **kwargs)
85
86 def __new__(cls, parent, id, **kwargs):
87 """New object formation handler."""
88 fullpath = parent.fullpath + h5py.h5i.get_name(id).decode()
89 fullpath = fullpath.replace("//", "/")
90 if fullpath in cls._instances.keys():
91 return cls._instances[fullpath]
92 else:
93 instance = super(Dataset, cls).__new__(cls)
94 cls.__init__(instance, parent, id, **kwargs)
95 cls._instances[fullpath] = instance
96 return instance
97
98 def __repr__(self):
99 return "<WrightTools.{0} '{1}' at {2}>".format(
100 self.class_name, self.natural_name, self.fullpath
101 )
102
103 def __setitem__(self, index, value):
104 self._clear_array_attributes_cache()
105 return super().__setitem__(index, value)
106
107 def _clear_array_attributes_cache(self):
108 if "max" in self.attrs.keys():
109 del self.attrs["max"]
110 if "min" in self.attrs.keys():
111 del self.attrs["min"]
112 if "argmax" in self.attrs.keys():
113 del self.attrs["argmax"]
114 if "argmin" in self.attrs.keys():
115 del self.attrs["argmin"]
116
117 @property
118 def _leaf(self):
119 out = self.natural_name
120 if self.units is not None:
121 out += " ({0})".format(self.units)
122 out += " {0}".format(self.shape)
123 return out
124
125 @property
126 def full(self):
127 arr = self[:]
128 for i in range(arr.ndim):
129 if arr.shape[i] == 1:
130 arr = np.repeat(arr, self.parent.shape[i], axis=i)
131 return arr
132
133 @property
134 def fullpath(self):
135 """Full path: file and internal structure."""
136 return self.parent.filepath + "::" + self.name
137
138 @property
139 def natural_name(self):
140 """Natural name of the dataset. May be different from name."""
141 try:
142 assert self._natural_name is not None
143 except (AssertionError, AttributeError):
144 self._natural_name = self.attrs["name"]
145 finally:
146 return self._natural_name
147
148 @natural_name.setter
149 def natural_name(self, value):
150 self.attrs["name"] = value
151 self._natural_name = None
152
153 @property
154 def parent(self):
155 """Parent."""
156 return self._parent
157
158 @property
159 def points(self):
160 """Squeezed array."""
161 return np.squeeze(self[:])
162
163 @property
164 def units(self):
165 """Units."""
166 if "units" in self.attrs.keys():
167 # This try-except here for compatibility with v1.0.0 of WT5 format
168 try:
169 self.attrs["units"] = self.attrs["units"].decode()
170 except AttributeError:
171 pass # already a string, not bytes
172 return self.attrs["units"]
173 return None
174
175 @units.setter
176 def units(self, value):
177 """Set units."""
178 if value is None:
179 if "units" in self.attrs.keys():
180 self.attrs.pop("units")
181 else:
182 try:
183 self.attrs["units"] = value
184 except AttributeError:
185 self.attrs["units"] = value
186
187 def argmax(self):
188 """Index of the maximum, ignorning nans."""
189 if "argmax" not in self.attrs.keys():
190
191 def f(dataset, s):
192 arr = dataset[s]
193 try:
194 amin = np.nanargmax(arr)
195 except ValueError:
196 amin = 0
197 idx = np.unravel_index(amin, arr.shape)
198 val = arr[idx]
199 return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)
200
201 chunk_res = self.chunkwise(f)
202 idxs = [i[0] for i in chunk_res.values()]
203 vals = [i[1] for i in chunk_res.values()]
204 self.attrs["argmax"] = idxs[np.nanargmax(vals)]
205 return tuple(self.attrs["argmax"])
206
207 def argmin(self):
208 """Index of the minimum, ignoring nans."""
209 if "argmin" not in self.attrs.keys():
210
211 def f(dataset, s):
212 arr = dataset[s]
213 try:
214 amin = np.nanargmin(arr)
215 except ValueError:
216 amin = 0
217 idx = np.unravel_index(amin, arr.shape)
218 val = arr[idx]
219 return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)
220
221 chunk_res = self.chunkwise(f)
222 idxs = [i[0] for i in chunk_res.values()]
223 vals = [i[1] for i in chunk_res.values()]
224 self.attrs["argmin"] = idxs[np.nanargmin(vals)]
225 return tuple(self.attrs["argmin"])
226
227 def chunkwise(self, func, *args, **kwargs):
228 """Execute a function for each chunk in the dataset.
229
230 Order of excecution is not guaranteed.
231
232 Parameters
233 ----------
234 func : function
235 Function to execute. First two arguments must be dataset,
236 slices.
237 args (optional)
238 Additional (unchanging) arguments passed to func.
239 kwargs (optional)
240 Additional (unchanging) keyword arguments passed to func.
241
242 Returns
243 -------
244 collections OrderedDict
245 Dictionary of index: function output. Index is to lowest corner
246 of each chunk.
247 """
248 out = collections.OrderedDict()
249 for s in self.slices():
250 key = tuple(sss.start for sss in s)
251 out[key] = func(self, s, *args, **kwargs)
252 self._clear_array_attributes_cache()
253 return out
254
255 def clip(self, min=None, max=None, replace=np.nan):
256 """Clip values outside of a defined range.
257
258 Parameters
259 ----------
260 min : number (optional)
261 New channel minimum. Default is None.
262 max : number (optional)
263 New channel maximum. Default is None.
264 replace : number or 'value' (optional)
265 Replace behavior. Default is nan.
266 """
267 if max is None:
268 max = self.max()
269 if min is None:
270 min = self.min()
271
272 def f(dataset, s, min, max, replace):
273 if hasattr(min, "shape"):
274 min = min[wt_kit.valid_index(s, min.shape)]
275 if hasattr(max, "shape"):
276 max = max[wt_kit.valid_index(s, max.shape)]
277 if hasattr(replace, "shape"):
278 replace = replace[wt_kit.valid_index(s, replace.shape)]
279 arr = dataset[s]
280 if replace == "value":
281 dataset[s] = np.clip(arr, min, max)
282 else:
283 arr[arr < min] = replace
284 arr[arr > max] = replace
285 dataset[s] = arr
286
287 self.chunkwise(f, min=min, max=max, replace=replace)
288
289 def convert(self, destination_units):
290 """Convert units.
291
292 Parameters
293 ----------
294 destination_units : string (optional)
295 Units to convert into.
296 """
297 if not wt_units.is_valid_conversion(self.units, destination_units):
298 kind = wt_units.kind(self.units)
299 valid = list(wt_units.dicts[kind].keys())
300 raise wt_exceptions.UnitsError(valid, destination_units)
301 if self.units is None:
302 return
303
304 def f(dataset, s, destination_units):
305 dataset[s] = wt_units.converter(dataset[s], dataset.units, destination_units)
306
307 self.chunkwise(f, destination_units=destination_units)
308 self.units = destination_units
309
310 def log(self, base=np.e, floor=None):
311 """Take the log of the entire dataset.
312
313 Parameters
314 ----------
315 base : number (optional)
316 Base of log. Default is e.
317 floor : number (optional)
318 Clip values below floor after log. Default is None.
319 """
320
321 def f(dataset, s, base, floor):
322 arr = dataset[s]
323 arr = np.log(arr)
324 if base != np.e:
325 arr /= np.log(base)
326 if floor is not None:
327 arr[arr < floor] = floor
328 dataset[s] = arr
329
330 self.chunkwise(f, base=base, floor=floor)
331
332 def log10(self, floor=None):
333 """Take the log base 10 of the entire dataset.
334
335 Parameters
336 ----------
337 floor : number (optional)
338 Clip values below floor after log. Default is None.
339 """
340
341 def f(dataset, s, floor):
342 arr = dataset[s]
343 arr = np.log10(arr)
344 if floor is not None:
345 arr[arr < floor] = floor
346 dataset[s] = arr
347
348 self.chunkwise(f, floor=floor)
349
350 def log2(self, floor=None):
351 """Take the log base 2 of the entire dataset.
352
353 Parameters
354 ----------
355 floor : number (optional)
356 Clip values below floor after log. Default is None.
357 """
358
359 def f(dataset, s, floor):
360 arr = dataset[s]
361 arr = np.log2(arr)
362 if floor is not None:
363 arr[arr < floor] = floor
364 dataset[s] = arr
365
366 self.chunkwise(f, floor=floor)
367
368 def max(self):
369 """Maximum, ignorning nans."""
370 if "max" not in self.attrs.keys():
371
372 def f(dataset, s):
373 return np.nanmax(dataset[s])
374
375 self.attrs["max"] = np.nanmax(list(self.chunkwise(f).values()))
376 return self.attrs["max"]
377
378 def min(self):
379 """Minimum, ignoring nans."""
380 if "min" not in self.attrs.keys():
381
382 def f(dataset, s):
383 return np.nanmin(dataset[s])
384
385 self.attrs["min"] = np.nanmin(list(self.chunkwise(f).values()))
386 return self.attrs["min"]
387
388 def slices(self):
389 """Returns a generator yielding tuple of slice objects.
390
391 Order is not guaranteed.
392 """
393 if self.chunks is None:
394 yield tuple(slice(None, s) for s in self.shape)
395 else:
396 ceilings = tuple(-(-s // c) for s, c in zip(self.shape, self.chunks))
397 for idx in np.ndindex(ceilings): # could also use itertools.product
398 out = []
399 for i, c, s in zip(idx, self.chunks, self.shape):
400 start = i * c
401 stop = min(start + c, s + 1)
402 out.append(slice(start, stop, 1))
403 yield tuple(out)
404
405 def symmetric_root(self, root=2):
406 def f(dataset, s, root):
407 dataset[s] = np.sign(dataset[s]) * (np.abs(dataset[s]) ** (1 / root))
408
409 self.chunkwise(f, root=root)
410
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/_dataset.py b/WrightTools/_dataset.py
--- a/WrightTools/_dataset.py
+++ b/WrightTools/_dataset.py
@@ -117,9 +117,12 @@
@property
def _leaf(self):
out = self.natural_name
+ if self.size == 1:
+ out += f" = {self.points}"
if self.units is not None:
out += " ({0})".format(self.units)
- out += " {0}".format(self.shape)
+ if self.size != 1:
+ out += " {0}".format(self.shape)
return out
@property
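To make the intended behaviour concrete, here is a small stand-alone sketch (the `leaf_label` helper is hypothetical and only mirrors the patched `_leaf` logic; it is not part of WrightTools) showing the leaf string now produced for a size-1 variable versus a larger one:

```python
import numpy as np


def leaf_label(name, arr, units=None):
    # Mirrors Dataset._leaf after the patch: size-1 variables show their value,
    # everything else keeps showing its shape.
    out = name
    if arr.size == 1:
        out += f" = {np.squeeze(arr)}"  # Dataset.points is the squeezed array
    if units is not None:
        out += " ({0})".format(units)
    if arr.size != 1:
        out += " {0}".format(arr.shape)
    return out


print(leaf_label("w1", np.array([[1550.0]]), units="nm"))           # w1 = 1550.0 (nm)
print(leaf_label("wm", np.linspace(500.0, 700.0, 11), units="nm"))  # wm (nm) (11,)
```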
| {"golden_diff": "diff --git a/WrightTools/_dataset.py b/WrightTools/_dataset.py\n--- a/WrightTools/_dataset.py\n+++ b/WrightTools/_dataset.py\n@@ -117,9 +117,12 @@\n @property\n def _leaf(self):\n out = self.natural_name\n+ if self.size == 1:\n+ out += f\" = {self.points}\"\n if self.units is not None:\n out += \" ({0})\".format(self.units)\n- out += \" {0}\".format(self.shape)\n+ if self.size != 1:\n+ out += \" {0}\".format(self.shape)\n return out\n \n @property\n", "issue": "print_tree should print value and units for variables with size 1\n\n", "before_files": [{"content": "\"\"\"Dataset base class.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport collections\n\nimport numpy as np\n\nimport h5py\n\nfrom . import exceptions as wt_exceptions\nfrom . import kit as wt_kit\nfrom . import units as wt_units\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Dataset(h5py.Dataset):\n \"\"\"Array-like data container.\"\"\"\n\n _instances = {}\n class_name = \"Dataset\"\n\n def __getitem__(self, index):\n if not hasattr(index, \"__iter__\"):\n index = [index]\n index = wt_kit.valid_index(index, self.shape)\n return super().__getitem__(index)\n\n def __iadd__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] += value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] += value\n\n self.chunkwise(f, value=value)\n return self\n\n def __imul__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] *= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] *= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __ipow__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] **= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] **= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __isub__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] -= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] -= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __itruediv__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] /= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] /= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __new__(cls, parent, id, **kwargs):\n \"\"\"New object formation handler.\"\"\"\n fullpath = parent.fullpath + h5py.h5i.get_name(id).decode()\n fullpath = fullpath.replace(\"//\", \"/\")\n if fullpath in cls._instances.keys():\n return cls._instances[fullpath]\n else:\n instance = super(Dataset, cls).__new__(cls)\n cls.__init__(instance, parent, id, **kwargs)\n cls._instances[fullpath] = instance\n return instance\n\n def __repr__(self):\n return \"<WrightTools.{0} '{1}' at {2}>\".format(\n self.class_name, self.natural_name, self.fullpath\n )\n\n def __setitem__(self, index, value):\n self._clear_array_attributes_cache()\n return super().__setitem__(index, value)\n\n def _clear_array_attributes_cache(self):\n if \"max\" in self.attrs.keys():\n del self.attrs[\"max\"]\n if \"min\" in self.attrs.keys():\n del self.attrs[\"min\"]\n if \"argmax\" in self.attrs.keys():\n del self.attrs[\"argmax\"]\n if \"argmin\" in self.attrs.keys():\n del self.attrs[\"argmin\"]\n\n @property\n def _leaf(self):\n out = 
self.natural_name\n if self.units is not None:\n out += \" ({0})\".format(self.units)\n out += \" {0}\".format(self.shape)\n return out\n\n @property\n def full(self):\n arr = self[:]\n for i in range(arr.ndim):\n if arr.shape[i] == 1:\n arr = np.repeat(arr, self.parent.shape[i], axis=i)\n return arr\n\n @property\n def fullpath(self):\n \"\"\"Full path: file and internal structure.\"\"\"\n return self.parent.filepath + \"::\" + self.name\n\n @property\n def natural_name(self):\n \"\"\"Natural name of the dataset. May be different from name.\"\"\"\n try:\n assert self._natural_name is not None\n except (AssertionError, AttributeError):\n self._natural_name = self.attrs[\"name\"]\n finally:\n return self._natural_name\n\n @natural_name.setter\n def natural_name(self, value):\n self.attrs[\"name\"] = value\n self._natural_name = None\n\n @property\n def parent(self):\n \"\"\"Parent.\"\"\"\n return self._parent\n\n @property\n def points(self):\n \"\"\"Squeezed array.\"\"\"\n return np.squeeze(self[:])\n\n @property\n def units(self):\n \"\"\"Units.\"\"\"\n if \"units\" in self.attrs.keys():\n # This try-except here for compatibility with v1.0.0 of WT5 format\n try:\n self.attrs[\"units\"] = self.attrs[\"units\"].decode()\n except AttributeError:\n pass # already a string, not bytes\n return self.attrs[\"units\"]\n return None\n\n @units.setter\n def units(self, value):\n \"\"\"Set units.\"\"\"\n if value is None:\n if \"units\" in self.attrs.keys():\n self.attrs.pop(\"units\")\n else:\n try:\n self.attrs[\"units\"] = value\n except AttributeError:\n self.attrs[\"units\"] = value\n\n def argmax(self):\n \"\"\"Index of the maximum, ignorning nans.\"\"\"\n if \"argmax\" not in self.attrs.keys():\n\n def f(dataset, s):\n arr = dataset[s]\n try:\n amin = np.nanargmax(arr)\n except ValueError:\n amin = 0\n idx = np.unravel_index(amin, arr.shape)\n val = arr[idx]\n return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)\n\n chunk_res = self.chunkwise(f)\n idxs = [i[0] for i in chunk_res.values()]\n vals = [i[1] for i in chunk_res.values()]\n self.attrs[\"argmax\"] = idxs[np.nanargmax(vals)]\n return tuple(self.attrs[\"argmax\"])\n\n def argmin(self):\n \"\"\"Index of the minimum, ignoring nans.\"\"\"\n if \"argmin\" not in self.attrs.keys():\n\n def f(dataset, s):\n arr = dataset[s]\n try:\n amin = np.nanargmin(arr)\n except ValueError:\n amin = 0\n idx = np.unravel_index(amin, arr.shape)\n val = arr[idx]\n return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)\n\n chunk_res = self.chunkwise(f)\n idxs = [i[0] for i in chunk_res.values()]\n vals = [i[1] for i in chunk_res.values()]\n self.attrs[\"argmin\"] = idxs[np.nanargmin(vals)]\n return tuple(self.attrs[\"argmin\"])\n\n def chunkwise(self, func, *args, **kwargs):\n \"\"\"Execute a function for each chunk in the dataset.\n\n Order of excecution is not guaranteed.\n\n Parameters\n ----------\n func : function\n Function to execute. First two arguments must be dataset,\n slices.\n args (optional)\n Additional (unchanging) arguments passed to func.\n kwargs (optional)\n Additional (unchanging) keyword arguments passed to func.\n\n Returns\n -------\n collections OrderedDict\n Dictionary of index: function output. 
Index is to lowest corner\n of each chunk.\n \"\"\"\n out = collections.OrderedDict()\n for s in self.slices():\n key = tuple(sss.start for sss in s)\n out[key] = func(self, s, *args, **kwargs)\n self._clear_array_attributes_cache()\n return out\n\n def clip(self, min=None, max=None, replace=np.nan):\n \"\"\"Clip values outside of a defined range.\n\n Parameters\n ----------\n min : number (optional)\n New channel minimum. Default is None.\n max : number (optional)\n New channel maximum. Default is None.\n replace : number or 'value' (optional)\n Replace behavior. Default is nan.\n \"\"\"\n if max is None:\n max = self.max()\n if min is None:\n min = self.min()\n\n def f(dataset, s, min, max, replace):\n if hasattr(min, \"shape\"):\n min = min[wt_kit.valid_index(s, min.shape)]\n if hasattr(max, \"shape\"):\n max = max[wt_kit.valid_index(s, max.shape)]\n if hasattr(replace, \"shape\"):\n replace = replace[wt_kit.valid_index(s, replace.shape)]\n arr = dataset[s]\n if replace == \"value\":\n dataset[s] = np.clip(arr, min, max)\n else:\n arr[arr < min] = replace\n arr[arr > max] = replace\n dataset[s] = arr\n\n self.chunkwise(f, min=min, max=max, replace=replace)\n\n def convert(self, destination_units):\n \"\"\"Convert units.\n\n Parameters\n ----------\n destination_units : string (optional)\n Units to convert into.\n \"\"\"\n if not wt_units.is_valid_conversion(self.units, destination_units):\n kind = wt_units.kind(self.units)\n valid = list(wt_units.dicts[kind].keys())\n raise wt_exceptions.UnitsError(valid, destination_units)\n if self.units is None:\n return\n\n def f(dataset, s, destination_units):\n dataset[s] = wt_units.converter(dataset[s], dataset.units, destination_units)\n\n self.chunkwise(f, destination_units=destination_units)\n self.units = destination_units\n\n def log(self, base=np.e, floor=None):\n \"\"\"Take the log of the entire dataset.\n\n Parameters\n ----------\n base : number (optional)\n Base of log. Default is e.\n floor : number (optional)\n Clip values below floor after log. Default is None.\n \"\"\"\n\n def f(dataset, s, base, floor):\n arr = dataset[s]\n arr = np.log(arr)\n if base != np.e:\n arr /= np.log(base)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, base=base, floor=floor)\n\n def log10(self, floor=None):\n \"\"\"Take the log base 10 of the entire dataset.\n\n Parameters\n ----------\n floor : number (optional)\n Clip values below floor after log. Default is None.\n \"\"\"\n\n def f(dataset, s, floor):\n arr = dataset[s]\n arr = np.log10(arr)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, floor=floor)\n\n def log2(self, floor=None):\n \"\"\"Take the log base 2 of the entire dataset.\n\n Parameters\n ----------\n floor : number (optional)\n Clip values below floor after log. 
Default is None.\n \"\"\"\n\n def f(dataset, s, floor):\n arr = dataset[s]\n arr = np.log2(arr)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, floor=floor)\n\n def max(self):\n \"\"\"Maximum, ignorning nans.\"\"\"\n if \"max\" not in self.attrs.keys():\n\n def f(dataset, s):\n return np.nanmax(dataset[s])\n\n self.attrs[\"max\"] = np.nanmax(list(self.chunkwise(f).values()))\n return self.attrs[\"max\"]\n\n def min(self):\n \"\"\"Minimum, ignoring nans.\"\"\"\n if \"min\" not in self.attrs.keys():\n\n def f(dataset, s):\n return np.nanmin(dataset[s])\n\n self.attrs[\"min\"] = np.nanmin(list(self.chunkwise(f).values()))\n return self.attrs[\"min\"]\n\n def slices(self):\n \"\"\"Returns a generator yielding tuple of slice objects.\n\n Order is not guaranteed.\n \"\"\"\n if self.chunks is None:\n yield tuple(slice(None, s) for s in self.shape)\n else:\n ceilings = tuple(-(-s // c) for s, c in zip(self.shape, self.chunks))\n for idx in np.ndindex(ceilings): # could also use itertools.product\n out = []\n for i, c, s in zip(idx, self.chunks, self.shape):\n start = i * c\n stop = min(start + c, s + 1)\n out.append(slice(start, stop, 1))\n yield tuple(out)\n\n def symmetric_root(self, root=2):\n def f(dataset, s, root):\n dataset[s] = np.sign(dataset[s]) * (np.abs(dataset[s]) ** (1 / root))\n\n self.chunkwise(f, root=root)\n", "path": "WrightTools/_dataset.py"}], "after_files": [{"content": "\"\"\"Dataset base class.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport collections\n\nimport numpy as np\n\nimport h5py\n\nfrom . import exceptions as wt_exceptions\nfrom . import kit as wt_kit\nfrom . import units as wt_units\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Dataset(h5py.Dataset):\n \"\"\"Array-like data container.\"\"\"\n\n _instances = {}\n class_name = \"Dataset\"\n\n def __getitem__(self, index):\n if not hasattr(index, \"__iter__\"):\n index = [index]\n index = wt_kit.valid_index(index, self.shape)\n return super().__getitem__(index)\n\n def __iadd__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] += value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] += value\n\n self.chunkwise(f, value=value)\n return self\n\n def __imul__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] *= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] *= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __ipow__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] **= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] **= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __isub__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] -= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] -= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __itruediv__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] /= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] /= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __new__(cls, parent, id, **kwargs):\n \"\"\"New object formation handler.\"\"\"\n fullpath = parent.fullpath + h5py.h5i.get_name(id).decode()\n fullpath = 
fullpath.replace(\"//\", \"/\")\n if fullpath in cls._instances.keys():\n return cls._instances[fullpath]\n else:\n instance = super(Dataset, cls).__new__(cls)\n cls.__init__(instance, parent, id, **kwargs)\n cls._instances[fullpath] = instance\n return instance\n\n def __repr__(self):\n return \"<WrightTools.{0} '{1}' at {2}>\".format(\n self.class_name, self.natural_name, self.fullpath\n )\n\n def __setitem__(self, index, value):\n self._clear_array_attributes_cache()\n return super().__setitem__(index, value)\n\n def _clear_array_attributes_cache(self):\n if \"max\" in self.attrs.keys():\n del self.attrs[\"max\"]\n if \"min\" in self.attrs.keys():\n del self.attrs[\"min\"]\n if \"argmax\" in self.attrs.keys():\n del self.attrs[\"argmax\"]\n if \"argmin\" in self.attrs.keys():\n del self.attrs[\"argmin\"]\n\n @property\n def _leaf(self):\n out = self.natural_name\n if self.size == 1:\n out += f\" = {self.points}\"\n if self.units is not None:\n out += \" ({0})\".format(self.units)\n if self.size != 1:\n out += \" {0}\".format(self.shape)\n return out\n\n @property\n def full(self):\n arr = self[:]\n for i in range(arr.ndim):\n if arr.shape[i] == 1:\n arr = np.repeat(arr, self.parent.shape[i], axis=i)\n return arr\n\n @property\n def fullpath(self):\n \"\"\"Full path: file and internal structure.\"\"\"\n return self.parent.filepath + \"::\" + self.name\n\n @property\n def natural_name(self):\n \"\"\"Natural name of the dataset. May be different from name.\"\"\"\n try:\n assert self._natural_name is not None\n except (AssertionError, AttributeError):\n self._natural_name = self.attrs[\"name\"]\n finally:\n return self._natural_name\n\n @natural_name.setter\n def natural_name(self, value):\n self.attrs[\"name\"] = value\n self._natural_name = None\n\n @property\n def parent(self):\n \"\"\"Parent.\"\"\"\n return self._parent\n\n @property\n def points(self):\n \"\"\"Squeezed array.\"\"\"\n return np.squeeze(self[:])\n\n @property\n def units(self):\n \"\"\"Units.\"\"\"\n if \"units\" in self.attrs.keys():\n # This try-except here for compatibility with v1.0.0 of WT5 format\n try:\n self.attrs[\"units\"] = self.attrs[\"units\"].decode()\n except AttributeError:\n pass # already a string, not bytes\n return self.attrs[\"units\"]\n return None\n\n @units.setter\n def units(self, value):\n \"\"\"Set units.\"\"\"\n if value is None:\n if \"units\" in self.attrs.keys():\n self.attrs.pop(\"units\")\n else:\n try:\n self.attrs[\"units\"] = value\n except AttributeError:\n self.attrs[\"units\"] = value\n\n def argmax(self):\n \"\"\"Index of the maximum, ignorning nans.\"\"\"\n if \"argmax\" not in self.attrs.keys():\n\n def f(dataset, s):\n arr = dataset[s]\n try:\n amin = np.nanargmax(arr)\n except ValueError:\n amin = 0\n idx = np.unravel_index(amin, arr.shape)\n val = arr[idx]\n return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)\n\n chunk_res = self.chunkwise(f)\n idxs = [i[0] for i in chunk_res.values()]\n vals = [i[1] for i in chunk_res.values()]\n self.attrs[\"argmax\"] = idxs[np.nanargmax(vals)]\n return tuple(self.attrs[\"argmax\"])\n\n def argmin(self):\n \"\"\"Index of the minimum, ignoring nans.\"\"\"\n if \"argmin\" not in self.attrs.keys():\n\n def f(dataset, s):\n arr = dataset[s]\n try:\n amin = np.nanargmin(arr)\n except ValueError:\n amin = 0\n idx = np.unravel_index(amin, arr.shape)\n val = arr[idx]\n return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)\n\n chunk_res = self.chunkwise(f)\n idxs = [i[0] for i in 
chunk_res.values()]\n vals = [i[1] for i in chunk_res.values()]\n self.attrs[\"argmin\"] = idxs[np.nanargmin(vals)]\n return tuple(self.attrs[\"argmin\"])\n\n def chunkwise(self, func, *args, **kwargs):\n \"\"\"Execute a function for each chunk in the dataset.\n\n Order of excecution is not guaranteed.\n\n Parameters\n ----------\n func : function\n Function to execute. First two arguments must be dataset,\n slices.\n args (optional)\n Additional (unchanging) arguments passed to func.\n kwargs (optional)\n Additional (unchanging) keyword arguments passed to func.\n\n Returns\n -------\n collections OrderedDict\n Dictionary of index: function output. Index is to lowest corner\n of each chunk.\n \"\"\"\n out = collections.OrderedDict()\n for s in self.slices():\n key = tuple(sss.start for sss in s)\n out[key] = func(self, s, *args, **kwargs)\n self._clear_array_attributes_cache()\n return out\n\n def clip(self, min=None, max=None, replace=np.nan):\n \"\"\"Clip values outside of a defined range.\n\n Parameters\n ----------\n min : number (optional)\n New channel minimum. Default is None.\n max : number (optional)\n New channel maximum. Default is None.\n replace : number or 'value' (optional)\n Replace behavior. Default is nan.\n \"\"\"\n if max is None:\n max = self.max()\n if min is None:\n min = self.min()\n\n def f(dataset, s, min, max, replace):\n if hasattr(min, \"shape\"):\n min = min[wt_kit.valid_index(s, min.shape)]\n if hasattr(max, \"shape\"):\n max = max[wt_kit.valid_index(s, max.shape)]\n if hasattr(replace, \"shape\"):\n replace = replace[wt_kit.valid_index(s, replace.shape)]\n arr = dataset[s]\n if replace == \"value\":\n dataset[s] = np.clip(arr, min, max)\n else:\n arr[arr < min] = replace\n arr[arr > max] = replace\n dataset[s] = arr\n\n self.chunkwise(f, min=min, max=max, replace=replace)\n\n def convert(self, destination_units):\n \"\"\"Convert units.\n\n Parameters\n ----------\n destination_units : string (optional)\n Units to convert into.\n \"\"\"\n if not wt_units.is_valid_conversion(self.units, destination_units):\n kind = wt_units.kind(self.units)\n valid = list(wt_units.dicts[kind].keys())\n raise wt_exceptions.UnitsError(valid, destination_units)\n if self.units is None:\n return\n\n def f(dataset, s, destination_units):\n dataset[s] = wt_units.converter(dataset[s], dataset.units, destination_units)\n\n self.chunkwise(f, destination_units=destination_units)\n self.units = destination_units\n\n def log(self, base=np.e, floor=None):\n \"\"\"Take the log of the entire dataset.\n\n Parameters\n ----------\n base : number (optional)\n Base of log. Default is e.\n floor : number (optional)\n Clip values below floor after log. Default is None.\n \"\"\"\n\n def f(dataset, s, base, floor):\n arr = dataset[s]\n arr = np.log(arr)\n if base != np.e:\n arr /= np.log(base)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, base=base, floor=floor)\n\n def log10(self, floor=None):\n \"\"\"Take the log base 10 of the entire dataset.\n\n Parameters\n ----------\n floor : number (optional)\n Clip values below floor after log. Default is None.\n \"\"\"\n\n def f(dataset, s, floor):\n arr = dataset[s]\n arr = np.log10(arr)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, floor=floor)\n\n def log2(self, floor=None):\n \"\"\"Take the log base 2 of the entire dataset.\n\n Parameters\n ----------\n floor : number (optional)\n Clip values below floor after log. 
Default is None.\n \"\"\"\n\n def f(dataset, s, floor):\n arr = dataset[s]\n arr = np.log2(arr)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, floor=floor)\n\n def max(self):\n \"\"\"Maximum, ignorning nans.\"\"\"\n if \"max\" not in self.attrs.keys():\n\n def f(dataset, s):\n return np.nanmax(dataset[s])\n\n self.attrs[\"max\"] = np.nanmax(list(self.chunkwise(f).values()))\n return self.attrs[\"max\"]\n\n def min(self):\n \"\"\"Minimum, ignoring nans.\"\"\"\n if \"min\" not in self.attrs.keys():\n\n def f(dataset, s):\n return np.nanmin(dataset[s])\n\n self.attrs[\"min\"] = np.nanmin(list(self.chunkwise(f).values()))\n return self.attrs[\"min\"]\n\n def slices(self):\n \"\"\"Returns a generator yielding tuple of slice objects.\n\n Order is not guaranteed.\n \"\"\"\n if self.chunks is None:\n yield tuple(slice(None, s) for s in self.shape)\n else:\n ceilings = tuple(-(-s // c) for s, c in zip(self.shape, self.chunks))\n for idx in np.ndindex(ceilings): # could also use itertools.product\n out = []\n for i, c, s in zip(idx, self.chunks, self.shape):\n start = i * c\n stop = min(start + c, s + 1)\n out.append(slice(start, stop, 1))\n yield tuple(out)\n\n def symmetric_root(self, root=2):\n def f(dataset, s, root):\n dataset[s] = np.sign(dataset[s]) * (np.abs(dataset[s]) ** (1 / root))\n\n self.chunkwise(f, root=root)\n", "path": "WrightTools/_dataset.py"}]} |
gh_patches_debug_1493 | rasdani/github-patches | git_diff | pulp__pulpcore-4156 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait`, and Python 3.11 now [raises an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the `BaseDownloader.fetch` call to fail on Python 3.11: https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185.
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`.
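For illustration, a minimal sketch of the idea (a sketch only, not a tested patch; it assumes the current body is `run_until_complete(asyncio.wait([self.run()]))` as linked above):
```python
def fetch(self):
    loop = asyncio.get_event_loop()
    # (a) wrap the coroutine in a Task so asyncio.wait still accepts it on 3.11
    done, _ = loop.run_until_complete(asyncio.wait([loop.create_task(self.run())]))
    return done.pop().result()
    # (b) or, since only a single coroutine is awaited here, drop asyncio.wait:
    # return loop.run_until_complete(self.run())
```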
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/download/base.py`
Content:
```
1 from gettext import gettext as _
2
3 import asyncio
4 from collections import namedtuple
5 import logging
6 import os
7 import tempfile
8 from urllib.parse import urlsplit
9
10 from pulpcore.app import pulp_hashlib
11 from pulpcore.app.models import Artifact
12 from pulpcore.exceptions import (
13 DigestValidationError,
14 SizeValidationError,
15 TimeoutException,
16 UnsupportedDigestValidationError,
17 )
18
19
20 log = logging.getLogger(__name__)
21
22
23 DownloadResult = namedtuple("DownloadResult", ["url", "artifact_attributes", "path", "headers"])
24 """
25 Args:
26 url (str): The url corresponding with the download.
27 path (str): The absolute path to the saved file
28 artifact_attributes (dict): Contains keys corresponding with
29 :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values
30 along with size information.
31 headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The
32 values are header content. None when not using the HttpDownloader or sublclass.
33 """
34
35
36 class BaseDownloader:
37 """
38 The base class of all downloaders, providing digest calculation, validation, and file handling.
39
40 This is an abstract class and is meant to be subclassed. Subclasses are required to implement
41 the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:
42
43 1. Pass all downloaded data to
44 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.
45
46 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has
47 been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
48
49 Passing all downloaded data the into
50 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to
51 be computed while data is written to disk. The digests computed are required if the download is
52 to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the
53 data later.
54
55 The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default
56 writes to a random file in the current working directory.
57
58 The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all
59 data written to the file-like object is quiesced to disk before the file-like object has
60 `close()` called on it.
61
62 Attributes:
63 url (str): The url to download.
64 expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the
65 value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}
66 expected_size (int): The number of bytes the download is expected to have.
67 path (str): The full path to the file containing the downloaded data.
68 """
69
70 def __init__(
71 self,
72 url,
73 expected_digests=None,
74 expected_size=None,
75 semaphore=None,
76 *args,
77 **kwargs,
78 ):
79 """
80 Create a BaseDownloader object. This is expected to be called by all subclasses.
81
82 Args:
83 url (str): The url to download.
84 expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the
85 value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}
86 expected_size (int): The number of bytes the download is expected to have.
87 semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.
88 Useful for limiting the number of outstanding downloaders in various ways.
89 """
90
91 self.url = url
92 self._writer = None
93 self.path = None
94 self.expected_digests = expected_digests
95 self.expected_size = expected_size
96 if semaphore:
97 self.semaphore = semaphore
98 else:
99 self.semaphore = asyncio.Semaphore() # This will always be acquired
100 self._digests = {}
101 self._size = 0
102 if self.expected_digests:
103 if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):
104 raise UnsupportedDigestValidationError(
105 _(
106 "Content at the URL '{}' does not contain at least one trusted hasher which"
107 " is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The"
108 " downloader expected one of the following hashers: {}"
109 ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))
110 )
111
112 def _ensure_writer_has_open_file(self):
113 """
114 Create a temporary file on demand.
115
116 Create a temporary file when it's actually used,
117 allowing plugin writers to instantiate many downloaders in memory.
118 """
119 if not self._writer:
120 filename = urlsplit(self.url).path.split("/")[-1]
121 # linux allows any character except NUL or / in a filename and has a length limit of
122 # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded
123 # paths should be OK
124 is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length
125 # if the filename isn't legal then we just fall back to no suffix (random name)
126 suffix = "-" + filename if is_legal_filename else None
127 # write the file to the current working directory with a random prefix and the
128 # desired suffix. we always want the random prefix as it is possible to download
129 # the same filename from two different URLs, and the files may not be the same.
130 self._writer = tempfile.NamedTemporaryFile(dir=".", suffix=suffix, delete=False)
131 self.path = self._writer.name
132 self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}
133 self._size = 0
134
135 async def handle_data(self, data):
136 """
137 A coroutine that writes data to the file object and compute its digests.
138
139 All subclassed downloaders are expected to pass all data downloaded to this method. Similar
140 to the hashlib docstring, repeated calls are equivalent to a single call with
141 the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to
142 m.handle_data(a+b).
143
144 Args:
145 data (bytes): The data to be handled by the downloader.
146 """
147 self._ensure_writer_has_open_file()
148 self._writer.write(data)
149 self._record_size_and_digests_for_data(data)
150
151 async def finalize(self):
152 """
153 A coroutine to flush downloaded data, close the file writer, and validate the data.
154
155 All subclasses are required to call this method after all data has been passed to
156 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
157
158 Raises:
159 :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``
160 values don't match the digest of the data passed to
161 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
162 :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value
163 doesn't match the size of the data passed to
164 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
165 """
166 self._ensure_writer_has_open_file()
167 self._writer.flush()
168 os.fsync(self._writer.fileno())
169 self._writer.close()
170 self._writer = None
171 self.validate_digests()
172 self.validate_size()
173 log.debug(f"Downloaded file from {self.url}")
174
175 def fetch(self):
176 """
177 Run the download synchronously and return the `DownloadResult`.
178
179 Returns:
180 :class:`~pulpcore.plugin.download.DownloadResult`
181
182 Raises:
183 Exception: Any fatal exception emitted during downloading
184 """
185 done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
186 return done.pop().result()
187
188 def _record_size_and_digests_for_data(self, data):
189 """
190 Record the size and digest for an available chunk of data.
191
192 Args:
193 data (bytes): The data to have its size and digest values recorded.
194 """
195 for algorithm in self._digests.values():
196 algorithm.update(data)
197 self._size += len(data)
198
199 @property
200 def artifact_attributes(self):
201 """
202 A property that returns a dictionary with size and digest information. The keys of this
203 dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.
204 """
205 attributes = {"size": self._size}
206 for algorithm in self._digests:
207 attributes[algorithm] = self._digests[algorithm].hexdigest()
208 return attributes
209
210 def validate_digests(self):
211 """
212 Validate all digests validate if ``expected_digests`` is set
213
214 Raises:
215 :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``
216 values don't match the digest of the data passed to
217 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
218 """
219 if self.expected_digests:
220 for algorithm, expected_digest in self.expected_digests.items():
221 actual_digest = self._digests[algorithm].hexdigest()
222 if actual_digest != expected_digest:
223 raise DigestValidationError(actual_digest, expected_digest, url=self.url)
224
225 def validate_size(self):
226 """
227 Validate the size if ``expected_size`` is set
228
229 Raises:
230 :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value
231 doesn't match the size of the data passed to
232 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
233 """
234 if self.expected_size:
235 actual_size = self._size
236 expected_size = self.expected_size
237 if actual_size != expected_size:
238 raise SizeValidationError(actual_size, expected_size, url=self.url)
239
240 async def run(self, extra_data=None):
241 """
242 Run the downloader with concurrency restriction.
243
244 This method acquires `self.semaphore` before calling the actual download implementation
245 contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`
246 decorator on `_run()`, handles backoff-and-retry logic.
247
248 Args:
249 extra_data (dict): Extra data passed to the downloader.
250
251 Returns:
252 :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.
253
254 """
255 async with self.semaphore:
256 try:
257 return await self._run(extra_data=extra_data)
258 except asyncio.TimeoutError:
259 raise TimeoutException(self.url)
260
261 async def _run(self, extra_data=None):
262 """
263 Run the downloader.
264
265 This is a coroutine that asyncio can schedule to complete downloading. Subclasses are
266 required to implement this method and do two things:
267
268 1. Pass all downloaded data to
269 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
270
271 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has
272 been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
273
274 It is also expected that the subclass implementation return a
275 :class:`~pulpcore.plugin.download.DownloadResult` object. The
276 ``artifact_attributes`` value of the
277 :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the
278 :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.
279
280 This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which
281 handles concurrency restriction. Thus, by the time this method is called, the download can
282 occur without violating the concurrency restriction.
283
284 Args:
285 extra_data (dict): Extra data passed to the downloader.
286
287 Returns:
288 :class:`~pulpcore.plugin.download.DownloadResult`
289
290 Raises:
291 Validation errors could be emitted when subclassed implementations call
292 :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.
293 """
294 raise NotImplementedError("Subclasses must define a _run() method that returns a coroutine")
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -182,8 +182,8 @@
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| {"golden_diff": "diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py\n--- a/pulpcore/download/base.py\n+++ b/pulpcore/download/base.py\n@@ -182,8 +182,8 @@\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))\n- return done.pop().result()\n+ result = asyncio.get_event_loop().run_until_complete(self.run())\n+ return result\n \n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n", "issue": "BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11\nPython 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .\r\n\r\nPython provides the solution in the error message: \"Passing coroutines is forbidden, use tasks explicitly.\"\r\n\r\nI believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`\n", "before_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\nfrom collections import namedtuple\nimport logging\nimport os\nimport tempfile\nfrom urllib.parse import urlsplit\n\nfrom pulpcore.app import pulp_hashlib\nfrom pulpcore.app.models import Artifact\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n UnsupportedDigestValidationError,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nDownloadResult = namedtuple(\"DownloadResult\", [\"url\", \"artifact_attributes\", \"path\", \"headers\"])\n\"\"\"\nArgs:\n url (str): The url corresponding with the download.\n path (str): The absolute path to the saved file\n artifact_attributes (dict): Contains keys corresponding with\n :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values\n along with size information.\n headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The\n values are header content. None when not using the HttpDownloader or sublclass.\n\"\"\"\n\n\nclass BaseDownloader:\n \"\"\"\n The base class of all downloaders, providing digest calculation, validation, and file handling.\n\n This is an abstract class and is meant to be subclassed. Subclasses are required to implement\n the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.\n\n 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Passing all downloaded data the into\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to\n be computed while data is written to disk. 
The digests computed are required if the download is\n to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the\n data later.\n\n The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default\n writes to a random file in the current working directory.\n\n The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all\n data written to the file-like object is quiesced to disk before the file-like object has\n `close()` called on it.\n\n Attributes:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n path (str): The full path to the file containing the downloaded data.\n \"\"\"\n\n def __init__(\n self,\n url,\n expected_digests=None,\n expected_size=None,\n semaphore=None,\n *args,\n **kwargs,\n ):\n \"\"\"\n Create a BaseDownloader object. This is expected to be called by all subclasses.\n\n Args:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.\n Useful for limiting the number of outstanding downloaders in various ways.\n \"\"\"\n\n self.url = url\n self._writer = None\n self.path = None\n self.expected_digests = expected_digests\n self.expected_size = expected_size\n if semaphore:\n self.semaphore = semaphore\n else:\n self.semaphore = asyncio.Semaphore() # This will always be acquired\n self._digests = {}\n self._size = 0\n if self.expected_digests:\n if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):\n raise UnsupportedDigestValidationError(\n _(\n \"Content at the URL '{}' does not contain at least one trusted hasher which\"\n \" is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The\"\n \" downloader expected one of the following hashers: {}\"\n ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))\n )\n\n def _ensure_writer_has_open_file(self):\n \"\"\"\n Create a temporary file on demand.\n\n Create a temporary file when it's actually used,\n allowing plugin writers to instantiate many downloaders in memory.\n \"\"\"\n if not self._writer:\n filename = urlsplit(self.url).path.split(\"/\")[-1]\n # linux allows any character except NUL or / in a filename and has a length limit of\n # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded\n # paths should be OK\n is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length\n # if the filename isn't legal then we just fall back to no suffix (random name)\n suffix = \"-\" + filename if is_legal_filename else None\n # write the file to the current working directory with a random prefix and the\n # desired suffix. 
we always want the random prefix as it is possible to download\n # the same filename from two different URLs, and the files may not be the same.\n self._writer = tempfile.NamedTemporaryFile(dir=\".\", suffix=suffix, delete=False)\n self.path = self._writer.name\n self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}\n self._size = 0\n\n async def handle_data(self, data):\n \"\"\"\n A coroutine that writes data to the file object and compute its digests.\n\n All subclassed downloaders are expected to pass all data downloaded to this method. Similar\n to the hashlib docstring, repeated calls are equivalent to a single call with\n the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to\n m.handle_data(a+b).\n\n Args:\n data (bytes): The data to be handled by the downloader.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.write(data)\n self._record_size_and_digests_for_data(data)\n\n async def finalize(self):\n \"\"\"\n A coroutine to flush downloaded data, close the file writer, and validate the data.\n\n All subclasses are required to call this method after all data has been passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.flush()\n os.fsync(self._writer.fileno())\n self._writer.close()\n self._writer = None\n self.validate_digests()\n self.validate_size()\n log.debug(f\"Downloaded file from {self.url}\")\n\n def fetch(self):\n \"\"\"\n Run the download synchronously and return the `DownloadResult`.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))\n return done.pop().result()\n\n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n Record the size and digest for an available chunk of data.\n\n Args:\n data (bytes): The data to have its size and digest values recorded.\n \"\"\"\n for algorithm in self._digests.values():\n algorithm.update(data)\n self._size += len(data)\n\n @property\n def artifact_attributes(self):\n \"\"\"\n A property that returns a dictionary with size and digest information. 
The keys of this\n dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.\n \"\"\"\n attributes = {\"size\": self._size}\n for algorithm in self._digests:\n attributes[algorithm] = self._digests[algorithm].hexdigest()\n return attributes\n\n def validate_digests(self):\n \"\"\"\n Validate all digests validate if ``expected_digests`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_digests:\n for algorithm, expected_digest in self.expected_digests.items():\n actual_digest = self._digests[algorithm].hexdigest()\n if actual_digest != expected_digest:\n raise DigestValidationError(actual_digest, expected_digest, url=self.url)\n\n def validate_size(self):\n \"\"\"\n Validate the size if ``expected_size`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_size:\n actual_size = self._size\n expected_size = self.expected_size\n if actual_size != expected_size:\n raise SizeValidationError(actual_size, expected_size, url=self.url)\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\n decorator on `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n async with self.semaphore:\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Run the downloader.\n\n This is a coroutine that asyncio can schedule to complete downloading. Subclasses are\n required to implement this method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n It is also expected that the subclass implementation return a\n :class:`~pulpcore.plugin.download.DownloadResult` object. The\n ``artifact_attributes`` value of the\n :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the\n :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.\n\n This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which\n handles concurrency restriction. 
Thus, by the time this method is called, the download can\n occur without violating the concurrency restriction.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Validation errors could be emitted when subclassed implementations call\n :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.\n \"\"\"\n raise NotImplementedError(\"Subclasses must define a _run() method that returns a coroutine\")\n", "path": "pulpcore/download/base.py"}], "after_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\nfrom collections import namedtuple\nimport logging\nimport os\nimport tempfile\nfrom urllib.parse import urlsplit\n\nfrom pulpcore.app import pulp_hashlib\nfrom pulpcore.app.models import Artifact\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n UnsupportedDigestValidationError,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nDownloadResult = namedtuple(\"DownloadResult\", [\"url\", \"artifact_attributes\", \"path\", \"headers\"])\n\"\"\"\nArgs:\n url (str): The url corresponding with the download.\n path (str): The absolute path to the saved file\n artifact_attributes (dict): Contains keys corresponding with\n :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values\n along with size information.\n headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The\n values are header content. None when not using the HttpDownloader or sublclass.\n\"\"\"\n\n\nclass BaseDownloader:\n \"\"\"\n The base class of all downloaders, providing digest calculation, validation, and file handling.\n\n This is an abstract class and is meant to be subclassed. Subclasses are required to implement\n the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.\n\n 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Passing all downloaded data the into\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to\n be computed while data is written to disk. The digests computed are required if the download is\n to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the\n data later.\n\n The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default\n writes to a random file in the current working directory.\n\n The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all\n data written to the file-like object is quiesced to disk before the file-like object has\n `close()` called on it.\n\n Attributes:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n path (str): The full path to the file containing the downloaded data.\n \"\"\"\n\n def __init__(\n self,\n url,\n expected_digests=None,\n expected_size=None,\n semaphore=None,\n *args,\n **kwargs,\n ):\n \"\"\"\n Create a BaseDownloader object. 
This is expected to be called by all subclasses.\n\n Args:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.\n Useful for limiting the number of outstanding downloaders in various ways.\n \"\"\"\n\n self.url = url\n self._writer = None\n self.path = None\n self.expected_digests = expected_digests\n self.expected_size = expected_size\n if semaphore:\n self.semaphore = semaphore\n else:\n self.semaphore = asyncio.Semaphore() # This will always be acquired\n self._digests = {}\n self._size = 0\n if self.expected_digests:\n if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):\n raise UnsupportedDigestValidationError(\n _(\n \"Content at the URL '{}' does not contain at least one trusted hasher which\"\n \" is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The\"\n \" downloader expected one of the following hashers: {}\"\n ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))\n )\n\n def _ensure_writer_has_open_file(self):\n \"\"\"\n Create a temporary file on demand.\n\n Create a temporary file when it's actually used,\n allowing plugin writers to instantiate many downloaders in memory.\n \"\"\"\n if not self._writer:\n filename = urlsplit(self.url).path.split(\"/\")[-1]\n # linux allows any character except NUL or / in a filename and has a length limit of\n # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded\n # paths should be OK\n is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length\n # if the filename isn't legal then we just fall back to no suffix (random name)\n suffix = \"-\" + filename if is_legal_filename else None\n # write the file to the current working directory with a random prefix and the\n # desired suffix. we always want the random prefix as it is possible to download\n # the same filename from two different URLs, and the files may not be the same.\n self._writer = tempfile.NamedTemporaryFile(dir=\".\", suffix=suffix, delete=False)\n self.path = self._writer.name\n self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}\n self._size = 0\n\n async def handle_data(self, data):\n \"\"\"\n A coroutine that writes data to the file object and compute its digests.\n\n All subclassed downloaders are expected to pass all data downloaded to this method. 
Similar\n to the hashlib docstring, repeated calls are equivalent to a single call with\n the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to\n m.handle_data(a+b).\n\n Args:\n data (bytes): The data to be handled by the downloader.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.write(data)\n self._record_size_and_digests_for_data(data)\n\n async def finalize(self):\n \"\"\"\n A coroutine to flush downloaded data, close the file writer, and validate the data.\n\n All subclasses are required to call this method after all data has been passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.flush()\n os.fsync(self._writer.fileno())\n self._writer.close()\n self._writer = None\n self.validate_digests()\n self.validate_size()\n log.debug(f\"Downloaded file from {self.url}\")\n\n def fetch(self):\n \"\"\"\n Run the download synchronously and return the `DownloadResult`.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n result = asyncio.get_event_loop().run_until_complete(self.run())\n return result\n\n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n Record the size and digest for an available chunk of data.\n\n Args:\n data (bytes): The data to have its size and digest values recorded.\n \"\"\"\n for algorithm in self._digests.values():\n algorithm.update(data)\n self._size += len(data)\n\n @property\n def artifact_attributes(self):\n \"\"\"\n A property that returns a dictionary with size and digest information. 
The keys of this\n dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.\n \"\"\"\n attributes = {\"size\": self._size}\n for algorithm in self._digests:\n attributes[algorithm] = self._digests[algorithm].hexdigest()\n return attributes\n\n def validate_digests(self):\n \"\"\"\n Validate all digests validate if ``expected_digests`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_digests:\n for algorithm, expected_digest in self.expected_digests.items():\n actual_digest = self._digests[algorithm].hexdigest()\n if actual_digest != expected_digest:\n raise DigestValidationError(actual_digest, expected_digest, url=self.url)\n\n def validate_size(self):\n \"\"\"\n Validate the size if ``expected_size`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_size:\n actual_size = self._size\n expected_size = self.expected_size\n if actual_size != expected_size:\n raise SizeValidationError(actual_size, expected_size, url=self.url)\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\n decorator on `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n async with self.semaphore:\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Run the downloader.\n\n This is a coroutine that asyncio can schedule to complete downloading. Subclasses are\n required to implement this method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n It is also expected that the subclass implementation return a\n :class:`~pulpcore.plugin.download.DownloadResult` object. The\n ``artifact_attributes`` value of the\n :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the\n :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.\n\n This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which\n handles concurrency restriction. Thus, by the time this method is called, the download can\n occur without violating the concurrency restriction.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Validation errors could be emitted when subclassed implementations call\n :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.\n \"\"\"\n raise NotImplementedError(\"Subclasses must define a _run() method that returns a coroutine\")\n", "path": "pulpcore/download/base.py"}]} |
gh_patches_debug_1494 | rasdani/github-patches | git_diff | ludwig-ai__ludwig-1702 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Shape mismatch when introducing multiple levels of dependencies
**Describe the bug**
When introducing multiple levels of dependencies, the shape of the _concatenated hidden states_ does not match the _input size for the dense layer of the output feature_.
In my case, the text output feature `qty_frac` depends on the text output feature `summary`, and the numerical output feature `qty` in turn depends on `qty_frac`.
I get the following error when running `ludwig train`:
```python-traceback
RuntimeError: mat1 and mat2 shapes cannot be multiplied (6x768 and 512x1)
```
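As a sanity check on the numbers (this is an interpretation, not a confirmed diagnosis): with the default output-feature FC size of 256, the dense layer for `qty` appears to be built for its own hidden plus `qty_frac`'s *declared* input size, while at runtime `qty_frac`'s hidden has already been concatenated with `summary`'s:
```
built for:       256 (qty) + 256 (qty_frac declared)            = 512
runtime concat:  256 (qty) + [256 (qty_frac) + 256 (summary)]   = 768
```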
**To Reproduce**
Minimal, reproducible example using bash and docker as only dependencies:
```bash
#!/usr/bin/env bash
FEATURE_LIST=$(
docker run -i mikefarah/yq -o json -I 0 e '.' - <<EOF
- name: document
type: text
- name: summary
type: text
- name: qty_frac
type: text
- name: qty
type: numerical
EOF
)
mkdir /tmp/ludwig-debug
docker run \
-it \
-v /tmp/ludwig-debug/:/workdir \
ludwigai/ludwig:nightly \
synthesize_dataset \
--features $FEATURE_LIST \
--dataset_size 10 \
--output_path /workdir/synthetic_data.csv
cat <<EOF >/tmp/ludwig-debug/config.yml
input_features:
- name: document
type: text
level: word
output_features:
- name: summary
type: text
level: word
decoder: generator
- name: qty_frac
type: text
level: word
decoder: generator
dependencies:
- summary
- name: qty
type: numerical
dependencies:
- qty_frac
EOF
docker run \
-it \
-v /tmp/ludwig-debug/:/workdir \
ludwigai/ludwig:nightly \
train \
--dataset /workdir/synthetic_data.csv \
--config_file /workdir/config.yml \
--output_directory /workdir/results
```
**Expected behavior**
Training starts without error.
**Screenshots**
Excerpt from the traceback:
```python-traceback
File "/usr/local/lib/python3.7/site-packages/ludwig/features/numerical_feature.py", line 269, in logits
return self.decoder_obj(hidden)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/ludwig/decoders/generic_decoders.py", line 58, in forward
return self.dense(inputs)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/ludwig/utils/torch_utils.py", line 212, in forward
output = torch.squeeze(self.dense(input), dim=-1)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 103, in forward
return F.linear(input, self.weight, self.bias)
File "/usr/local/lib/python3.7/site-packages/torch/nn/functional.py", line 1848, in linear
return torch._C._nn.linear(input, weight, bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (6x768 and 512x1)
```
**Environment:**
See the reproducible example above, run in an environment with:
- bash: `GNU bash, version 5.0.17(1)-release (x86_64-pc-linux-gnu)`
- docker: `Docker version 20.10.11+azure-3, build dea9396e184290f638ea873c76db7c80efd5a1d2`
The `ludwigai/ludwig:nightly` Docker image was built from main at 89d18365c41c4ded68edd2095349ce4a6caf5d18.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ludwig/features/base_feature.py`
Content:
```
1 # Copyright (c) 2019 Uber Technologies, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 import copy
16 import logging
17 from abc import ABC, abstractmethod, abstractstaticmethod
18 from typing import Any, Dict, Optional
19
20 import torch
21 from torch import Tensor
22
23 from ludwig.constants import COLUMN, HIDDEN, LENGTHS, LOGITS, LOSS, NAME, PREDICTIONS, PROBABILITIES, PROC_COLUMN, TYPE
24 from ludwig.decoders.registry import get_decoder_cls
25 from ludwig.encoders.registry import get_encoder_cls
26 from ludwig.features.feature_utils import compute_feature_hash, get_input_size_with_dependencies
27 from ludwig.modules.fully_connected_modules import FCStack
28 from ludwig.modules.loss_modules import get_loss_cls
29 from ludwig.modules.metric_registry import get_metric_classes, get_metric_cls
30 from ludwig.modules.reduction_modules import SequenceReducer
31 from ludwig.utils import output_feature_utils
32 from ludwig.utils.metric_utils import get_scalar_from_ludwig_metric
33 from ludwig.utils.misc_utils import merge_dict
34 from ludwig.utils.torch_utils import LudwigModule
35 from ludwig.utils.types import DataFrame
36
37 logger = logging.getLogger(__name__)
38
39
40 class BaseFeatureMixin(ABC):
41 """Parent class for feature mixins.
42
43 Feature mixins support preprocessing functionality shared across input and output features.
44 """
45
46 @abstractstaticmethod
47 def type() -> str:
48 """Returns the type of feature this mixin supports."""
49 raise NotImplementedError
50
51 @abstractstaticmethod
52 def preprocessing_defaults() -> Dict[str, Any]:
53 """Returns dict of preprocessing defaults."""
54 raise NotImplementedError
55
56 @abstractstaticmethod
57 def preprocessing_schema() -> Dict[str, Any]:
58 """Returns schema for the preprocessing configuration."""
59 raise NotImplementedError
60
61 @abstractstaticmethod
62 def cast_column(column: DataFrame, backend) -> DataFrame:
63 """Returns a copy of the dataset column for the given feature, potentially after a type cast.
64
65 Args:
66 column: Pandas column of values.
67 backend: (Union[Backend, str]) Backend to use for feature data processing.
68 """
69 raise NotImplementedError
70
71 @abstractstaticmethod
72 def get_feature_meta(column: DataFrame, preprocessing_parameters: Dict[str, Any], backend) -> Dict[str, Any]:
73 """Returns a dictionary of feature metadata.
74
75 Args:
76 column: Pandas column of values.
77 preprocessing_parameters: Preprocessing configuration for this feature.
78 backend: (Union[Backend, str]) Backend to use for feature data processing.
79 """
80 raise NotImplementedError
81
82 @abstractstaticmethod
83 def add_feature_data(
84 feature_config: Dict[str, Any],
85 input_df: DataFrame,
86 proc_df: Dict[str, DataFrame],
87 metadata: Dict[str, Any],
88 preprocessing_parameters: Dict[str, Any],
89 backend, # Union[Backend, str]
90 skip_save_processed_input: bool,
91 ) -> None:
92 """Runs preprocessing on the input_df and stores results in the proc_df and metadata dictionaries.
93
94 Args:
95 feature_config: Feature configuration.
96 input_df: Pandas column of values.
97 proc_df: Dict of processed columns of data. Feature data is added to this.
98 metadata: Metadata returned by get_feature_meta(). Additional information may be added to this.
99 preprocessing_parameters: Preprocessing configuration for this feature.
100 backend: (Union[Backend, str]) Backend to use for feature data processing.
101 skip_save_processed_input: Whether to skip saving the processed input.
102 """
103 raise NotImplementedError
104
105
106 class PredictModule(torch.nn.Module):
107 """Base class for all modules that convert model outputs to predictions.
108
109 Explicit member variables needed here for scripting, as Torchscript will not be able to recognize global variables
110 during scripting.
111 """
112
113 def __init__(self):
114 super().__init__()
115 self.predictions_key = PREDICTIONS
116 self.probabilities_key = PROBABILITIES
117 self.logits_key = LOGITS
118
119
120 class BaseFeature:
121 """Base class for all features.
122
123 Note that this class is not-cooperative (does not forward kwargs), so when constructing feature class hierarchies,
124 there should be only one parent class that derives from base feature. Other functionality should be put into mixin
125 classes to avoid the diamond pattern.
126 """
127
128 def __init__(self, feature, *args, **kwargs):
129 super().__init__()
130
131 if NAME not in feature:
132 raise ValueError("Missing feature name")
133 self.feature_name = feature[NAME]
134
135 if COLUMN not in feature:
136 feature[COLUMN] = self.feature_name
137 self.column = feature[COLUMN]
138
139 if PROC_COLUMN not in feature:
140 feature[PROC_COLUMN] = compute_feature_hash(feature)
141 self.proc_column = feature[PROC_COLUMN]
142
143 def overwrite_defaults(self, feature):
144 attributes = set(self.__dict__.keys())
145 attributes.update(self.__class__.__dict__.keys())
146
147 for k in feature.keys():
148 if k in attributes:
149 if isinstance(feature[k], dict) and hasattr(self, k) and isinstance(getattr(self, k), dict):
150 setattr(self, k, merge_dict(getattr(self, k), feature[k]))
151 else:
152 setattr(self, k, feature[k])
153
154
155 class InputFeature(BaseFeature, LudwigModule, ABC):
156 """Parent class for all input features."""
157
158 def __init__(self, *args, **kwargs):
159 super().__init__(*args, **kwargs)
160
161 def create_sample_input(self):
162 # Used by get_model_inputs(), which is used for tracing-based torchscript generation.
163 return torch.rand([2, *self.input_shape]).to(self.input_dtype)
164
165 @staticmethod
166 @abstractmethod
167 def update_config_with_metadata(input_feature, feature_metadata, *args, **kwargs):
168 pass
169
170 @staticmethod
171 @abstractmethod
172 def populate_defaults(input_feature):
173 pass
174
175 def initialize_encoder(self, encoder_parameters):
176 return get_encoder_cls(self.type(), self.encoder)(**encoder_parameters)
177
178 @staticmethod
179 def create_preproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:
180 raise NotImplementedError("Torchscript tracing not supported for feature")
181
182
183 class OutputFeature(BaseFeature, LudwigModule, ABC):
184 """Parent class for all output features."""
185
186 def __init__(self, feature: Dict[str, Any], other_output_features: Dict[str, "OutputFeature"], *args, **kwargs):
187 """Defines defaults, overwrites them based on the feature dictionary, and sets up dependencies.
188
189 Any output feature can depend on one or more other output features. The `other_output_features` input dictionary
190 should contain entries for any dependent output features, which is accomplished by constructing output features
191 in topographically sorted order. Attributes of any dependent output features are used to properly initialize
192 this feature's sizes.
193 """
194 super().__init__(*args, feature=feature, **kwargs)
195
196 self.reduce_input = None
197 self.reduce_dependencies = None
198
199 # List of feature names that this output feature is dependent on.
200 self.dependencies = []
201
202 self.fc_layers = None
203 self.num_fc_layers = 0
204 self.output_size = 256
205 self.use_bias = True
206 self.weights_initializer = "xavier_uniform"
207 self.bias_initializer = "zeros"
208 self.norm = None
209 self.norm_params = None
210 self.activation = "relu"
211 self.dropout = 0
212 self.input_size = None
213
214 self.overwrite_defaults(feature)
215
216 logger.debug(" output feature fully connected layers")
217 logger.debug(" FCStack")
218
219 self.input_size = get_input_size_with_dependencies(self.input_size, self.dependencies, other_output_features)
220
221 self.fc_stack = FCStack(
222 first_layer_input_size=self.input_size,
223 layers=self.fc_layers,
224 num_layers=self.num_fc_layers,
225 default_output_size=self.output_size,
226 default_use_bias=self.use_bias,
227 default_weights_initializer=self.weights_initializer,
228 default_bias_initializer=self.bias_initializer,
229 default_norm=self.norm,
230 default_norm_params=self.norm_params,
231 default_activation=self.activation,
232 default_dropout=self.dropout,
233 )
234 self._prediction_module = self.create_predict_module()
235
236 # set up two sequence reducers, one for inputs and other for dependencies
237 self.reduce_sequence_input = SequenceReducer(reduce_mode=self.reduce_input)
238 if self.dependencies:
239 self.dependency_reducers = torch.nn.ModuleDict()
240 # todo: re-evaluate need for separate handling of `attention` reducer
241 # currently this code does not support `attention`
242 for dependency in self.dependencies:
243 self.dependency_reducers[dependency] = SequenceReducer(reduce_mode=self.reduce_dependencies)
244
245 def create_sample_output(self):
246 return torch.rand(self.output_shape, dtype=self.get_output_dtype())
247
248 @abstractmethod
249 def get_prediction_set(self):
250 """Returns the set of prediction keys returned by this feature."""
251 raise NotImplementedError("OutputFeature is missing implementation for get_prediction_set.")
252
253 @classmethod
254 @abstractmethod
255 def get_output_dtype(cls):
256 """Returns the Tensor data type feature outputs."""
257 pass
258
259 @property
260 @abstractmethod
261 def metric_functions(self) -> Dict:
262 pass
263
264 def initialize_decoder(self, decoder_parameters):
265 decoder_parameters_copy = copy.copy(decoder_parameters)
266 # Input to the decoder is the output feature's FC hidden layer.
267 decoder_parameters_copy["input_size"] = self.fc_stack.output_shape[-1]
268 if "decoder" in decoder_parameters:
269 decoder = decoder_parameters["decoder"]
270 else:
271 decoder = self.decoder
272 return get_decoder_cls(self.type(), decoder)(**decoder_parameters_copy)
273
274 def train_loss(self, targets: Tensor, predictions: Dict[str, Tensor], feature_name):
275 loss_class = type(self.train_loss_function)
276 prediction_key = output_feature_utils.get_feature_concat_name(feature_name, loss_class.get_loss_inputs())
277 return self.train_loss_function(predictions[prediction_key], targets)
278
279 def eval_loss(self, targets: Tensor, predictions: Dict[str, Tensor]):
280 loss_class = type(self.train_loss_function)
281 prediction_key = loss_class.get_loss_inputs()
282 return self.eval_loss_function(predictions[prediction_key].detach(), targets)
283
284 def _setup_loss(self):
285 loss_kwargs = self.loss_kwargs()
286 self.train_loss_function = get_loss_cls(self.type(), self.loss[TYPE])(**loss_kwargs)
287 self.eval_loss_function = get_metric_cls(self.type(), self.loss[TYPE])(**loss_kwargs)
288
289 def _setup_metrics(self):
290 # needed to shadow class variable
291 self.metric_functions = {
292 LOSS: self.eval_loss_function,
293 **{
294 name: cls(**self.loss_kwargs(), **self.metric_kwargs())
295 for name, cls in get_metric_classes(self.type()).items()
296 if cls.can_report(self)
297 },
298 }
299
300 @abstractmethod
301 def create_predict_module(self) -> PredictModule:
302 """Creates and returns a `nn.Module` that converts raw model outputs (logits) to predictions.
303
304 This module is needed when generating the Torchscript model using scripting.
305 """
306 raise NotImplementedError()
307
308 @property
309 def prediction_module(self) -> PredictModule:
310 """Returns the PredictModule used to convert model outputs to predictions."""
311 return self._prediction_module
312
313 def predictions(self, all_decoder_outputs: Dict[str, torch.Tensor], feature_name: str) -> Dict[str, torch.Tensor]:
314 """Computes actual predictions from the outputs of feature decoders.
315
316 TODO(Justin): Consider refactoring this to accept feature-specific decoder outputs.
317
318 Args:
319 all_decoder_outputs: A dictionary of {feature name}::{tensor_name} -> output tensor.
320 Returns:
321 Dictionary of tensors with predictions as well as any additional tensors that may be
322 necessary for computing evaluation metrics.
323 """
324 return self.prediction_module(all_decoder_outputs, feature_name)
325
326 @abstractmethod
327 def logits(self, combiner_outputs: Dict[str, torch.Tensor], target=None, **kwargs) -> Dict[str, torch.Tensor]:
328 """Unpacks and feeds combiner_outputs to the decoder. Invoked as part of the output feature's forward pass.
329
330 If target is not None, then we are in training.
331
332 Args:
333 combiner_outputs: Dictionary of tensors from the combiner's forward pass.
334 Returns:
335 Dictionary of decoder's output tensors (non-normalized), as well as any additional
336 tensors that may be necessary for computing predictions or evaluation metrics.
337 """
338 raise NotImplementedError("OutputFeature is missing logits() implementation.")
339
340 def loss_kwargs(self) -> Dict[str, Any]:
341 """Returns arguments that are used to instantiate an instance of the loss class."""
342 return {}
343
344 def metric_kwargs(self) -> Dict[str, Any]:
345 """Returns arguments that are used to instantiate an instance of each metric class."""
346 return {}
347
348 def update_metrics(self, targets: Tensor, predictions: Dict[str, Tensor]) -> None:
349 """Updates metrics with the given targets and predictions.
350
351 Args:
352 targets: Tensor with target values for this output feature.
353 predictions: Dict of tensors returned by predictions().
354 """
355 for _, metric_fn in self.metric_functions.items():
356 metric_class = type(metric_fn)
357 prediction_key = metric_class.get_inputs()
358 # TODO(shreya): Metrics should ideally just move to the correct device
359 # and not require the user to do this. This is a temporary fix. See
360 # if this can be removed before merging the PR.
361 metric_fn = metric_fn.to(predictions[prediction_key].device)
362 metric_fn.update(predictions[prediction_key].detach(), targets)
363
364 def get_metrics(self):
365 metric_vals = {}
366 for metric_name, metric_fn in self.metric_functions.items():
367 try:
368 metric_vals[metric_name] = get_scalar_from_ludwig_metric(metric_fn)
369 except Exception as e:
370 logger.error(f"Caught exception computing metric: {metric_name}. Exception: {e}")
371 return metric_vals
372
373 def reset_metrics(self):
374 for _, metric_fn in self.metric_functions.items():
375 if metric_fn is not None:
376 metric_fn.reset()
377
378 def forward(
379 self,
380 combiner_outputs: Dict[str, torch.Tensor],
381 other_output_feature_outputs: Dict[str, torch.Tensor],
382 mask: Optional[torch.Tensor] = None,
383 target: Optional[torch.Tensor] = None,
384 ) -> Dict[str, torch.Tensor]:
385 """Forward pass that takes in output from the combiner, and passes it through to the decoder.
386
387 Args:
388 combiner_outputs: Dict of outputs from the combiner.
389 other_output_feature_outputs: Dict of tensors from other output features. Used for resolving dependencies.
390 mask: (Unused). Tensor for masking.
391 target: Tensor with targets. During training, targets != None. During prediction, targets = None.
392
393 Returns:
394 Dict of output tensors, with at least 'last_hidden' and 'logits' as keys, as well as any additional tensor
395 results from the decoder.
396 """
397 # extract the combined hidden layer
398 combiner_hidden = combiner_outputs["combiner_output"]
399 hidden = self.prepare_decoder_inputs(combiner_hidden, other_output_feature_outputs, mask=mask)
400
401 # ================ Predictions ================
402 logits_input = {HIDDEN: hidden}
403 # pass supplemental data from encoders to decoder
404 if "encoder_output_state" in combiner_outputs:
405 logits_input["encoder_output_state"] = combiner_outputs["encoder_output_state"]
406 if LENGTHS in combiner_outputs:
407 logits_input[LENGTHS] = combiner_outputs[LENGTHS]
408
409 logits = self.logits(logits_input, target=target)
410
411 # For binary and numerical features, self.logits() is a tensor.
412 # There are two special cases where self.logits() is a dict:
413 # categorical
414 # keys: logits, projection_input
415 # sequence
416 # keys: logits
417 # TODO(Justin): Clean this up.
418 if isinstance(logits, Tensor):
419 logits = {"logits": logits}
420
421 # For multi-class features, we must choose a consistent tuple subset.
422 return {
423 # last_hidden used for dependencies processing
424 "last_hidden": hidden,
425 **logits,
426 }
427
428 def overall_statistics_metadata(self):
429 """Additional metadata used to extend `training_set_metadata`.
430
431 Used when calculating the overall statistics.
432 """
433 return {}
434
435 @property
436 @abstractmethod
437 def default_validation_metric(self):
438 pass
439
440 @abstractmethod
441 def postprocess_predictions(
442 self,
443 result: Dict[str, Tensor],
444 metadata: Dict[str, Any],
445 output_directory: str,
446 backend,
447 ):
448 raise NotImplementedError
449
450 @staticmethod
451 def create_postproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:
452 raise NotImplementedError("Torchscript tracing not supported for feature")
453
454 @staticmethod
455 @abstractmethod
456 def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):
457 pass
458
459 @staticmethod
460 @abstractmethod
461 def calculate_overall_stats(predictions, targets, train_set_metadata):
462 pass
463
464 @staticmethod
465 @abstractmethod
466 def populate_defaults(input_feature):
467 pass
468
469 def output_specific_fully_connected(self, inputs, mask=None):
470 feature_hidden = inputs
471 original_feature_hidden = inputs
472
473 # flatten inputs
474 if len(original_feature_hidden.shape) > 2:
475 feature_hidden = torch.reshape(feature_hidden, (-1, list(feature_hidden.shape)[-1]))
476
477 # pass it through fc_stack
478 feature_hidden = self.fc_stack(feature_hidden, mask=mask)
479 feature_hidden_size = feature_hidden.shape[-1]
480
481 # reshape back to original first and second dimension
482 if len(original_feature_hidden.shape) > 2:
483 sequence_length = original_feature_hidden.shape[1]
484 feature_hidden = torch.reshape(feature_hidden, (-1, sequence_length, feature_hidden_size))
485
486 return feature_hidden
487
488 def prepare_decoder_inputs(
489 self, combiner_hidden: Tensor, other_output_features: Dict[str, Tensor], mask=None
490 ) -> Tensor:
491 """Takes the combiner output and the outputs of other outputs features computed so far and performs:
492
493 - reduction of combiner outputs (if needed)
494 - concatenating the outputs of dependent features (if needed)
495 - output_specific fully connected layers (if needed)
496
497 Args:
498 combiner_hidden: hidden state of the combiner
499 other_output_features: output tensors from other output features
500 """
501 # ================ Reduce Inputs ================
502 feature_hidden = combiner_hidden
503 if self.reduce_input is not None and len(combiner_hidden.shape) > 2:
504 feature_hidden = self.reduce_sequence_input(combiner_hidden)
505
506 # ================ Concat Dependencies ================
507 if self.dependencies:
508 feature_hidden = output_feature_utils.concat_dependencies(
509 self.column, self.dependencies, self.dependency_reducers, feature_hidden, other_output_features
510 )
511
512 # ================ Output-wise Fully Connected ================
513 feature_hidden = self.output_specific_fully_connected(feature_hidden, mask=mask)
514
515 return feature_hidden
516
517 def flatten(self, df: DataFrame) -> DataFrame:
518 """Converts the output of batch_predict to a 1D array."""
519 return df
520
521 def unflatten(self, df: DataFrame) -> DataFrame:
522 """Reshapes a flattened 1D array into its original shape."""
523 return df
524
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ludwig/features/base_feature.py b/ludwig/features/base_feature.py
--- a/ludwig/features/base_feature.py
+++ b/ludwig/features/base_feature.py
@@ -217,6 +217,7 @@
logger.debug(" FCStack")
self.input_size = get_input_size_with_dependencies(self.input_size, self.dependencies, other_output_features)
+ feature["input_size"] = self.input_size # needed for future overrides
self.fc_stack = FCStack(
first_layer_input_size=self.input_size,
| {"golden_diff": "diff --git a/ludwig/features/base_feature.py b/ludwig/features/base_feature.py\n--- a/ludwig/features/base_feature.py\n+++ b/ludwig/features/base_feature.py\n@@ -217,6 +217,7 @@\n logger.debug(\" FCStack\")\n \n self.input_size = get_input_size_with_dependencies(self.input_size, self.dependencies, other_output_features)\n+ feature[\"input_size\"] = self.input_size # needed for future overrides\n \n self.fc_stack = FCStack(\n first_layer_input_size=self.input_size,\n", "issue": "Shape mismatch when introducing multiple levels of dependencies\n**Describe the bug**\r\n\r\nWhen introducing multiple levels of dependencies, the shape of the _concatenated hidden states_ does not match the _input size for the dense layer of the output feature_.\r\n\r\nIn my case, the text output feature `qty_frac` depends on text output feature `summary`, and numerical output feature `qty` in turn depends on `qty_frac`.\r\n\r\nI get the following error when running `ludwig train`:\r\n```python-traceback\r\nRuntimeError: mat1 and mat2 shapes cannot be multiplied (6x768 and 512x1)\r\n```\r\n\r\n\r\n\r\n**To Reproduce**\r\n\r\nMinimal, reproducible example using bash and docker as only dependencies:\r\n```bash\r\n#!/usr/bin/env bash\r\nFEATURE_LIST=$(\r\n docker run -i mikefarah/yq -o json -I 0 e '.' - <<EOF\r\n- name: document\r\n type: text\r\n- name: summary\r\n type: text\r\n- name: qty_frac\r\n type: text\r\n- name: qty\r\n type: numerical\r\nEOF\r\n)\r\n\r\nmkdir /tmp/ludwig-debug\r\ndocker run \\\r\n -it \\\r\n -v /tmp/ludwig-debug/:/workdir \\\r\n ludwigai/ludwig:nightly \\\r\n synthesize_dataset \\\r\n --features $FEATURE_LIST \\\r\n --dataset_size 10 \\\r\n --output_path /workdir/synthetic_data.csv\r\n\r\ncat <<EOF >/tmp/ludwig-debug/config.yml\r\ninput_features:\r\n - name: document\r\n type: text\r\n level: word\r\noutput_features:\r\n - name: summary\r\n type: text\r\n level: word\r\n decoder: generator\r\n - name: qty_frac\r\n type: text\r\n level: word\r\n decoder: generator\r\n dependencies:\r\n - summary\r\n - name: qty\r\n type: numerical\r\n dependencies:\r\n - qty_frac\r\nEOF\r\n\r\ndocker run \\\r\n -it \\\r\n -v /tmp/ludwig-debug/:/workdir \\\r\n ludwigai/ludwig:nightly \\\r\n train \\\r\n --dataset /workdir/synthetic_data.csv \\\r\n --config_file /workdir/config.yml \\\r\n --output_directory /workdir/results\r\n```\r\n\r\n**Expected behavior**\r\n\r\nTraining starts without error.\r\n\r\n**Screenshots**\r\n\r\nExcerpt from the traceback:\r\n```python-traceback\r\n File \"/usr/local/lib/python3.7/site-packages/ludwig/features/numerical_feature.py\", line 269, in logits\r\n return self.decoder_obj(hidden)\r\n File \"/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py\", line 1102, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/ludwig/decoders/generic_decoders.py\", line 58, in forward\r\n return self.dense(inputs)\r\n File \"/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py\", line 1102, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/ludwig/utils/torch_utils.py\", line 212, in forward\r\n output = torch.squeeze(self.dense(input), dim=-1)\r\n File \"/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py\", line 1102, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py\", line 103, in forward\r\n return F.linear(input, self.weight, 
self.bias)\r\n File \"/usr/local/lib/python3.7/site-packages/torch/nn/functional.py\", line 1848, in linear\r\n return torch._C._nn.linear(input, weight, bias)\r\nRuntimeError: mat1 and mat2 shapes cannot be multiplied (6x768 and 512x1)\r\n```\r\n\r\n**Environment:**\r\n\r\nSee reproducible example, run in environment with:\r\n- bash: `GNU bash, version 5.0.17(1)-release (x86_64-pc-linux-gnu)`\r\n- docker: `Docker version 20.10.11+azure-3, build dea9396e184290f638ea873c76db7c80efd5a1d2`\r\n\r\nThe `ludwigai/ludwig:nightly` Docker image was built from main at 89d18365c41c4ded68edd2095349ce4a6caf5d18.\r\n\n", "before_files": [{"content": "# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport copy\nimport logging\nfrom abc import ABC, abstractmethod, abstractstaticmethod\nfrom typing import Any, Dict, Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom ludwig.constants import COLUMN, HIDDEN, LENGTHS, LOGITS, LOSS, NAME, PREDICTIONS, PROBABILITIES, PROC_COLUMN, TYPE\nfrom ludwig.decoders.registry import get_decoder_cls\nfrom ludwig.encoders.registry import get_encoder_cls\nfrom ludwig.features.feature_utils import compute_feature_hash, get_input_size_with_dependencies\nfrom ludwig.modules.fully_connected_modules import FCStack\nfrom ludwig.modules.loss_modules import get_loss_cls\nfrom ludwig.modules.metric_registry import get_metric_classes, get_metric_cls\nfrom ludwig.modules.reduction_modules import SequenceReducer\nfrom ludwig.utils import output_feature_utils\nfrom ludwig.utils.metric_utils import get_scalar_from_ludwig_metric\nfrom ludwig.utils.misc_utils import merge_dict\nfrom ludwig.utils.torch_utils import LudwigModule\nfrom ludwig.utils.types import DataFrame\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseFeatureMixin(ABC):\n \"\"\"Parent class for feature mixins.\n\n Feature mixins support preprocessing functionality shared across input and output features.\n \"\"\"\n\n @abstractstaticmethod\n def type() -> str:\n \"\"\"Returns the type of feature this mixin supports.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def preprocessing_defaults() -> Dict[str, Any]:\n \"\"\"Returns dict of preprocessing defaults.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def preprocessing_schema() -> Dict[str, Any]:\n \"\"\"Returns schema for the preprocessing configuration.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def cast_column(column: DataFrame, backend) -> DataFrame:\n \"\"\"Returns a copy of the dataset column for the given feature, potentially after a type cast.\n\n Args:\n column: Pandas column of values.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n \"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def get_feature_meta(column: DataFrame, preprocessing_parameters: Dict[str, Any], backend) -> Dict[str, Any]:\n \"\"\"Returns a dictionary of feature 
metadata.\n\n Args:\n column: Pandas column of values.\n preprocessing_parameters: Preprocessing configuration for this feature.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n \"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def add_feature_data(\n feature_config: Dict[str, Any],\n input_df: DataFrame,\n proc_df: Dict[str, DataFrame],\n metadata: Dict[str, Any],\n preprocessing_parameters: Dict[str, Any],\n backend, # Union[Backend, str]\n skip_save_processed_input: bool,\n ) -> None:\n \"\"\"Runs preprocessing on the input_df and stores results in the proc_df and metadata dictionaries.\n\n Args:\n feature_config: Feature configuration.\n input_df: Pandas column of values.\n proc_df: Dict of processed columns of data. Feature data is added to this.\n metadata: Metadata returned by get_feature_meta(). Additional information may be added to this.\n preprocessing_parameters: Preprocessing configuration for this feature.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n skip_save_processed_input: Whether to skip saving the processed input.\n \"\"\"\n raise NotImplementedError\n\n\nclass PredictModule(torch.nn.Module):\n \"\"\"Base class for all modules that convert model outputs to predictions.\n\n Explicit member variables needed here for scripting, as Torchscript will not be able to recognize global variables\n during scripting.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.predictions_key = PREDICTIONS\n self.probabilities_key = PROBABILITIES\n self.logits_key = LOGITS\n\n\nclass BaseFeature:\n \"\"\"Base class for all features.\n\n Note that this class is not-cooperative (does not forward kwargs), so when constructing feature class hierarchies,\n there should be only one parent class that derives from base feature. 
Other functionality should be put into mixin\n classes to avoid the diamond pattern.\n \"\"\"\n\n def __init__(self, feature, *args, **kwargs):\n super().__init__()\n\n if NAME not in feature:\n raise ValueError(\"Missing feature name\")\n self.feature_name = feature[NAME]\n\n if COLUMN not in feature:\n feature[COLUMN] = self.feature_name\n self.column = feature[COLUMN]\n\n if PROC_COLUMN not in feature:\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n self.proc_column = feature[PROC_COLUMN]\n\n def overwrite_defaults(self, feature):\n attributes = set(self.__dict__.keys())\n attributes.update(self.__class__.__dict__.keys())\n\n for k in feature.keys():\n if k in attributes:\n if isinstance(feature[k], dict) and hasattr(self, k) and isinstance(getattr(self, k), dict):\n setattr(self, k, merge_dict(getattr(self, k), feature[k]))\n else:\n setattr(self, k, feature[k])\n\n\nclass InputFeature(BaseFeature, LudwigModule, ABC):\n \"\"\"Parent class for all input features.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def create_sample_input(self):\n # Used by get_model_inputs(), which is used for tracing-based torchscript generation.\n return torch.rand([2, *self.input_shape]).to(self.input_dtype)\n\n @staticmethod\n @abstractmethod\n def update_config_with_metadata(input_feature, feature_metadata, *args, **kwargs):\n pass\n\n @staticmethod\n @abstractmethod\n def populate_defaults(input_feature):\n pass\n\n def initialize_encoder(self, encoder_parameters):\n return get_encoder_cls(self.type(), self.encoder)(**encoder_parameters)\n\n @staticmethod\n def create_preproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n raise NotImplementedError(\"Torchscript tracing not supported for feature\")\n\n\nclass OutputFeature(BaseFeature, LudwigModule, ABC):\n \"\"\"Parent class for all output features.\"\"\"\n\n def __init__(self, feature: Dict[str, Any], other_output_features: Dict[str, \"OutputFeature\"], *args, **kwargs):\n \"\"\"Defines defaults, overwrites them based on the feature dictionary, and sets up dependencies.\n\n Any output feature can depend on one or more other output features. The `other_output_features` input dictionary\n should contain entries for any dependent output features, which is accomplished by constructing output features\n in topographically sorted order. 
Attributes of any dependent output features are used to properly initialize\n this feature's sizes.\n \"\"\"\n super().__init__(*args, feature=feature, **kwargs)\n\n self.reduce_input = None\n self.reduce_dependencies = None\n\n # List of feature names that this output feature is depdendent on.\n self.dependencies = []\n\n self.fc_layers = None\n self.num_fc_layers = 0\n self.output_size = 256\n self.use_bias = True\n self.weights_initializer = \"xavier_uniform\"\n self.bias_initializer = \"zeros\"\n self.norm = None\n self.norm_params = None\n self.activation = \"relu\"\n self.dropout = 0\n self.input_size = None\n\n self.overwrite_defaults(feature)\n\n logger.debug(\" output feature fully connected layers\")\n logger.debug(\" FCStack\")\n\n self.input_size = get_input_size_with_dependencies(self.input_size, self.dependencies, other_output_features)\n\n self.fc_stack = FCStack(\n first_layer_input_size=self.input_size,\n layers=self.fc_layers,\n num_layers=self.num_fc_layers,\n default_output_size=self.output_size,\n default_use_bias=self.use_bias,\n default_weights_initializer=self.weights_initializer,\n default_bias_initializer=self.bias_initializer,\n default_norm=self.norm,\n default_norm_params=self.norm_params,\n default_activation=self.activation,\n default_dropout=self.dropout,\n )\n self._prediction_module = self.create_predict_module()\n\n # set up two sequence reducers, one for inputs and other for dependencies\n self.reduce_sequence_input = SequenceReducer(reduce_mode=self.reduce_input)\n if self.dependencies:\n self.dependency_reducers = torch.nn.ModuleDict()\n # todo: re-evaluate need for separate handling of `attention` reducer\n # currently this code does not support `attention`\n for dependency in self.dependencies:\n self.dependency_reducers[dependency] = SequenceReducer(reduce_mode=self.reduce_dependencies)\n\n def create_sample_output(self):\n return torch.rand(self.output_shape, dtype=self.get_output_dtype())\n\n @abstractmethod\n def get_prediction_set(self):\n \"\"\"Returns the set of prediction keys returned by this feature.\"\"\"\n raise NotImplementedError(\"OutputFeature is missing implementation for get_prediction_set.\")\n\n @classmethod\n @abstractmethod\n def get_output_dtype(cls):\n \"\"\"Returns the Tensor data type feature outputs.\"\"\"\n pass\n\n @property\n @abstractmethod\n def metric_functions(self) -> Dict:\n pass\n\n def initialize_decoder(self, decoder_parameters):\n decoder_parameters_copy = copy.copy(decoder_parameters)\n # Input to the decoder is the output feature's FC hidden layer.\n decoder_parameters_copy[\"input_size\"] = self.fc_stack.output_shape[-1]\n if \"decoder\" in decoder_parameters:\n decoder = decoder_parameters[\"decoder\"]\n else:\n decoder = self.decoder\n return get_decoder_cls(self.type(), decoder)(**decoder_parameters_copy)\n\n def train_loss(self, targets: Tensor, predictions: Dict[str, Tensor], feature_name):\n loss_class = type(self.train_loss_function)\n prediction_key = output_feature_utils.get_feature_concat_name(feature_name, loss_class.get_loss_inputs())\n return self.train_loss_function(predictions[prediction_key], targets)\n\n def eval_loss(self, targets: Tensor, predictions: Dict[str, Tensor]):\n loss_class = type(self.train_loss_function)\n prediction_key = loss_class.get_loss_inputs()\n return self.eval_loss_function(predictions[prediction_key].detach(), targets)\n\n def _setup_loss(self):\n loss_kwargs = self.loss_kwargs()\n self.train_loss_function = get_loss_cls(self.type(), self.loss[TYPE])(**loss_kwargs)\n 
self.eval_loss_function = get_metric_cls(self.type(), self.loss[TYPE])(**loss_kwargs)\n\n def _setup_metrics(self):\n # needed to shadow class variable\n self.metric_functions = {\n LOSS: self.eval_loss_function,\n **{\n name: cls(**self.loss_kwargs(), **self.metric_kwargs())\n for name, cls in get_metric_classes(self.type()).items()\n if cls.can_report(self)\n },\n }\n\n @abstractmethod\n def create_predict_module(self) -> PredictModule:\n \"\"\"Creates and returns a `nn.Module` that converts raw model outputs (logits) to predictions.\n\n Thos module is needed when generating the Torchscript model using scripting.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def prediction_module(self) -> PredictModule:\n \"\"\"Returns the PredictModule used to convert model outputs to predictions.\"\"\"\n return self._prediction_module\n\n def predictions(self, all_decoder_outputs: Dict[str, torch.Tensor], feature_name: str) -> Dict[str, torch.Tensor]:\n \"\"\"Computes actual predictions from the outputs of feature decoders.\n\n TODO(Justin): Consider refactoring this to accept feature-specific decoder outputs.\n\n Args:\n all_decoder_outputs: A dictionary of {feature name}::{tensor_name} -> output tensor.\n Returns:\n Dictionary of tensors with predictions as well as any additional tensors that may be\n necessary for computing evaluation metrics.\n \"\"\"\n return self.prediction_module(all_decoder_outputs, feature_name)\n\n @abstractmethod\n def logits(self, combiner_outputs: Dict[str, torch.Tensor], target=None, **kwargs) -> Dict[str, torch.Tensor]:\n \"\"\"Unpacks and feeds combiner_outputs to the decoder. Invoked as part of the output feature's forward pass.\n\n If target is not None, then we are in training.\n\n Args:\n combiner_outputs: Dictionary of tensors from the combiner's forward pass.\n Returns:\n Dictionary of decoder's output tensors (non-normalized), as well as any additional\n tensors that may be necessary for computing predictions or evaluation metrics.\n \"\"\"\n raise NotImplementedError(\"OutputFeature is missing logits() implementation.\")\n\n def loss_kwargs(self) -> Dict[str, Any]:\n \"\"\"Returns arguments that are used to instantiate an instance of the loss class.\"\"\"\n return {}\n\n def metric_kwargs(self) -> Dict[str, Any]:\n \"\"\"Returns arguments that are used to instantiate an instance of each metric class.\"\"\"\n return {}\n\n def update_metrics(self, targets: Tensor, predictions: Dict[str, Tensor]) -> None:\n \"\"\"Updates metrics with the given targets and predictions.\n\n Args:\n targets: Tensor with target values for this output feature.\n predictions: Dict of tensors returned by predictions().\n \"\"\"\n for _, metric_fn in self.metric_functions.items():\n metric_class = type(metric_fn)\n prediction_key = metric_class.get_inputs()\n # TODO(shreya): Metrics should ideally just move to the correct device\n # and not require the user to do this. This is a temporary fix. See\n # if this can be removed before merging the PR.\n metric_fn = metric_fn.to(predictions[prediction_key].device)\n metric_fn.update(predictions[prediction_key].detach(), targets)\n\n def get_metrics(self):\n metric_vals = {}\n for metric_name, metric_fn in self.metric_functions.items():\n try:\n metric_vals[metric_name] = get_scalar_from_ludwig_metric(metric_fn)\n except Exception as e:\n logger.error(f\"Caught exception computing metric: {metric_name}. 
Exception: {e}\")\n return metric_vals\n\n def reset_metrics(self):\n for _, metric_fn in self.metric_functions.items():\n if metric_fn is not None:\n metric_fn.reset()\n\n def forward(\n self,\n combiner_outputs: Dict[str, torch.Tensor],\n other_output_feature_outputs: Dict[str, torch.Tensor],\n mask: Optional[torch.Tensor] = None,\n target: Optional[torch.Tensor] = None,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"Forward pass that takes in output from the combiner, and passes it through to the decoder.\n\n Args:\n combiner_outputs: Dict of outputs from the combiner.\n other_output_feature_outputs: Dict of tensors from other output features. Used for resolving dependencies.\n mask: (Unused). Tensor for masking.\n target: Tensor with targets. During training, targets != None. During prediction, targets = None.\n\n Returns:\n Dict of output tensors, with at least 'last_hidden' and 'logits' as keys, as well as any additional tensor\n results from the decoder.\n \"\"\"\n # extract the combined hidden layer\n combiner_hidden = combiner_outputs[\"combiner_output\"]\n hidden = self.prepare_decoder_inputs(combiner_hidden, other_output_feature_outputs, mask=mask)\n\n # ================ Predictions ================\n logits_input = {HIDDEN: hidden}\n # pass supplemental data from encoders to decoder\n if \"encoder_output_state\" in combiner_outputs:\n logits_input[\"encoder_output_state\"] = combiner_outputs[\"encoder_output_state\"]\n if LENGTHS in combiner_outputs:\n logits_input[LENGTHS] = combiner_outputs[LENGTHS]\n\n logits = self.logits(logits_input, target=target)\n\n # For binary and numerical features, self.logits() is a tensor.\n # There are two special cases where self.logits() is a dict:\n # categorical\n # keys: logits, projection_input\n # sequence\n # keys: logits\n # TODO(Justin): Clean this up.\n if isinstance(logits, Tensor):\n logits = {\"logits\": logits}\n\n # For multi-class features, we must choose a consistent tuple subset.\n return {\n # last_hidden used for dependencies processing\n \"last_hidden\": hidden,\n **logits,\n }\n\n def overall_statistics_metadata(self):\n \"\"\"Additional metadata used to extend `training_set_metadata`.\n\n Used when calculating the overall statistics.\n \"\"\"\n return {}\n\n @property\n @abstractmethod\n def default_validation_metric(self):\n pass\n\n @abstractmethod\n def postprocess_predictions(\n self,\n result: Dict[str, Tensor],\n metadata: Dict[str, Any],\n output_directory: str,\n backend,\n ):\n raise NotImplementedError\n\n @staticmethod\n def create_postproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n raise NotImplementedError(\"Torchscript tracing not supported for feature\")\n\n @staticmethod\n @abstractmethod\n def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):\n pass\n\n @staticmethod\n @abstractmethod\n def calculate_overall_stats(predictions, targets, train_set_metadata):\n pass\n\n @staticmethod\n @abstractmethod\n def populate_defaults(input_feature):\n pass\n\n def output_specific_fully_connected(self, inputs, mask=None):\n feature_hidden = inputs\n original_feature_hidden = inputs\n\n # flatten inputs\n if len(original_feature_hidden.shape) > 2:\n feature_hidden = torch.reshape(feature_hidden, (-1, list(feature_hidden.shape)[-1]))\n\n # pass it through fc_stack\n feature_hidden = self.fc_stack(feature_hidden, mask=mask)\n feature_hidden_size = feature_hidden.shape[-1]\n\n # reshape back to original first and second dimension\n if len(original_feature_hidden.shape) > 2:\n 
sequence_length = original_feature_hidden.shape[1]\n feature_hidden = torch.reshape(feature_hidden, (-1, sequence_length, feature_hidden_size))\n\n return feature_hidden\n\n def prepare_decoder_inputs(\n self, combiner_hidden: Tensor, other_output_features: Dict[str, Tensor], mask=None\n ) -> Tensor:\n \"\"\"Takes the combiner output and the outputs of other outputs features computed so far and performs:\n\n - reduction of combiner outputs (if needed)\n - concatenating the outputs of dependent features (if needed)\n - output_specific fully connected layers (if needed)\n\n Args:\n combiner_hidden: hidden state of the combiner\n other_output_features: output tensors from other output features\n \"\"\"\n # ================ Reduce Inputs ================\n feature_hidden = combiner_hidden\n if self.reduce_input is not None and len(combiner_hidden.shape) > 2:\n feature_hidden = self.reduce_sequence_input(combiner_hidden)\n\n # ================ Concat Dependencies ================\n if self.dependencies:\n feature_hidden = output_feature_utils.concat_dependencies(\n self.column, self.dependencies, self.dependency_reducers, feature_hidden, other_output_features\n )\n\n # ================ Output-wise Fully Connected ================\n feature_hidden = self.output_specific_fully_connected(feature_hidden, mask=mask)\n\n return feature_hidden\n\n def flatten(self, df: DataFrame) -> DataFrame:\n \"\"\"Converts the output of batch_predict to a 1D array.\"\"\"\n return df\n\n def unflatten(self, df: DataFrame) -> DataFrame:\n \"\"\"Reshapes a flattened 1D array into its original shape.\"\"\"\n return df\n", "path": "ludwig/features/base_feature.py"}], "after_files": [{"content": "# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport copy\nimport logging\nfrom abc import ABC, abstractmethod, abstractstaticmethod\nfrom typing import Any, Dict, Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom ludwig.constants import COLUMN, HIDDEN, LENGTHS, LOGITS, LOSS, NAME, PREDICTIONS, PROBABILITIES, PROC_COLUMN, TYPE\nfrom ludwig.decoders.registry import get_decoder_cls\nfrom ludwig.encoders.registry import get_encoder_cls\nfrom ludwig.features.feature_utils import compute_feature_hash, get_input_size_with_dependencies\nfrom ludwig.modules.fully_connected_modules import FCStack\nfrom ludwig.modules.loss_modules import get_loss_cls\nfrom ludwig.modules.metric_registry import get_metric_classes, get_metric_cls\nfrom ludwig.modules.reduction_modules import SequenceReducer\nfrom ludwig.utils import output_feature_utils\nfrom ludwig.utils.metric_utils import get_scalar_from_ludwig_metric\nfrom ludwig.utils.misc_utils import merge_dict\nfrom ludwig.utils.torch_utils import LudwigModule\nfrom ludwig.utils.types import DataFrame\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseFeatureMixin(ABC):\n \"\"\"Parent class for feature mixins.\n\n Feature mixins support preprocessing 
functionality shared across input and output features.\n \"\"\"\n\n @abstractstaticmethod\n def type() -> str:\n \"\"\"Returns the type of feature this mixin supports.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def preprocessing_defaults() -> Dict[str, Any]:\n \"\"\"Returns dict of preprocessing defaults.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def preprocessing_schema() -> Dict[str, Any]:\n \"\"\"Returns schema for the preprocessing configuration.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def cast_column(column: DataFrame, backend) -> DataFrame:\n \"\"\"Returns a copy of the dataset column for the given feature, potentially after a type cast.\n\n Args:\n column: Pandas column of values.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n \"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def get_feature_meta(column: DataFrame, preprocessing_parameters: Dict[str, Any], backend) -> Dict[str, Any]:\n \"\"\"Returns a dictionary of feature metadata.\n\n Args:\n column: Pandas column of values.\n preprocessing_parameters: Preprocessing configuration for this feature.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n \"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def add_feature_data(\n feature_config: Dict[str, Any],\n input_df: DataFrame,\n proc_df: Dict[str, DataFrame],\n metadata: Dict[str, Any],\n preprocessing_parameters: Dict[str, Any],\n backend, # Union[Backend, str]\n skip_save_processed_input: bool,\n ) -> None:\n \"\"\"Runs preprocessing on the input_df and stores results in the proc_df and metadata dictionaries.\n\n Args:\n feature_config: Feature configuration.\n input_df: Pandas column of values.\n proc_df: Dict of processed columns of data. Feature data is added to this.\n metadata: Metadata returned by get_feature_meta(). Additional information may be added to this.\n preprocessing_parameters: Preprocessing configuration for this feature.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n skip_save_processed_input: Whether to skip saving the processed input.\n \"\"\"\n raise NotImplementedError\n\n\nclass PredictModule(torch.nn.Module):\n \"\"\"Base class for all modules that convert model outputs to predictions.\n\n Explicit member variables needed here for scripting, as Torchscript will not be able to recognize global variables\n during scripting.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.predictions_key = PREDICTIONS\n self.probabilities_key = PROBABILITIES\n self.logits_key = LOGITS\n\n\nclass BaseFeature:\n \"\"\"Base class for all features.\n\n Note that this class is not-cooperative (does not forward kwargs), so when constructing feature class hierarchies,\n there should be only one parent class that derives from base feature. 
Other functionality should be put into mixin\n classes to avoid the diamond pattern.\n \"\"\"\n\n def __init__(self, feature, *args, **kwargs):\n super().__init__()\n\n if NAME not in feature:\n raise ValueError(\"Missing feature name\")\n self.feature_name = feature[NAME]\n\n if COLUMN not in feature:\n feature[COLUMN] = self.feature_name\n self.column = feature[COLUMN]\n\n if PROC_COLUMN not in feature:\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n self.proc_column = feature[PROC_COLUMN]\n\n def overwrite_defaults(self, feature):\n attributes = set(self.__dict__.keys())\n attributes.update(self.__class__.__dict__.keys())\n\n for k in feature.keys():\n if k in attributes:\n if isinstance(feature[k], dict) and hasattr(self, k) and isinstance(getattr(self, k), dict):\n setattr(self, k, merge_dict(getattr(self, k), feature[k]))\n else:\n setattr(self, k, feature[k])\n\n\nclass InputFeature(BaseFeature, LudwigModule, ABC):\n \"\"\"Parent class for all input features.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def create_sample_input(self):\n # Used by get_model_inputs(), which is used for tracing-based torchscript generation.\n return torch.rand([2, *self.input_shape]).to(self.input_dtype)\n\n @staticmethod\n @abstractmethod\n def update_config_with_metadata(input_feature, feature_metadata, *args, **kwargs):\n pass\n\n @staticmethod\n @abstractmethod\n def populate_defaults(input_feature):\n pass\n\n def initialize_encoder(self, encoder_parameters):\n return get_encoder_cls(self.type(), self.encoder)(**encoder_parameters)\n\n @staticmethod\n def create_preproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n raise NotImplementedError(\"Torchscript tracing not supported for feature\")\n\n\nclass OutputFeature(BaseFeature, LudwigModule, ABC):\n \"\"\"Parent class for all output features.\"\"\"\n\n def __init__(self, feature: Dict[str, Any], other_output_features: Dict[str, \"OutputFeature\"], *args, **kwargs):\n \"\"\"Defines defaults, overwrites them based on the feature dictionary, and sets up dependencies.\n\n Any output feature can depend on one or more other output features. The `other_output_features` input dictionary\n should contain entries for any dependent output features, which is accomplished by constructing output features\n in topographically sorted order. 
Attributes of any dependent output features are used to properly initialize\n this feature's sizes.\n \"\"\"\n super().__init__(*args, feature=feature, **kwargs)\n\n self.reduce_input = None\n self.reduce_dependencies = None\n\n # List of feature names that this output feature is depdendent on.\n self.dependencies = []\n\n self.fc_layers = None\n self.num_fc_layers = 0\n self.output_size = 256\n self.use_bias = True\n self.weights_initializer = \"xavier_uniform\"\n self.bias_initializer = \"zeros\"\n self.norm = None\n self.norm_params = None\n self.activation = \"relu\"\n self.dropout = 0\n self.input_size = None\n\n self.overwrite_defaults(feature)\n\n logger.debug(\" output feature fully connected layers\")\n logger.debug(\" FCStack\")\n\n self.input_size = get_input_size_with_dependencies(self.input_size, self.dependencies, other_output_features)\n feature[\"input_size\"] = self.input_size # needed for future overrides\n\n self.fc_stack = FCStack(\n first_layer_input_size=self.input_size,\n layers=self.fc_layers,\n num_layers=self.num_fc_layers,\n default_output_size=self.output_size,\n default_use_bias=self.use_bias,\n default_weights_initializer=self.weights_initializer,\n default_bias_initializer=self.bias_initializer,\n default_norm=self.norm,\n default_norm_params=self.norm_params,\n default_activation=self.activation,\n default_dropout=self.dropout,\n )\n self._prediction_module = self.create_predict_module()\n\n # set up two sequence reducers, one for inputs and other for dependencies\n self.reduce_sequence_input = SequenceReducer(reduce_mode=self.reduce_input)\n if self.dependencies:\n self.dependency_reducers = torch.nn.ModuleDict()\n # todo: re-evaluate need for separate handling of `attention` reducer\n # currently this code does not support `attention`\n for dependency in self.dependencies:\n self.dependency_reducers[dependency] = SequenceReducer(reduce_mode=self.reduce_dependencies)\n\n def create_sample_output(self):\n return torch.rand(self.output_shape, dtype=self.get_output_dtype())\n\n @abstractmethod\n def get_prediction_set(self):\n \"\"\"Returns the set of prediction keys returned by this feature.\"\"\"\n raise NotImplementedError(\"OutputFeature is missing implementation for get_prediction_set.\")\n\n @classmethod\n @abstractmethod\n def get_output_dtype(cls):\n \"\"\"Returns the Tensor data type feature outputs.\"\"\"\n pass\n\n @property\n @abstractmethod\n def metric_functions(self) -> Dict:\n pass\n\n def initialize_decoder(self, decoder_parameters):\n decoder_parameters_copy = copy.copy(decoder_parameters)\n # Input to the decoder is the output feature's FC hidden layer.\n decoder_parameters_copy[\"input_size\"] = self.fc_stack.output_shape[-1]\n if \"decoder\" in decoder_parameters:\n decoder = decoder_parameters[\"decoder\"]\n else:\n decoder = self.decoder\n return get_decoder_cls(self.type(), decoder)(**decoder_parameters_copy)\n\n def train_loss(self, targets: Tensor, predictions: Dict[str, Tensor], feature_name):\n loss_class = type(self.train_loss_function)\n prediction_key = output_feature_utils.get_feature_concat_name(feature_name, loss_class.get_loss_inputs())\n return self.train_loss_function(predictions[prediction_key], targets)\n\n def eval_loss(self, targets: Tensor, predictions: Dict[str, Tensor]):\n loss_class = type(self.train_loss_function)\n prediction_key = loss_class.get_loss_inputs()\n return self.eval_loss_function(predictions[prediction_key].detach(), targets)\n\n def _setup_loss(self):\n loss_kwargs = self.loss_kwargs()\n 
self.train_loss_function = get_loss_cls(self.type(), self.loss[TYPE])(**loss_kwargs)\n self.eval_loss_function = get_metric_cls(self.type(), self.loss[TYPE])(**loss_kwargs)\n\n def _setup_metrics(self):\n # needed to shadow class variable\n self.metric_functions = {\n LOSS: self.eval_loss_function,\n **{\n name: cls(**self.loss_kwargs(), **self.metric_kwargs())\n for name, cls in get_metric_classes(self.type()).items()\n if cls.can_report(self)\n },\n }\n\n @abstractmethod\n def create_predict_module(self) -> PredictModule:\n \"\"\"Creates and returns a `nn.Module` that converts raw model outputs (logits) to predictions.\n\n Thos module is needed when generating the Torchscript model using scripting.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def prediction_module(self) -> PredictModule:\n \"\"\"Returns the PredictModule used to convert model outputs to predictions.\"\"\"\n return self._prediction_module\n\n def predictions(self, all_decoder_outputs: Dict[str, torch.Tensor], feature_name: str) -> Dict[str, torch.Tensor]:\n \"\"\"Computes actual predictions from the outputs of feature decoders.\n\n TODO(Justin): Consider refactoring this to accept feature-specific decoder outputs.\n\n Args:\n all_decoder_outputs: A dictionary of {feature name}::{tensor_name} -> output tensor.\n Returns:\n Dictionary of tensors with predictions as well as any additional tensors that may be\n necessary for computing evaluation metrics.\n \"\"\"\n return self.prediction_module(all_decoder_outputs, feature_name)\n\n @abstractmethod\n def logits(self, combiner_outputs: Dict[str, torch.Tensor], target=None, **kwargs) -> Dict[str, torch.Tensor]:\n \"\"\"Unpacks and feeds combiner_outputs to the decoder. Invoked as part of the output feature's forward pass.\n\n If target is not None, then we are in training.\n\n Args:\n combiner_outputs: Dictionary of tensors from the combiner's forward pass.\n Returns:\n Dictionary of decoder's output tensors (non-normalized), as well as any additional\n tensors that may be necessary for computing predictions or evaluation metrics.\n \"\"\"\n raise NotImplementedError(\"OutputFeature is missing logits() implementation.\")\n\n def loss_kwargs(self) -> Dict[str, Any]:\n \"\"\"Returns arguments that are used to instantiate an instance of the loss class.\"\"\"\n return {}\n\n def metric_kwargs(self) -> Dict[str, Any]:\n \"\"\"Returns arguments that are used to instantiate an instance of each metric class.\"\"\"\n return {}\n\n def update_metrics(self, targets: Tensor, predictions: Dict[str, Tensor]) -> None:\n \"\"\"Updates metrics with the given targets and predictions.\n\n Args:\n targets: Tensor with target values for this output feature.\n predictions: Dict of tensors returned by predictions().\n \"\"\"\n for _, metric_fn in self.metric_functions.items():\n metric_class = type(metric_fn)\n prediction_key = metric_class.get_inputs()\n # TODO(shreya): Metrics should ideally just move to the correct device\n # and not require the user to do this. This is a temporary fix. See\n # if this can be removed before merging the PR.\n metric_fn = metric_fn.to(predictions[prediction_key].device)\n metric_fn.update(predictions[prediction_key].detach(), targets)\n\n def get_metrics(self):\n metric_vals = {}\n for metric_name, metric_fn in self.metric_functions.items():\n try:\n metric_vals[metric_name] = get_scalar_from_ludwig_metric(metric_fn)\n except Exception as e:\n logger.error(f\"Caught exception computing metric: {metric_name}. 
Exception: {e}\")\n return metric_vals\n\n def reset_metrics(self):\n for _, metric_fn in self.metric_functions.items():\n if metric_fn is not None:\n metric_fn.reset()\n\n def forward(\n self,\n combiner_outputs: Dict[str, torch.Tensor],\n other_output_feature_outputs: Dict[str, torch.Tensor],\n mask: Optional[torch.Tensor] = None,\n target: Optional[torch.Tensor] = None,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"Forward pass that takes in output from the combiner, and passes it through to the decoder.\n\n Args:\n combiner_outputs: Dict of outputs from the combiner.\n other_output_feature_outputs: Dict of tensors from other output features. Used for resolving dependencies.\n mask: (Unused). Tensor for masking.\n target: Tensor with targets. During training, targets != None. During prediction, targets = None.\n\n Returns:\n Dict of output tensors, with at least 'last_hidden' and 'logits' as keys, as well as any additional tensor\n results from the decoder.\n \"\"\"\n # extract the combined hidden layer\n combiner_hidden = combiner_outputs[\"combiner_output\"]\n hidden = self.prepare_decoder_inputs(combiner_hidden, other_output_feature_outputs, mask=mask)\n\n # ================ Predictions ================\n logits_input = {HIDDEN: hidden}\n # pass supplemental data from encoders to decoder\n if \"encoder_output_state\" in combiner_outputs:\n logits_input[\"encoder_output_state\"] = combiner_outputs[\"encoder_output_state\"]\n if LENGTHS in combiner_outputs:\n logits_input[LENGTHS] = combiner_outputs[LENGTHS]\n\n logits = self.logits(logits_input, target=target)\n\n # For binary and numerical features, self.logits() is a tensor.\n # There are two special cases where self.logits() is a dict:\n # categorical\n # keys: logits, projection_input\n # sequence\n # keys: logits\n # TODO(Justin): Clean this up.\n if isinstance(logits, Tensor):\n logits = {\"logits\": logits}\n\n # For multi-class features, we must choose a consistent tuple subset.\n return {\n # last_hidden used for dependencies processing\n \"last_hidden\": hidden,\n **logits,\n }\n\n def overall_statistics_metadata(self):\n \"\"\"Additional metadata used to extend `training_set_metadata`.\n\n Used when calculating the overall statistics.\n \"\"\"\n return {}\n\n @property\n @abstractmethod\n def default_validation_metric(self):\n pass\n\n @abstractmethod\n def postprocess_predictions(\n self,\n result: Dict[str, Tensor],\n metadata: Dict[str, Any],\n output_directory: str,\n backend,\n ):\n raise NotImplementedError\n\n @staticmethod\n def create_postproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n raise NotImplementedError(\"Torchscript tracing not supported for feature\")\n\n @staticmethod\n @abstractmethod\n def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):\n pass\n\n @staticmethod\n @abstractmethod\n def calculate_overall_stats(predictions, targets, train_set_metadata):\n pass\n\n @staticmethod\n @abstractmethod\n def populate_defaults(input_feature):\n pass\n\n def output_specific_fully_connected(self, inputs, mask=None):\n feature_hidden = inputs\n original_feature_hidden = inputs\n\n # flatten inputs\n if len(original_feature_hidden.shape) > 2:\n feature_hidden = torch.reshape(feature_hidden, (-1, list(feature_hidden.shape)[-1]))\n\n # pass it through fc_stack\n feature_hidden = self.fc_stack(feature_hidden, mask=mask)\n feature_hidden_size = feature_hidden.shape[-1]\n\n # reshape back to original first and second dimension\n if len(original_feature_hidden.shape) > 2:\n 
sequence_length = original_feature_hidden.shape[1]\n feature_hidden = torch.reshape(feature_hidden, (-1, sequence_length, feature_hidden_size))\n\n return feature_hidden\n\n def prepare_decoder_inputs(\n self, combiner_hidden: Tensor, other_output_features: Dict[str, Tensor], mask=None\n ) -> Tensor:\n \"\"\"Takes the combiner output and the outputs of other outputs features computed so far and performs:\n\n - reduction of combiner outputs (if needed)\n - concatenating the outputs of dependent features (if needed)\n - output_specific fully connected layers (if needed)\n\n Args:\n combiner_hidden: hidden state of the combiner\n other_output_features: output tensors from other output features\n \"\"\"\n # ================ Reduce Inputs ================\n feature_hidden = combiner_hidden\n if self.reduce_input is not None and len(combiner_hidden.shape) > 2:\n feature_hidden = self.reduce_sequence_input(combiner_hidden)\n\n # ================ Concat Dependencies ================\n if self.dependencies:\n feature_hidden = output_feature_utils.concat_dependencies(\n self.column, self.dependencies, self.dependency_reducers, feature_hidden, other_output_features\n )\n\n # ================ Output-wise Fully Connected ================\n feature_hidden = self.output_specific_fully_connected(feature_hidden, mask=mask)\n\n return feature_hidden\n\n def flatten(self, df: DataFrame) -> DataFrame:\n \"\"\"Converts the output of batch_predict to a 1D array.\"\"\"\n return df\n\n def unflatten(self, df: DataFrame) -> DataFrame:\n \"\"\"Reshapes a flattened 1D array into its original shape.\"\"\"\n return df\n", "path": "ludwig/features/base_feature.py"}]} |
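A rough illustration of the size bookkeeping behind the one-line fix in the record above. The numbers follow the `(6x768 and 512x1)` error from the issue and the default `output_size = 256` visible in `OutputFeature.__init__`; the internals of `get_input_size_with_dependencies` are not shown here, so treat this as an assumption about the failure mode rather than ludwig's actual computation.

```python
# Hypothetical arithmetic only -- not code from ludwig.
combiner_output_size = 256     # matches the default output_size above
summary_hidden = 256           # summary's hidden state after its FC stack

# qty_frac depends on summary: its decoder input concatenates combiner + summary.
qty_frac_hidden = combiner_output_size + summary_hidden      # 512

# qty depends on qty_frac: its concatenated hidden state is combiner + qty_frac.
qty_concat_width = combiner_output_size + qty_frac_hidden    # 768

# If qty_frac's widened input size is never written back into its feature dict,
# the size lookup for qty can still assume 256 for qty_frac and build a
# 256 + 256 = 512 dense layer -- hence the (6x768) x (512x1) mismatch.
stale_qty_input_size = combiner_output_size + 256             # 512
assert (qty_concat_width, stale_qty_input_size) == (768, 512)
```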
gh_patches_debug_1495 | rasdani/github-patches | git_diff | pulp__pulpcore-4010 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RESTAPI document fix for Upstream Pulp Replication API
**Version**
Pulp installed through the Python modules.
"core:3.28.0"
"certguard:3.28.0"
"file:3.28.0"
"python:3.28.0"
"rpm:3.28.0"
**Describe the bug**
Why the attributes of **upstream_pulps_create**/**update** is mentioned again in the **upstream_pulps_replicate" document? Are those attributes (base_url, api_root, domain,...) used at time making an API request "https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/"?
**To Reproduce**
None.
**Expected behavior**
A fix is required in the REST API document.
**Additional context**
Create Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create
Upstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/viewsets/replica.py`
Content:
```
1 """
2 ViewSet for replicating repositories and distributions from an upstream Pulp
3 """
4 from django.conf import settings
5 from drf_spectacular.utils import extend_schema
6 from rest_framework import mixins
7 from rest_framework.decorators import action
8
9 from pulpcore.app.models import TaskGroup, UpstreamPulp
10 from pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer
11 from pulpcore.app.viewsets import NamedModelViewSet
12 from pulpcore.app.response import TaskGroupOperationResponse
13 from pulpcore.app.tasks import replicate_distributions
14 from pulpcore.tasking.tasks import dispatch
15
16
17 class UpstreamPulpViewSet(
18 NamedModelViewSet,
19 mixins.CreateModelMixin,
20 mixins.RetrieveModelMixin,
21 mixins.ListModelMixin,
22 mixins.DestroyModelMixin,
23 mixins.UpdateModelMixin,
24 ):
25 """API for configuring an upstream Pulp to replicate. This API is provided as a tech preview."""
26
27 queryset = UpstreamPulp.objects.all()
28 endpoint_name = "upstream-pulps"
29 serializer_class = UpstreamPulpSerializer
30 ordering = "-pulp_created"
31
32 @extend_schema(
33 summary="Replicate",
34 description="Trigger an asynchronous repository replication task group. This API is "
35 "provided as a tech preview.",
36 responses={202: AsyncOperationResponseSerializer},
37 )
38 @action(detail=True, methods=["post"])
39 def replicate(self, request, pk):
40 """
41 Triggers an asynchronous repository replication operation.
42 """
43 server = UpstreamPulp.objects.get(pk=pk)
44 task_group = TaskGroup.objects.create(description=f"Replication of {server.name}")
45
46 uri = "/api/v3/servers/"
47 if settings.DOMAIN_ENABLED:
48 uri = f"/{request.domain.name}{uri}"
49
50 dispatch(
51 replicate_distributions,
52 exclusive_resources=[uri],
53 kwargs={"server_pk": pk},
54 task_group=task_group,
55 )
56
57 return TaskGroupOperationResponse(task_group, request)
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py
--- a/pulpcore/app/viewsets/replica.py
+++ b/pulpcore/app/viewsets/replica.py
@@ -33,6 +33,7 @@
summary="Replicate",
description="Trigger an asynchronous repository replication task group. This API is "
"provided as a tech preview.",
+ request=None,
responses={202: AsyncOperationResponseSerializer},
)
@action(detail=True, methods=["post"])
| {"golden_diff": "diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py\n--- a/pulpcore/app/viewsets/replica.py\n+++ b/pulpcore/app/viewsets/replica.py\n@@ -33,6 +33,7 @@\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n+ request=None,\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n", "issue": "RESTAPI document fix for Upstream Pulp Replication API\n**Version**\r\nPulp installed through the Python modules.\r\n\"core:3.28.0\"\r\n\"certguard:3.28.0\"\r\n\"file:3.28.0\"\r\n\"python:3.28.0\"\r\n\"rpm:3.28.0\"\r\n\r\n**Describe the bug**\r\nWhy the attributes of **upstream_pulps_create**/**update** is mentioned again in the **upstream_pulps_replicate\" document? Are those attributes (base_url, api_root, domain,...) used at time making an API request \"https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/\"?\r\n\r\n**To Reproduce**\r\nNone.\r\n\r\n**Expected behavior**\r\nA fix is required in the REST API document.\r\n\r\n**Additional context**\r\nCreate Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create\r\nUpstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. 
This API is \"\n \"provided as a tech preview.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/replica.py"}], "after_files": [{"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n request=None,\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/replica.py"}]} |
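
The one-line fix above works because drf-spectacular treats `request=None` as "this operation takes no request body", so the replicate action stops inheriting the `UpstreamPulpSerializer` fields in the generated document. A rough standalone sketch of the same decorator arrangement — the settings, serializer, and response payload below are placeholders, not Pulp's real code:

```python
import django
from django.conf import settings

settings.configure(INSTALLED_APPS=["django.contrib.contenttypes", "django.contrib.auth"])
django.setup()

from drf_spectacular.utils import extend_schema
from rest_framework import serializers, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response


class AsyncOperationResponseSerializer(serializers.Serializer):
    # Stand-in for pulpcore's real response serializer.
    task_group = serializers.CharField()


class UpstreamPulpViewSet(viewsets.ViewSet):
    @extend_schema(
        summary="Replicate",
        request=None,  # no request body is documented for this action
        responses={202: AsyncOperationResponseSerializer},
    )
    @action(detail=True, methods=["post"])
    def replicate(self, request, pk=None):
        return Response({"task_group": "example"}, status=202)


print("replicate action defined on", UpstreamPulpViewSet.__name__)
```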
gh_patches_debug_1496 | rasdani/github-patches | git_diff | searx__searx-672 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Infinite scroll: answer are repeated on each page
How to reproduce : search for ["user agent"](https://searx.me/?q=user+agent) with Infinite scroll activated.
Should the answer be disabled except the first page ? or should Infinite Scroll hide the answer ?
I vote for the first option : disabled answers except on the first page on the server side.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/plugins/self_info.py`
Content:
```
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2015 by Adam Tauber, <[email protected]>
16 '''
17 from flask_babel import gettext
18 import re
19 name = "Self Informations"
20 description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
21 default_on = True
22
23
24 # Self User Agent regex
25 p = re.compile('.*user[ -]agent.*', re.IGNORECASE)
26
27
28 # attach callback to the post search hook
29 # request: flask request object
30 # ctx: the whole local context of the pre search hook
31 def post_search(request, ctx):
32 if ctx['search'].query == 'ip':
33 x_forwarded_for = request.headers.getlist("X-Forwarded-For")
34 if x_forwarded_for:
35 ip = x_forwarded_for[0]
36 else:
37 ip = request.remote_addr
38 ctx['search'].result_container.answers.clear()
39 ctx['search'].result_container.answers.add(ip)
40 elif p.match(ctx['search'].query):
41 ua = request.user_agent
42 ctx['search'].result_container.answers.clear()
43 ctx['search'].result_container.answers.add(ua)
44 return True
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py
--- a/searx/plugins/self_info.py
+++ b/searx/plugins/self_info.py
@@ -29,6 +29,8 @@
# request: flask request object
# ctx: the whole local context of the pre search hook
def post_search(request, ctx):
+ if ctx['search'].pageno > 1:
+ return True
if ctx['search'].query == 'ip':
x_forwarded_for = request.headers.getlist("X-Forwarded-For")
if x_forwarded_for:
| {"golden_diff": "diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py\n--- a/searx/plugins/self_info.py\n+++ b/searx/plugins/self_info.py\n@@ -29,6 +29,8 @@\n # request: flask request object\n # ctx: the whole local context of the pre search hook\n def post_search(request, ctx):\n+ if ctx['search'].pageno > 1:\n+ return True\n if ctx['search'].query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n", "issue": "Infinite scroll: answer are repeated on each page\nHow to reproduce : search for [\"user agent\"](https://searx.me/?q=user+agent) with Infinite scroll activated.\n\nShould the answer be disabled except the first page ? or should Infinite Scroll hide the answer ?\n\nI vote for the first option : disabled answers except on the first page on the server side. \n\n", "before_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = \"Self Informations\"\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, ctx):\n if ctx['search'].query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ip)\n elif p.match(ctx['search'].query):\n ua = request.user_agent\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ua)\n return True\n", "path": "searx/plugins/self_info.py"}], "after_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. 
If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = \"Self Informations\"\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, ctx):\n if ctx['search'].pageno > 1:\n return True\n if ctx['search'].query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ip)\n elif p.match(ctx['search'].query):\n ua = request.user_agent\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ua)\n return True\n", "path": "searx/plugins/self_info.py"}]} |
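
The patch above simply short-circuits the plugin for every page after the first. A tiny self-contained imitation (the `Search` and `ResultContainer` classes are stand-ins for searx's real objects) shows the effect: the answer is produced once, not on every infinite-scroll request:

```python
class ResultContainer:
    def __init__(self):
        self.answers = set()


class Search:
    def __init__(self, query, pageno):
        self.query = query
        self.pageno = pageno
        self.result_container = ResultContainer()


def post_search(request, ctx):
    # The added guard: only the first page gets an answer.
    if ctx['search'].pageno > 1:
        return True
    if ctx['search'].query == 'ip':
        ctx['search'].result_container.answers.add('203.0.113.7')
    return True


for page in (1, 2, 3):
    search = Search('ip', page)
    post_search(None, {'search': search})
    print(page, search.result_container.answers)
```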
gh_patches_debug_1497 | rasdani/github-patches | git_diff | fedora-infra__bodhi-4148 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash in automatic update handler when submitting work_on_bugs_task
From bodhi-consumer logs:
```
2020-10-25 11:17:14,460 INFO [fedora_messaging.twisted.protocol][MainThread] Consuming message from topic org.fedoraproject.prod.buildsys.tag (message id c2d97737-444f-49b4-b4ca-1efb3a05e941)
2020-10-25 11:17:14,463 INFO [bodhi][PoolThread-twisted.internet.reactor-1] Received message from fedora-messaging with topic: org.fedoraproject.prod.buildsys.tag
2020-10-25 11:17:14,463 INFO [bodhi][PoolThread-twisted.internet.reactor-1] ginac-1.7.9-5.fc34 tagged into f34-updates-candidate
2020-10-25 11:17:14,469 INFO [bodhi][PoolThread-twisted.internet.reactor-1] Build was not submitted, skipping
2020-10-25 11:17:14,838 INFO [bodhi.server][PoolThread-twisted.internet.reactor-1] Sending mail to [email protected]: [Fedora Update] [comment] ginac-1.7.9-5.fc34
2020-10-25 11:17:15,016 ERROR [bodhi][PoolThread-twisted.internet.reactor-1] Instance <Update at 0x7fa3740f5910> is not bound to a Session; attribute refresh operation cannot proceed (Background on this error at: http://sqlalche.me/e/13/bhk3): Unable to handle message in Automatic Update handler: Id: c2d97737-444f-49b4-b4ca-1efb3a05e941
Topic: org.fedoraproject.prod.buildsys.tag
Headers: {
"fedora_messaging_schema": "base.message",
"fedora_messaging_severity": 20,
"sent-at": "2020-10-25T11:17:14+00:00"
}
Body: {
"build_id": 1634116,
"instance": "primary",
"name": "ginac",
"owner": "---",
"release": "5.fc34",
"tag": "f34-updates-candidate",
"tag_id": 27040,
"user": "---",
"version": "1.7.9"
}
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/bodhi/server/consumers/__init__.py", line 79, in __call__
handler_info.handler(msg)
File "/usr/local/lib/python3.8/site-packages/bodhi/server/consumers/automatic_updates.py", line 197, in __call__
alias = update.alias
File "/usr/lib64/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 287, in __get__
return self.impl.get(instance_state(instance), dict_)
File "/usr/lib64/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 718, in get
value = state._load_expired(state, passive)
File "/usr/lib64/python3.8/site-packages/sqlalchemy/orm/state.py", line 652, in _load_expired
self.manager.deferred_scalar_loader(self, toload)
File "/usr/lib64/python3.8/site-packages/sqlalchemy/orm/loading.py", line 944, in load_scalar_attributes
raise orm_exc.DetachedInstanceError(
sqlalchemy.orm.exc.DetachedInstanceError: Instance <Update at 0x7fa3740f5910> is not bound to a Session; attribute refresh operation cannot proceed (Background on this error at: http://sqlalche.me/e/13/bhk3 )
2020-10-25 11:17:15,053 WARNI [fedora_messaging.twisted.protocol][MainThread] Returning message id c2d97737-444f-49b4-b4ca-1efb3a05e941 to the queue
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/consumers/automatic_updates.py`
Content:
```
1 # Copyright © 2019 Red Hat, Inc. and others.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License along with
16 # this program; if not, write to the Free Software Foundation, Inc., 51
17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 The Bodhi handler that creates updates automatically from tagged builds.
20
21 This module is responsible for the process of creating updates when builds are
22 tagged with certain tags.
23 """
24
25 import logging
26 import re
27
28 import fedora_messaging
29
30 from bodhi.server import buildsys
31 from bodhi.server.config import config
32 from bodhi.server.models import (
33 Bug, Build, ContentType, Package, Release, Update, UpdateStatus, UpdateType, User)
34 from bodhi.server.tasks import work_on_bugs_task
35 from bodhi.server.util import transactional_session_maker
36
37 log = logging.getLogger('bodhi')
38
39
40 class AutomaticUpdateHandler:
41 """
42 The Bodhi Automatic Update Handler.
43
44 A consumer that listens for messages about tagged builds and creates
45 updates from them.
46 """
47
48 def __init__(self, db_factory: transactional_session_maker = None):
49 """
50 Initialize the Automatic Update Handler.
51
52 Args:
53 db_factory: If given, used as the db_factory for this handler. If
54 None (the default), a new TransactionalSessionMaker is created and
55 used.
56 """
57 if not db_factory:
58 self.db_factory = transactional_session_maker()
59 else:
60 self.db_factory = db_factory
61
62 def __call__(self, message: fedora_messaging.api.Message) -> None:
63 """Create updates from appropriately tagged builds.
64
65 Args:
66 message: The message we are processing.
67 """
68 body = message.body
69
70 missing = []
71 for mandatory in ('tag', 'build_id', 'name', 'version', 'release'):
72 if mandatory not in body:
73 missing.append(mandatory)
74 if missing:
75 log.debug(f"Received incomplete tag message. Missing: {', '.join(missing)}")
76 return
77
78 btag = body['tag']
79 bnvr = '{name}-{version}-{release}'.format(**body)
80
81 koji = buildsys.get_session()
82
83 kbuildinfo = koji.getBuild(bnvr)
84 if not kbuildinfo:
85 log.debug(f"Can't find Koji build for {bnvr}.")
86 return
87
88 if 'nvr' not in kbuildinfo:
89 log.debug(f"Koji build info for {bnvr} doesn't contain 'nvr'.")
90 return
91
92 if 'owner_name' not in kbuildinfo:
93 log.debug(f"Koji build info for {bnvr} doesn't contain 'owner_name'.")
94 return
95
96 if kbuildinfo['owner_name'] in config.get('automatic_updates_blacklist'):
97 log.debug(f"{bnvr} owned by {kbuildinfo['owner_name']} who is listed in "
98 "automatic_updates_blacklist, skipping.")
99 return
100
101 # some APIs want the Koji build info, some others want the same
102 # wrapped in a larger (request?) structure
103 rbuildinfo = {
104 'info': kbuildinfo,
105 'nvr': kbuildinfo['nvr'].rsplit('-', 2),
106 }
107
108 with self.db_factory() as dbsession:
109 rel = dbsession.query(Release).filter_by(create_automatic_updates=True,
110 candidate_tag=btag).first()
111 if not rel:
112 log.debug(f"Ignoring build being tagged into {btag!r}, no release configured for "
113 "automatic updates for it found.")
114 return
115
116 bcls = ContentType.infer_content_class(Build, kbuildinfo)
117 build = bcls.get(bnvr)
118 if build and build.update:
119 log.info(f"Build, active update for {bnvr} exists already, skipping.")
120 return
121
122 if not build:
123 log.debug(f"Build for {bnvr} doesn't exist yet, creating.")
124
125 # Package.get_or_create() infers content type already
126 log.debug("Getting/creating related package object.")
127 pkg = Package.get_or_create(dbsession, rbuildinfo)
128
129 log.debug("Creating build object, adding it to the DB.")
130 build = bcls(nvr=bnvr, package=pkg, release=rel)
131 dbsession.add(build)
132
133 owner_name = kbuildinfo['owner_name']
134 user = User.get(owner_name)
135 if not user:
136 log.debug(f"Creating bodhi user for '{owner_name}'.")
137 # Leave email, groups blank, these will be filled
138 # in or updated when they log into Bodhi next time, see
139 # bodhi.server.security:remember_me().
140 user = User(name=owner_name)
141 dbsession.add(user)
142
143 log.debug(f"Creating new update for {bnvr}.")
144 changelog = build.get_changelog(lastupdate=True)
145 closing_bugs = []
146 if changelog:
147 log.debug("Adding changelog to update notes.")
148 notes = f"""Automatic update for {bnvr}.
149
150 ##### **Changelog**
151
152 ```
153 {changelog}
154 ```"""
155
156 for b in re.finditer(config.get('bz_regex'), changelog, re.IGNORECASE):
157 idx = int(b.group(1))
158 log.debug(f'Adding bug #{idx} to the update.')
159 bug = Bug.get(idx)
160 if bug is None:
161 bug = Bug(bug_id=idx)
162 dbsession.add(bug)
163 dbsession.flush()
164 if bug not in closing_bugs:
165 closing_bugs.append(bug)
166 else:
167 notes = f"Automatic update for {bnvr}."
168 update = Update(
169 release=rel,
170 builds=[build],
171 bugs=closing_bugs,
172 notes=notes,
173 type=UpdateType.unspecified,
174 stable_karma=3,
175 unstable_karma=-3,
176 autokarma=False,
177 user=user,
178 status=UpdateStatus.pending,
179 )
180
181 # Comment on the update that it was automatically created.
182 update.comment(
183 dbsession,
184 str("This update was automatically created"),
185 author="bodhi",
186 )
187
188 update.add_tag(update.release.pending_signing_tag)
189
190 log.debug("Adding new update to the database.")
191 dbsession.add(update)
192
193 log.debug("Flushing changes to the database.")
194 dbsession.flush()
195
196 # Obsolete older updates which may be stuck in testing due to failed gating
197 try:
198 update.obsolete_older_updates(dbsession)
199 except Exception as e:
200 log.error(f'Problem obsoleting older updates: {e}')
201
202 # This must be run after dbsession is closed so changes are committed to db
203 alias = update.alias
204 work_on_bugs_task.delay(alias, closing_bugs)
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/consumers/automatic_updates.py b/bodhi/server/consumers/automatic_updates.py
--- a/bodhi/server/consumers/automatic_updates.py
+++ b/bodhi/server/consumers/automatic_updates.py
@@ -199,6 +199,7 @@
except Exception as e:
log.error(f'Problem obsoleting older updates: {e}')
+ alias = update.alias
+
# This must be run after dbsession is closed so changes are committed to db
- alias = update.alias
work_on_bugs_task.delay(alias, closing_bugs)
| {"golden_diff": "diff --git a/bodhi/server/consumers/automatic_updates.py b/bodhi/server/consumers/automatic_updates.py\n--- a/bodhi/server/consumers/automatic_updates.py\n+++ b/bodhi/server/consumers/automatic_updates.py\n@@ -199,6 +199,7 @@\n except Exception as e:\n log.error(f'Problem obsoleting older updates: {e}')\n \n+ alias = update.alias\n+\n # This must be run after dbsession is closed so changes are committed to db\n- alias = update.alias\n work_on_bugs_task.delay(alias, closing_bugs)\n", "issue": "Crash in automatic update handler when submitting work_on_bugs_task\nFrom bodhi-consumer logs:\r\n```\r\n2020-10-25 11:17:14,460 INFO [fedora_messaging.twisted.protocol][MainThread] Consuming message from topic org.fedoraproject.prod.buildsys.tag (message id c2d97737-444f-49b4-b4ca-1efb3a05e941)\r\n2020-10-25 11:17:14,463 INFO [bodhi][PoolThread-twisted.internet.reactor-1] Received message from fedora-messaging with topic: org.fedoraproject.prod.buildsys.tag\r\n2020-10-25 11:17:14,463 INFO [bodhi][PoolThread-twisted.internet.reactor-1] ginac-1.7.9-5.fc34 tagged into f34-updates-candidate\r\n2020-10-25 11:17:14,469 INFO [bodhi][PoolThread-twisted.internet.reactor-1] Build was not submitted, skipping\r\n2020-10-25 11:17:14,838 INFO [bodhi.server][PoolThread-twisted.internet.reactor-1] Sending mail to [email protected]: [Fedora Update] [comment] ginac-1.7.9-5.fc34\r\n2020-10-25 11:17:15,016 ERROR [bodhi][PoolThread-twisted.internet.reactor-1] Instance <Update at 0x7fa3740f5910> is not bound to a Session; attribute refresh operation cannot proceed (Background on this error at: http://sqlalche.me/e/13/bhk3): Unable to handle message in Automatic Update handler: Id: c2d97737-444f-49b4-b4ca-1efb3a05e941\r\nTopic: org.fedoraproject.prod.buildsys.tag\r\nHeaders: {\r\n \"fedora_messaging_schema\": \"base.message\",\r\n \"fedora_messaging_severity\": 20,\r\n \"sent-at\": \"2020-10-25T11:17:14+00:00\"\r\n}\r\nBody: {\r\n \"build_id\": 1634116,\r\n \"instance\": \"primary\",\r\n \"name\": \"ginac\",\r\n \"owner\": \"---\",\r\n \"release\": \"5.fc34\",\r\n \"tag\": \"f34-updates-candidate\",\r\n \"tag_id\": 27040,\r\n \"user\": \"---\",\r\n \"version\": \"1.7.9\"\r\n}\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/bodhi/server/consumers/__init__.py\", line 79, in __call__\r\n handler_info.handler(msg)\r\n File \"/usr/local/lib/python3.8/site-packages/bodhi/server/consumers/automatic_updates.py\", line 197, in __call__\r\n alias = update.alias\r\n File \"/usr/lib64/python3.8/site-packages/sqlalchemy/orm/attributes.py\", line 287, in __get__\r\n return self.impl.get(instance_state(instance), dict_)\r\n File \"/usr/lib64/python3.8/site-packages/sqlalchemy/orm/attributes.py\", line 718, in get\r\n value = state._load_expired(state, passive)\r\n File \"/usr/lib64/python3.8/site-packages/sqlalchemy/orm/state.py\", line 652, in _load_expired\r\n self.manager.deferred_scalar_loader(self, toload)\r\n File \"/usr/lib64/python3.8/site-packages/sqlalchemy/orm/loading.py\", line 944, in load_scalar_attributes\r\n raise orm_exc.DetachedInstanceError(\r\nsqlalchemy.orm.exc.DetachedInstanceError: Instance <Update at 0x7fa3740f5910> is not bound to a Session; attribute refresh operation cannot proceed (Background on this error at: http://sqlalche.me/e/13/bhk3 )\r\n2020-10-25 11:17:15,053 WARNI [fedora_messaging.twisted.protocol][MainThread] Returning message id c2d97737-444f-49b4-b4ca-1efb3a05e941 to the queue\r\n```\n", "before_files": [{"content": "# Copyright \u00a9 
2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe Bodhi handler that creates updates automatically from tagged builds.\n\nThis module is responsible for the process of creating updates when builds are\ntagged with certain tags.\n\"\"\"\n\nimport logging\nimport re\n\nimport fedora_messaging\n\nfrom bodhi.server import buildsys\nfrom bodhi.server.config import config\nfrom bodhi.server.models import (\n Bug, Build, ContentType, Package, Release, Update, UpdateStatus, UpdateType, User)\nfrom bodhi.server.tasks import work_on_bugs_task\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass AutomaticUpdateHandler:\n \"\"\"\n The Bodhi Automatic Update Handler.\n\n A consumer that listens for messages about tagged builds and creates\n updates from them.\n \"\"\"\n\n def __init__(self, db_factory: transactional_session_maker = None):\n \"\"\"\n Initialize the Automatic Update Handler.\n\n Args:\n db_factory: If given, used as the db_factory for this handler. If\n None (the default), a new TransactionalSessionMaker is created and\n used.\n \"\"\"\n if not db_factory:\n self.db_factory = transactional_session_maker()\n else:\n self.db_factory = db_factory\n\n def __call__(self, message: fedora_messaging.api.Message) -> None:\n \"\"\"Create updates from appropriately tagged builds.\n\n Args:\n message: The message we are processing.\n \"\"\"\n body = message.body\n\n missing = []\n for mandatory in ('tag', 'build_id', 'name', 'version', 'release'):\n if mandatory not in body:\n missing.append(mandatory)\n if missing:\n log.debug(f\"Received incomplete tag message. Missing: {', '.join(missing)}\")\n return\n\n btag = body['tag']\n bnvr = '{name}-{version}-{release}'.format(**body)\n\n koji = buildsys.get_session()\n\n kbuildinfo = koji.getBuild(bnvr)\n if not kbuildinfo:\n log.debug(f\"Can't find Koji build for {bnvr}.\")\n return\n\n if 'nvr' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'nvr'.\")\n return\n\n if 'owner_name' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'owner_name'.\")\n return\n\n if kbuildinfo['owner_name'] in config.get('automatic_updates_blacklist'):\n log.debug(f\"{bnvr} owned by {kbuildinfo['owner_name']} who is listed in \"\n \"automatic_updates_blacklist, skipping.\")\n return\n\n # some APIs want the Koji build info, some others want the same\n # wrapped in a larger (request?) 
structure\n rbuildinfo = {\n 'info': kbuildinfo,\n 'nvr': kbuildinfo['nvr'].rsplit('-', 2),\n }\n\n with self.db_factory() as dbsession:\n rel = dbsession.query(Release).filter_by(create_automatic_updates=True,\n candidate_tag=btag).first()\n if not rel:\n log.debug(f\"Ignoring build being tagged into {btag!r}, no release configured for \"\n \"automatic updates for it found.\")\n return\n\n bcls = ContentType.infer_content_class(Build, kbuildinfo)\n build = bcls.get(bnvr)\n if build and build.update:\n log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n return\n\n if not build:\n log.debug(f\"Build for {bnvr} doesn't exist yet, creating.\")\n\n # Package.get_or_create() infers content type already\n log.debug(\"Getting/creating related package object.\")\n pkg = Package.get_or_create(dbsession, rbuildinfo)\n\n log.debug(\"Creating build object, adding it to the DB.\")\n build = bcls(nvr=bnvr, package=pkg, release=rel)\n dbsession.add(build)\n\n owner_name = kbuildinfo['owner_name']\n user = User.get(owner_name)\n if not user:\n log.debug(f\"Creating bodhi user for '{owner_name}'.\")\n # Leave email, groups blank, these will be filled\n # in or updated when they log into Bodhi next time, see\n # bodhi.server.security:remember_me().\n user = User(name=owner_name)\n dbsession.add(user)\n\n log.debug(f\"Creating new update for {bnvr}.\")\n changelog = build.get_changelog(lastupdate=True)\n closing_bugs = []\n if changelog:\n log.debug(\"Adding changelog to update notes.\")\n notes = f\"\"\"Automatic update for {bnvr}.\n\n##### **Changelog**\n\n```\n{changelog}\n```\"\"\"\n\n for b in re.finditer(config.get('bz_regex'), changelog, re.IGNORECASE):\n idx = int(b.group(1))\n log.debug(f'Adding bug #{idx} to the update.')\n bug = Bug.get(idx)\n if bug is None:\n bug = Bug(bug_id=idx)\n dbsession.add(bug)\n dbsession.flush()\n if bug not in closing_bugs:\n closing_bugs.append(bug)\n else:\n notes = f\"Automatic update for {bnvr}.\"\n update = Update(\n release=rel,\n builds=[build],\n bugs=closing_bugs,\n notes=notes,\n type=UpdateType.unspecified,\n stable_karma=3,\n unstable_karma=-3,\n autokarma=False,\n user=user,\n status=UpdateStatus.pending,\n )\n\n # Comment on the update that it was automatically created.\n update.comment(\n dbsession,\n str(\"This update was automatically created\"),\n author=\"bodhi\",\n )\n\n update.add_tag(update.release.pending_signing_tag)\n\n log.debug(\"Adding new update to the database.\")\n dbsession.add(update)\n\n log.debug(\"Flushing changes to the database.\")\n dbsession.flush()\n\n # Obsolete older updates which may be stuck in testing due to failed gating\n try:\n update.obsolete_older_updates(dbsession)\n except Exception as e:\n log.error(f'Problem obsoleting older updates: {e}')\n\n # This must be run after dbsession is closed so changes are committed to db\n alias = update.alias\n work_on_bugs_task.delay(alias, closing_bugs)\n", "path": "bodhi/server/consumers/automatic_updates.py"}], "after_files": [{"content": "# Copyright \u00a9 2019 Red Hat, Inc. 
and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe Bodhi handler that creates updates automatically from tagged builds.\n\nThis module is responsible for the process of creating updates when builds are\ntagged with certain tags.\n\"\"\"\n\nimport logging\nimport re\n\nimport fedora_messaging\n\nfrom bodhi.server import buildsys\nfrom bodhi.server.config import config\nfrom bodhi.server.models import (\n Bug, Build, ContentType, Package, Release, Update, UpdateStatus, UpdateType, User)\nfrom bodhi.server.tasks import work_on_bugs_task\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass AutomaticUpdateHandler:\n \"\"\"\n The Bodhi Automatic Update Handler.\n\n A consumer that listens for messages about tagged builds and creates\n updates from them.\n \"\"\"\n\n def __init__(self, db_factory: transactional_session_maker = None):\n \"\"\"\n Initialize the Automatic Update Handler.\n\n Args:\n db_factory: If given, used as the db_factory for this handler. If\n None (the default), a new TransactionalSessionMaker is created and\n used.\n \"\"\"\n if not db_factory:\n self.db_factory = transactional_session_maker()\n else:\n self.db_factory = db_factory\n\n def __call__(self, message: fedora_messaging.api.Message) -> None:\n \"\"\"Create updates from appropriately tagged builds.\n\n Args:\n message: The message we are processing.\n \"\"\"\n body = message.body\n\n missing = []\n for mandatory in ('tag', 'build_id', 'name', 'version', 'release'):\n if mandatory not in body:\n missing.append(mandatory)\n if missing:\n log.debug(f\"Received incomplete tag message. Missing: {', '.join(missing)}\")\n return\n\n btag = body['tag']\n bnvr = '{name}-{version}-{release}'.format(**body)\n\n koji = buildsys.get_session()\n\n kbuildinfo = koji.getBuild(bnvr)\n if not kbuildinfo:\n log.debug(f\"Can't find Koji build for {bnvr}.\")\n return\n\n if 'nvr' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'nvr'.\")\n return\n\n if 'owner_name' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'owner_name'.\")\n return\n\n if kbuildinfo['owner_name'] in config.get('automatic_updates_blacklist'):\n log.debug(f\"{bnvr} owned by {kbuildinfo['owner_name']} who is listed in \"\n \"automatic_updates_blacklist, skipping.\")\n return\n\n # some APIs want the Koji build info, some others want the same\n # wrapped in a larger (request?) 
structure\n rbuildinfo = {\n 'info': kbuildinfo,\n 'nvr': kbuildinfo['nvr'].rsplit('-', 2),\n }\n\n with self.db_factory() as dbsession:\n rel = dbsession.query(Release).filter_by(create_automatic_updates=True,\n candidate_tag=btag).first()\n if not rel:\n log.debug(f\"Ignoring build being tagged into {btag!r}, no release configured for \"\n \"automatic updates for it found.\")\n return\n\n bcls = ContentType.infer_content_class(Build, kbuildinfo)\n build = bcls.get(bnvr)\n if build and build.update:\n log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n return\n\n if not build:\n log.debug(f\"Build for {bnvr} doesn't exist yet, creating.\")\n\n # Package.get_or_create() infers content type already\n log.debug(\"Getting/creating related package object.\")\n pkg = Package.get_or_create(dbsession, rbuildinfo)\n\n log.debug(\"Creating build object, adding it to the DB.\")\n build = bcls(nvr=bnvr, package=pkg, release=rel)\n dbsession.add(build)\n\n owner_name = kbuildinfo['owner_name']\n user = User.get(owner_name)\n if not user:\n log.debug(f\"Creating bodhi user for '{owner_name}'.\")\n # Leave email, groups blank, these will be filled\n # in or updated when they log into Bodhi next time, see\n # bodhi.server.security:remember_me().\n user = User(name=owner_name)\n dbsession.add(user)\n\n log.debug(f\"Creating new update for {bnvr}.\")\n changelog = build.get_changelog(lastupdate=True)\n closing_bugs = []\n if changelog:\n log.debug(\"Adding changelog to update notes.\")\n notes = f\"\"\"Automatic update for {bnvr}.\n\n##### **Changelog**\n\n```\n{changelog}\n```\"\"\"\n\n for b in re.finditer(config.get('bz_regex'), changelog, re.IGNORECASE):\n idx = int(b.group(1))\n log.debug(f'Adding bug #{idx} to the update.')\n bug = Bug.get(idx)\n if bug is None:\n bug = Bug(bug_id=idx)\n dbsession.add(bug)\n dbsession.flush()\n if bug not in closing_bugs:\n closing_bugs.append(bug)\n else:\n notes = f\"Automatic update for {bnvr}.\"\n update = Update(\n release=rel,\n builds=[build],\n bugs=closing_bugs,\n notes=notes,\n type=UpdateType.unspecified,\n stable_karma=3,\n unstable_karma=-3,\n autokarma=False,\n user=user,\n status=UpdateStatus.pending,\n )\n\n # Comment on the update that it was automatically created.\n update.comment(\n dbsession,\n str(\"This update was automatically created\"),\n author=\"bodhi\",\n )\n\n update.add_tag(update.release.pending_signing_tag)\n\n log.debug(\"Adding new update to the database.\")\n dbsession.add(update)\n\n log.debug(\"Flushing changes to the database.\")\n dbsession.flush()\n\n # Obsolete older updates which may be stuck in testing due to failed gating\n try:\n update.obsolete_older_updates(dbsession)\n except Exception as e:\n log.error(f'Problem obsoleting older updates: {e}')\n\n alias = update.alias\n\n # This must be run after dbsession is closed so changes are committed to db\n work_on_bugs_task.delay(alias, closing_bugs)\n", "path": "bodhi/server/consumers/automatic_updates.py"}]} |
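
The crash above is the familiar SQLAlchemy pattern of touching an expired attribute after the session that loaded it has been closed; the fix simply reads `update.alias` while the transactional session is still open and hands the plain string to the task. A minimal sketch of the safe ordering (SQLAlchemy 1.4+, in-memory SQLite, a cut-down `Update` model with a made-up alias value):

```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Update(Base):
    __tablename__ = "updates"
    id = Column(Integer, primary_key=True)
    alias = Column(String)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)  # expire_on_commit=True, like bodhi's factory

with Session() as session:
    update = Update(alias="FEDORA-2020-123abc")
    session.add(update)
    session.commit()          # commit expires the instance's attributes
    alias = update.alias      # still bound to a session, so this reloads safely

# Reading update.alias for the first time out here, after the session has
# closed, is what raised DetachedInstanceError in the traceback; the captured
# plain string is what should be handed to the Celery task instead.
print(alias)
```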
gh_patches_debug_1498 | rasdani/github-patches | git_diff | falconry__falcon-602 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hoist HTTPStatus into falcon top-level namespace
I.e., add an import line to `falcon/__init__.py`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/__init__.py`
Content:
```
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 HTTP_METHODS = (
16 'CONNECT',
17 'DELETE',
18 'GET',
19 'HEAD',
20 'OPTIONS',
21 'PATCH',
22 'POST',
23 'PUT',
24 'TRACE',
25 )
26
27 DEFAULT_MEDIA_TYPE = 'application/json; charset=utf-8'
28
29
30 # Hoist classes and functions into the falcon namespace
31 from falcon.version import __version__ # NOQA
32 from falcon.api import API, DEFAULT_MEDIA_TYPE # NOQA
33 from falcon.status_codes import * # NOQA
34 from falcon.errors import * # NOQA
35 from falcon.redirects import * # NOQA
36 from falcon.http_error import HTTPError # NOQA
37 from falcon.util import * # NOQA
38 from falcon.hooks import before, after # NOQA
39 from falcon.request import Request, RequestOptions # NOQA
40 from falcon.response import Response # NOQA
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/__init__.py b/falcon/__init__.py
--- a/falcon/__init__.py
+++ b/falcon/__init__.py
@@ -34,6 +34,7 @@
from falcon.errors import * # NOQA
from falcon.redirects import * # NOQA
from falcon.http_error import HTTPError # NOQA
+from falcon.http_status import HTTPStatus # NOQA
from falcon.util import * # NOQA
from falcon.hooks import before, after # NOQA
from falcon.request import Request, RequestOptions # NOQA
| {"golden_diff": "diff --git a/falcon/__init__.py b/falcon/__init__.py\n--- a/falcon/__init__.py\n+++ b/falcon/__init__.py\n@@ -34,6 +34,7 @@\n from falcon.errors import * # NOQA\n from falcon.redirects import * # NOQA\n from falcon.http_error import HTTPError # NOQA\n+from falcon.http_status import HTTPStatus # NOQA\n from falcon.util import * # NOQA\n from falcon.hooks import before, after # NOQA\n from falcon.request import Request, RequestOptions # NOQA\n", "issue": "Hoist HTTPStatus into falcon top-level namespace\nI.e., add an import line to `falcon/__init__.py`\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nHTTP_METHODS = (\n 'CONNECT',\n 'DELETE',\n 'GET',\n 'HEAD',\n 'OPTIONS',\n 'PATCH',\n 'POST',\n 'PUT',\n 'TRACE',\n)\n\nDEFAULT_MEDIA_TYPE = 'application/json; charset=utf-8'\n\n\n# Hoist classes and functions into the falcon namespace\nfrom falcon.version import __version__ # NOQA\nfrom falcon.api import API, DEFAULT_MEDIA_TYPE # NOQA\nfrom falcon.status_codes import * # NOQA\nfrom falcon.errors import * # NOQA\nfrom falcon.redirects import * # NOQA\nfrom falcon.http_error import HTTPError # NOQA\nfrom falcon.util import * # NOQA\nfrom falcon.hooks import before, after # NOQA\nfrom falcon.request import Request, RequestOptions # NOQA\nfrom falcon.response import Response # NOQA\n", "path": "falcon/__init__.py"}], "after_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nHTTP_METHODS = (\n 'CONNECT',\n 'DELETE',\n 'GET',\n 'HEAD',\n 'OPTIONS',\n 'PATCH',\n 'POST',\n 'PUT',\n 'TRACE',\n)\n\nDEFAULT_MEDIA_TYPE = 'application/json; charset=utf-8'\n\n\n# Hoist classes and functions into the falcon namespace\nfrom falcon.version import __version__ # NOQA\nfrom falcon.api import API, DEFAULT_MEDIA_TYPE # NOQA\nfrom falcon.status_codes import * # NOQA\nfrom falcon.errors import * # NOQA\nfrom falcon.redirects import * # NOQA\nfrom falcon.http_error import HTTPError # NOQA\nfrom falcon.http_status import HTTPStatus # NOQA\nfrom falcon.util import * # NOQA\nfrom falcon.hooks import before, after # NOQA\nfrom falcon.request import Request, RequestOptions # NOQA\nfrom falcon.response import Response # NOQA\n", "path": "falcon/__init__.py"}]} |
gh_patches_debug_1499 | rasdani/github-patches | git_diff | litestar-org__litestar-2244 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/cli/main.py`
Content:
```
1 from __future__ import annotations
2
3 import sys
4 from pathlib import Path
5 from typing import TYPE_CHECKING
6
7 from ._utils import RICH_CLICK_INSTALLED, LitestarEnv, LitestarExtensionGroup
8 from .commands import core, schema, sessions
9
10 if TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover
11 import click
12 from click import Context, group, option, pass_context
13 from click import Path as ClickPath
14 else:
15 import rich_click as click
16 from rich_click import Context, group, option, pass_context
17 from rich_click import Path as ClickPath
18 from rich_click.cli import patch as rich_click_patch
19
20 rich_click_patch()
21 click.rich_click.USE_RICH_MARKUP = True
22 click.rich_click.USE_MARKDOWN = False
23 click.rich_click.SHOW_ARGUMENTS = True
24 click.rich_click.GROUP_ARGUMENTS_OPTIONS = True
25 click.rich_click.SHOW_ARGUMENTS = True
26 click.rich_click.GROUP_ARGUMENTS_OPTIONS = True
27 click.rich_click.STYLE_ERRORS_SUGGESTION = "magenta italic"
28 click.rich_click.ERRORS_SUGGESTION = ""
29 click.rich_click.ERRORS_EPILOGUE = ""
30 click.rich_click.MAX_WIDTH = 100
31 click.rich_click.SHOW_METAVARS_COLUMN = True
32 click.rich_click.APPEND_METAVARS_HELP = True
33
34
35 __all__ = ("litestar_group",)
36
37
38 @group(cls=LitestarExtensionGroup, context_settings={"help_option_names": ["-h", "--help"]})
39 @option("--app", "app_path", help="Module path to a Litestar application")
40 @option(
41 "--app-dir",
42 help="Look for APP in the specified directory, by adding this to the PYTHONPATH. Defaults to the current working directory.",
43 default=None,
44 type=ClickPath(dir_okay=True, file_okay=False, path_type=Path),
45 show_default=False,
46 )
47 @pass_context
48 def litestar_group(ctx: Context, app_path: str | None, app_dir: Path | None = None) -> None:
49 """Litestar CLI."""
50 sys.path.append(str(app_dir))
51
52 if ctx.obj is None: # env has not been loaded yet, so we can lazy load it
53 ctx.obj = lambda: LitestarEnv.from_env(app_path)
54
55
56 # add sub commands here
57
58 litestar_group.add_command(core.info_command)
59 litestar_group.add_command(core.run_command)
60 litestar_group.add_command(core.routes_command)
61 litestar_group.add_command(core.version_command)
62 litestar_group.add_command(sessions.sessions_group)
63 litestar_group.add_command(schema.schema_group)
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/cli/main.py b/litestar/cli/main.py
--- a/litestar/cli/main.py
+++ b/litestar/cli/main.py
@@ -27,7 +27,7 @@
click.rich_click.STYLE_ERRORS_SUGGESTION = "magenta italic"
click.rich_click.ERRORS_SUGGESTION = ""
click.rich_click.ERRORS_EPILOGUE = ""
- click.rich_click.MAX_WIDTH = 100
+ click.rich_click.MAX_WIDTH = 80
click.rich_click.SHOW_METAVARS_COLUMN = True
click.rich_click.APPEND_METAVARS_HELP = True
| {"golden_diff": "diff --git a/litestar/cli/main.py b/litestar/cli/main.py\n--- a/litestar/cli/main.py\n+++ b/litestar/cli/main.py\n@@ -27,7 +27,7 @@\n click.rich_click.STYLE_ERRORS_SUGGESTION = \"magenta italic\"\n click.rich_click.ERRORS_SUGGESTION = \"\"\n click.rich_click.ERRORS_EPILOGUE = \"\"\n- click.rich_click.MAX_WIDTH = 100\n+ click.rich_click.MAX_WIDTH = 80\n click.rich_click.SHOW_METAVARS_COLUMN = True\n click.rich_click.APPEND_METAVARS_HELP = True\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nfrom ._utils import RICH_CLICK_INSTALLED, LitestarEnv, LitestarExtensionGroup\nfrom .commands import core, schema, sessions\n\nif TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover\n import click\n from click import Context, group, option, pass_context\n from click import Path as ClickPath\nelse:\n import rich_click as click\n from rich_click import Context, group, option, pass_context\n from rich_click import Path as ClickPath\n from rich_click.cli import patch as rich_click_patch\n\n rich_click_patch()\n click.rich_click.USE_RICH_MARKUP = True\n click.rich_click.USE_MARKDOWN = False\n click.rich_click.SHOW_ARGUMENTS = True\n click.rich_click.GROUP_ARGUMENTS_OPTIONS = True\n click.rich_click.SHOW_ARGUMENTS = True\n click.rich_click.GROUP_ARGUMENTS_OPTIONS = True\n click.rich_click.STYLE_ERRORS_SUGGESTION = \"magenta italic\"\n click.rich_click.ERRORS_SUGGESTION = \"\"\n click.rich_click.ERRORS_EPILOGUE = \"\"\n click.rich_click.MAX_WIDTH = 100\n click.rich_click.SHOW_METAVARS_COLUMN = True\n click.rich_click.APPEND_METAVARS_HELP = True\n\n\n__all__ = (\"litestar_group\",)\n\n\n@group(cls=LitestarExtensionGroup, context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\n@option(\"--app\", \"app_path\", help=\"Module path to a Litestar application\")\n@option(\n \"--app-dir\",\n help=\"Look for APP in the specified directory, by adding this to the PYTHONPATH. 
Defaults to the current working directory.\",\n default=None,\n type=ClickPath(dir_okay=True, file_okay=False, path_type=Path),\n show_default=False,\n)\n@pass_context\ndef litestar_group(ctx: Context, app_path: str | None, app_dir: Path | None = None) -> None:\n \"\"\"Litestar CLI.\"\"\"\n sys.path.append(str(app_dir))\n\n if ctx.obj is None: # env has not been loaded yet, so we can lazy load it\n ctx.obj = lambda: LitestarEnv.from_env(app_path)\n\n\n# add sub commands here\n\nlitestar_group.add_command(core.info_command)\nlitestar_group.add_command(core.run_command)\nlitestar_group.add_command(core.routes_command)\nlitestar_group.add_command(core.version_command)\nlitestar_group.add_command(sessions.sessions_group)\nlitestar_group.add_command(schema.schema_group)\n", "path": "litestar/cli/main.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nfrom ._utils import RICH_CLICK_INSTALLED, LitestarEnv, LitestarExtensionGroup\nfrom .commands import core, schema, sessions\n\nif TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover\n import click\n from click import Context, group, option, pass_context\n from click import Path as ClickPath\nelse:\n import rich_click as click\n from rich_click import Context, group, option, pass_context\n from rich_click import Path as ClickPath\n from rich_click.cli import patch as rich_click_patch\n\n rich_click_patch()\n click.rich_click.USE_RICH_MARKUP = True\n click.rich_click.USE_MARKDOWN = False\n click.rich_click.SHOW_ARGUMENTS = True\n click.rich_click.GROUP_ARGUMENTS_OPTIONS = True\n click.rich_click.SHOW_ARGUMENTS = True\n click.rich_click.GROUP_ARGUMENTS_OPTIONS = True\n click.rich_click.STYLE_ERRORS_SUGGESTION = \"magenta italic\"\n click.rich_click.ERRORS_SUGGESTION = \"\"\n click.rich_click.ERRORS_EPILOGUE = \"\"\n click.rich_click.MAX_WIDTH = 80\n click.rich_click.SHOW_METAVARS_COLUMN = True\n click.rich_click.APPEND_METAVARS_HELP = True\n\n\n__all__ = (\"litestar_group\",)\n\n\n@group(cls=LitestarExtensionGroup, context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\n@option(\"--app\", \"app_path\", help=\"Module path to a Litestar application\")\n@option(\n \"--app-dir\",\n help=\"Look for APP in the specified directory, by adding this to the PYTHONPATH. Defaults to the current working directory.\",\n default=None,\n type=ClickPath(dir_okay=True, file_okay=False, path_type=Path),\n show_default=False,\n)\n@pass_context\ndef litestar_group(ctx: Context, app_path: str | None, app_dir: Path | None = None) -> None:\n \"\"\"Litestar CLI.\"\"\"\n sys.path.append(str(app_dir))\n\n if ctx.obj is None: # env has not been loaded yet, so we can lazy load it\n ctx.obj = lambda: LitestarEnv.from_env(app_path)\n\n\n# add sub commands here\n\nlitestar_group.add_command(core.info_command)\nlitestar_group.add_command(core.run_command)\nlitestar_group.add_command(core.routes_command)\nlitestar_group.add_command(core.version_command)\nlitestar_group.add_command(sessions.sessions_group)\nlitestar_group.add_command(schema.schema_group)\n", "path": "litestar/cli/main.py"}]} |
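
The patched setting is one of rich-click's module-level knobs, and because `rich_click` re-exports click's API the narrower help width can be tried in isolation. A toy command whose `--app` option mirrors the CLI flag shown above; everything else is illustrative:

```python
import rich_click as click

click.rich_click.MAX_WIDTH = 80  # the value the patch settles on


@click.command()
@click.option("--app", "app_path", help="Module path to a Litestar application")
def demo(app_path):
    """Toy command for eyeballing how help text wraps at 80 columns."""
    click.echo(f"app={app_path}")


if __name__ == "__main__":
    demo()
```

Running the script with `--help` shows the boxed rich-click help output constrained to 80 columns.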