| problem_id (string, 18-22 chars) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-10.2k chars) | golden_diff (string, 151-4.94k chars) | verification_info (string, 582-21k chars) | num_tokens (int64, 271-2.05k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
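The rows below follow this schema: each record pairs a bug-fixing prompt (issue text plus numbered buggy file listings) with its reference patch and verification data. As a rough sketch of how such a dump can be inspected programmatically — assuming the dataset is published on the Hugging Face Hub under the id shown in its `source` column and exposes a `train` split (both are assumptions, adjust as needed):

```python
from datasets import load_dataset

# Dataset id taken from the `source` column; the split name is an assumption.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["num_tokens"], row["num_tokens_diff"])
print(row["prompt"][:300])             # issue text + numbered buggy files + task instructions
print(row["golden_diff"][:300])        # reference fix in git-diff format
print(row["verification_info"][:300])  # JSON with golden_diff, issue, before/after files
```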
gh_patches_debug_23491 | rasdani/github-patches | git_diff | ESMCI__cime-1396 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build pFUnit on some other machines
We should build pFUnit on some other machines so we can run unit tests there:
- cheyenne
- hobart
Instructions for building pFUnit are:
1. Download pFUnit from
http://sourceforge.net/projects/pfunit/files/latest/download
2. Set the PFUNIT environment variable. For example::
```
export PFUNIT=/glade/p/cesmdata/cseg/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP
```
3. Build pFUnit:
```
mkdir build
cd build
cmake -DMPI=YES -DOPENMP=YES ..
make -j 4
```
4. Run pFUnit's own unit tests:
```
make tests
```
Note: this didn't work on yellowstone. Instead, I had to do it by hand:
```
DAV_CORES=4 execca
mpirun.lsf tests/tests.x
```
5. Install pFUnit on your system:
```
make install INSTALL_DIR=$PFUNIT
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/CIME/BuildTools/cmakemacroswriter.py`
Content:
```
1 """Classes used to write build system files.
2
3 The classes here are used to write out settings for use by Makefile and CMake
4 build systems. The two relevant classes are CMakeMacroWriter and
5 MakeMacroWriter, which encapsulate the information necessary to write CMake and
6 Makefile formatted text, respectively. See the docstrings for those classes for
7 more.
8 """
9
10 # This is not the most useful check.
11 # pylint: disable=invalid-name
12
13 from CIME.BuildTools.macrowriterbase import MacroWriterBase
14 from CIME.XML.standard_module_setup import *
15 logger = logging.getLogger(__name__)
16
17
18 class CMakeMacroWriter(MacroWriterBase):
19
20 """Macro writer for the CMake format.
21
22 For details on the provided methods, see MacroWriterBase, which this
23 class inherits from.
24 """
25
26 def __init__(self, output):
27 """Initialize a CMake macro writer.
28
29 Arguments:
30 output - File-like object (probably an io.TextIOWrapper), which
31 will be written to.
32 """
33 super(CMakeMacroWriter, self).__init__(output)
34 # This counter is for avoiding name conflicts in temporary
35 # variables used for shell commands.
36 self._var_num = 0
37
38 def environment_variable_string(self, name):
39 """Return an environment variable reference.
40
41 >>> import io
42 >>> s = io.StringIO()
43 >>> CMakeMacroWriter(s).environment_variable_string("foo")
44 '$ENV{foo}'
45 """
46 return "$ENV{" + name + "}"
47
48 def shell_command_strings(self, command):
49 # pylint: disable=line-too-long
50 """Return strings used to get the output of a shell command.
51
52 >>> import io
53 >>> s = io.StringIO()
54 >>> set_up, inline, tear_down = CMakeMacroWriter(s).shell_command_strings("echo bar")
55 >>> set_up
56 'execute_process(COMMAND echo bar OUTPUT_VARIABLE CIME_TEMP_SHELL0 OUTPUT_STRIP_TRAILING_WHITESPACE)'
57 >>> inline
58 '${CIME_TEMP_SHELL0}'
59 >>> tear_down
60 'unset(CIME_TEMP_SHELL0)'
61 """
62 # pylint: enable=line-too-long
63 # Create a unique variable name, then increment variable number
64 # counter so that we get a different value next time.
65 var_name = "CIME_TEMP_SHELL" + str(self._var_num)
66 self._var_num += 1
67 set_up = "execute_process(COMMAND " + command + \
68 " OUTPUT_VARIABLE " + var_name + \
69 " OUTPUT_STRIP_TRAILING_WHITESPACE)"
70 tear_down = "unset(" + var_name + ")"
71 return (set_up, "${" + var_name + "}", tear_down)
72
73 def variable_string(self, name):
74 """Return a string to refer to a variable with the given name.
75
76 >>> import io
77 >>> s = io.StringIO()
78 >>> CMakeMacroWriter(s).variable_string("foo")
79 '${foo}'
80 """
81 return "${" + name + "}"
82
83 def set_variable(self, name, value):
84 """Write out a statement setting a variable to some value.
85
86 >>> import io
87 >>> s = io.StringIO()
88 >>> CMakeMacroWriter(s).set_variable("foo", "bar")
89 >>> s.getvalue()
90 u'set(foo "bar")\\n'
91 """
92 self.write_line("set(" + name + ' "' + value + '")')
93
94 def start_ifeq(self, left, right):
95 """Write out a statement to start a conditional block.
96
97 >>> import io
98 >>> s = io.StringIO()
99 >>> CMakeMacroWriter(s).start_ifeq("foo", "bar")
100 >>> s.getvalue()
101 u'if("foo" STREQUAL "bar")\\n'
102 """
103 self.write_line('if("' + left + '" STREQUAL "' + right + '")')
104 self.indent_right()
105
106 def end_ifeq(self):
107 """Write out a statement to end a block started with start_ifeq.
108
109 >>> import io
110 >>> s = io.StringIO()
111 >>> writer = CMakeMacroWriter(s)
112 >>> writer.start_ifeq("foo", "bar")
113 >>> writer.set_variable("foo2", "bar2")
114 >>> writer.end_ifeq()
115 >>> s.getvalue()
116 u'if("foo" STREQUAL "bar")\\n set(foo2 "bar2")\\nendif()\\n'
117 """
118 self.indent_left()
119 self.write_line("endif()")
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py
--- a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py
+++ b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py
@@ -89,7 +89,8 @@
>>> s.getvalue()
u'set(foo "bar")\\n'
"""
- self.write_line("set(" + name + ' "' + value + '")')
+ value_transformed = self._transform_value(value)
+ self.write_line("set(" + name + ' "' + value_transformed + '")')
def start_ifeq(self, left, right):
"""Write out a statement to start a conditional block.
@@ -117,3 +118,22 @@
"""
self.indent_left()
self.write_line("endif()")
+
+ def _transform_value(self, value):
+ """Some elements need their values transformed in some way for CMake to handle them properly.
+ This method does those transformations.
+
+ Args:
+ - value (str): value of element
+
+ Returns transformed value
+ """
+
+ # Not all variables need leading & trailing whitespace removed, but some
+ # do. In particular, compiler variables (MPICC, MPICXX, MPIFC, SCC,
+ # SCXX, SFC) are only handled correctly if leading & trailing whitespace
+ # is removed. It doesn't seem to hurt to remove whitespace from all
+ # variables.
+ value_transformed = value.strip()
+
+ return value_transformed
| {"golden_diff": "diff --git a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py\n--- a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py\n+++ b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py\n@@ -89,7 +89,8 @@\n >>> s.getvalue()\n u'set(foo \"bar\")\\\\n'\n \"\"\"\n- self.write_line(\"set(\" + name + ' \"' + value + '\")')\n+ value_transformed = self._transform_value(value)\n+ self.write_line(\"set(\" + name + ' \"' + value_transformed + '\")')\n \n def start_ifeq(self, left, right):\n \"\"\"Write out a statement to start a conditional block.\n@@ -117,3 +118,22 @@\n \"\"\"\n self.indent_left()\n self.write_line(\"endif()\")\n+\n+ def _transform_value(self, value):\n+ \"\"\"Some elements need their values transformed in some way for CMake to handle them properly.\n+ This method does those transformations.\n+\n+ Args:\n+ - value (str): value of element\n+\n+ Returns transformed value\n+ \"\"\"\n+\n+ # Not all variables need leading & trailing whitespace removed, but some\n+ # do. In particular, compiler variables (MPICC, MPICXX, MPIFC, SCC,\n+ # SCXX, SFC) are only handled correctly if leading & trailing whitespace\n+ # is removed. It doesn't seem to hurt to remove whitespace from all\n+ # variables.\n+ value_transformed = value.strip()\n+\n+ return value_transformed\n", "issue": "Build pFUnit on some other machines\nWe should build pFUnit on some other machines so we can run unit tests there:\r\n\r\n- cheyenne\r\n- hobart\r\n\r\nInstructions for building pFUnit are:\r\n\r\n1. Download pFUnit from\r\n http://sourceforge.net/projects/pfunit/files/latest/download\r\n\r\n2. Set the PFUNIT environment variable. For example::\r\n\r\n ```\r\n export PFUNIT=/glade/p/cesmdata/cseg/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP\r\n ```\r\n\r\n3. Build pFUnit:\r\n\r\n```\r\n mkdir build\r\n cd build\r\n cmake -DMPI=YES -DOPENMP=YES ..\r\n make -j 4\r\n```\r\n\r\n4. Run pFUnit's own unit tests:\r\n\r\n```\r\n make tests\r\n```\r\n\r\n Note: this didn't work on yellowstone. Instead, I had to do it by hand:\r\n\r\n```\r\n DAV_CORES=4 execca\r\n mpirun.lsf tests/tests.x\r\n```\r\n\r\n5. Install pFUnit on your system:\r\n\r\n```\r\n make install INSTALL_DIR=$PFUNIT\r\n```\n", "before_files": [{"content": "\"\"\"Classes used to write build system files.\n\nThe classes here are used to write out settings for use by Makefile and CMake\nbuild systems. The two relevant classes are CMakeMacroWriter and\nMakeMacroWriter, which encapsulate the information necessary to write CMake and\nMakefile formatted text, respectively. 
See the docstrings for those classes for\nmore.\n\"\"\"\n\n# This is not the most useful check.\n# pylint: disable=invalid-name\n\nfrom CIME.BuildTools.macrowriterbase import MacroWriterBase\nfrom CIME.XML.standard_module_setup import *\nlogger = logging.getLogger(__name__)\n\n\nclass CMakeMacroWriter(MacroWriterBase):\n\n \"\"\"Macro writer for the CMake format.\n\n For details on the provided methods, see MacroWriterBase, which this\n class inherits from.\n \"\"\"\n\n def __init__(self, output):\n \"\"\"Initialize a CMake macro writer.\n\n Arguments:\n output - File-like object (probably an io.TextIOWrapper), which\n will be written to.\n \"\"\"\n super(CMakeMacroWriter, self).__init__(output)\n # This counter is for avoiding name conflicts in temporary\n # variables used for shell commands.\n self._var_num = 0\n\n def environment_variable_string(self, name):\n \"\"\"Return an environment variable reference.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).environment_variable_string(\"foo\")\n '$ENV{foo}'\n \"\"\"\n return \"$ENV{\" + name + \"}\"\n\n def shell_command_strings(self, command):\n # pylint: disable=line-too-long\n \"\"\"Return strings used to get the output of a shell command.\n\n >>> import io\n >>> s = io.StringIO()\n >>> set_up, inline, tear_down = CMakeMacroWriter(s).shell_command_strings(\"echo bar\")\n >>> set_up\n 'execute_process(COMMAND echo bar OUTPUT_VARIABLE CIME_TEMP_SHELL0 OUTPUT_STRIP_TRAILING_WHITESPACE)'\n >>> inline\n '${CIME_TEMP_SHELL0}'\n >>> tear_down\n 'unset(CIME_TEMP_SHELL0)'\n \"\"\"\n # pylint: enable=line-too-long\n # Create a unique variable name, then increment variable number\n # counter so that we get a different value next time.\n var_name = \"CIME_TEMP_SHELL\" + str(self._var_num)\n self._var_num += 1\n set_up = \"execute_process(COMMAND \" + command + \\\n \" OUTPUT_VARIABLE \" + var_name + \\\n \" OUTPUT_STRIP_TRAILING_WHITESPACE)\"\n tear_down = \"unset(\" + var_name + \")\"\n return (set_up, \"${\" + var_name + \"}\", tear_down)\n\n def variable_string(self, name):\n \"\"\"Return a string to refer to a variable with the given name.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).variable_string(\"foo\")\n '${foo}'\n \"\"\"\n return \"${\" + name + \"}\"\n\n def set_variable(self, name, value):\n \"\"\"Write out a statement setting a variable to some value.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).set_variable(\"foo\", \"bar\")\n >>> s.getvalue()\n u'set(foo \"bar\")\\\\n'\n \"\"\"\n self.write_line(\"set(\" + name + ' \"' + value + '\")')\n\n def start_ifeq(self, left, right):\n \"\"\"Write out a statement to start a conditional block.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).start_ifeq(\"foo\", \"bar\")\n >>> s.getvalue()\n u'if(\"foo\" STREQUAL \"bar\")\\\\n'\n \"\"\"\n self.write_line('if(\"' + left + '\" STREQUAL \"' + right + '\")')\n self.indent_right()\n\n def end_ifeq(self):\n \"\"\"Write out a statement to end a block started with start_ifeq.\n\n >>> import io\n >>> s = io.StringIO()\n >>> writer = CMakeMacroWriter(s)\n >>> writer.start_ifeq(\"foo\", \"bar\")\n >>> writer.set_variable(\"foo2\", \"bar2\")\n >>> writer.end_ifeq()\n >>> s.getvalue()\n u'if(\"foo\" STREQUAL \"bar\")\\\\n set(foo2 \"bar2\")\\\\nendif()\\\\n'\n \"\"\"\n self.indent_left()\n self.write_line(\"endif()\")\n", "path": "scripts/lib/CIME/BuildTools/cmakemacroswriter.py"}], "after_files": [{"content": "\"\"\"Classes used to write build system files.\n\nThe classes 
here are used to write out settings for use by Makefile and CMake\nbuild systems. The two relevant classes are CMakeMacroWriter and\nMakeMacroWriter, which encapsulate the information necessary to write CMake and\nMakefile formatted text, respectively. See the docstrings for those classes for\nmore.\n\"\"\"\n\n# This is not the most useful check.\n# pylint: disable=invalid-name\n\nfrom CIME.BuildTools.macrowriterbase import MacroWriterBase\nfrom CIME.XML.standard_module_setup import *\nlogger = logging.getLogger(__name__)\n\n\nclass CMakeMacroWriter(MacroWriterBase):\n\n \"\"\"Macro writer for the CMake format.\n\n For details on the provided methods, see MacroWriterBase, which this\n class inherits from.\n \"\"\"\n\n def __init__(self, output):\n \"\"\"Initialize a CMake macro writer.\n\n Arguments:\n output - File-like object (probably an io.TextIOWrapper), which\n will be written to.\n \"\"\"\n super(CMakeMacroWriter, self).__init__(output)\n # This counter is for avoiding name conflicts in temporary\n # variables used for shell commands.\n self._var_num = 0\n\n def environment_variable_string(self, name):\n \"\"\"Return an environment variable reference.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).environment_variable_string(\"foo\")\n '$ENV{foo}'\n \"\"\"\n return \"$ENV{\" + name + \"}\"\n\n def shell_command_strings(self, command):\n # pylint: disable=line-too-long\n \"\"\"Return strings used to get the output of a shell command.\n\n >>> import io\n >>> s = io.StringIO()\n >>> set_up, inline, tear_down = CMakeMacroWriter(s).shell_command_strings(\"echo bar\")\n >>> set_up\n 'execute_process(COMMAND echo bar OUTPUT_VARIABLE CIME_TEMP_SHELL0 OUTPUT_STRIP_TRAILING_WHITESPACE)'\n >>> inline\n '${CIME_TEMP_SHELL0}'\n >>> tear_down\n 'unset(CIME_TEMP_SHELL0)'\n \"\"\"\n # pylint: enable=line-too-long\n # Create a unique variable name, then increment variable number\n # counter so that we get a different value next time.\n var_name = \"CIME_TEMP_SHELL\" + str(self._var_num)\n self._var_num += 1\n set_up = \"execute_process(COMMAND \" + command + \\\n \" OUTPUT_VARIABLE \" + var_name + \\\n \" OUTPUT_STRIP_TRAILING_WHITESPACE)\"\n tear_down = \"unset(\" + var_name + \")\"\n return (set_up, \"${\" + var_name + \"}\", tear_down)\n\n def variable_string(self, name):\n \"\"\"Return a string to refer to a variable with the given name.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).variable_string(\"foo\")\n '${foo}'\n \"\"\"\n return \"${\" + name + \"}\"\n\n def set_variable(self, name, value):\n \"\"\"Write out a statement setting a variable to some value.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).set_variable(\"foo\", \"bar\")\n >>> s.getvalue()\n u'set(foo \"bar\")\\\\n'\n \"\"\"\n value_transformed = self._transform_value(value)\n self.write_line(\"set(\" + name + ' \"' + value_transformed + '\")')\n\n def start_ifeq(self, left, right):\n \"\"\"Write out a statement to start a conditional block.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).start_ifeq(\"foo\", \"bar\")\n >>> s.getvalue()\n u'if(\"foo\" STREQUAL \"bar\")\\\\n'\n \"\"\"\n self.write_line('if(\"' + left + '\" STREQUAL \"' + right + '\")')\n self.indent_right()\n\n def end_ifeq(self):\n \"\"\"Write out a statement to end a block started with start_ifeq.\n\n >>> import io\n >>> s = io.StringIO()\n >>> writer = CMakeMacroWriter(s)\n >>> writer.start_ifeq(\"foo\", \"bar\")\n >>> writer.set_variable(\"foo2\", \"bar2\")\n >>> 
writer.end_ifeq()\n >>> s.getvalue()\n u'if(\"foo\" STREQUAL \"bar\")\\\\n set(foo2 \"bar2\")\\\\nendif()\\\\n'\n \"\"\"\n self.indent_left()\n self.write_line(\"endif()\")\n\n def _transform_value(self, value):\n \"\"\"Some elements need their values transformed in some way for CMake to handle them properly.\n This method does those transformations.\n\n Args:\n - value (str): value of element\n\n Returns transformed value\n \"\"\"\n\n # Not all variables need leading & trailing whitespace removed, but some\n # do. In particular, compiler variables (MPICC, MPICXX, MPIFC, SCC,\n # SCXX, SFC) are only handled correctly if leading & trailing whitespace\n # is removed. It doesn't seem to hurt to remove whitespace from all\n # variables.\n value_transformed = value.strip()\n\n return value_transformed\n", "path": "scripts/lib/CIME/BuildTools/cmakemacroswriter.py"}]} | 1,748 | 368 |
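The patch in the row above routes every value written by `set_variable` through a new `_transform_value` helper that strips leading and trailing whitespace, so compiler variables such as `MPICC` come out as clean CMake `set()` lines. A minimal, self-contained sketch of that behavior (illustrative only, not the actual CIME `CMakeMacroWriter` class):

```python
import io

def write_set_variable(output, name, value):
    # Mirrors the patched behavior: strip whitespace before writing the value.
    value_transformed = value.strip()
    output.write('set(' + name + ' "' + value_transformed + '")\n')

s = io.StringIO()
write_set_variable(s, "MPICC", "  mpicc  ")
print(s.getvalue())  # -> set(MPICC "mpicc")
```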
gh_patches_debug_31153 | rasdani/github-patches | git_diff | cupy__cupy-6121 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`copyto` does not test shape for memcpy cases
### Description
There should be a shape check before these:
https://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L54-L55
https://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L76-L80
### To Reproduce
```py
>>> dst = cupy.ones((2, 3), dtype=int)
>>> cupy.copyto(dst, cupy.arange(6))
>>> dst
array([[0, 1, 2],
[3, 4, 5]])
>>> numpy.copyto(numpy.ones((2, 3), dtype=int), numpy.arange(6))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 5, in copyto
ValueError: could not broadcast input array from shape (6,) into shape (2,3)
```
```py
>>> cupy.copyto(cupy.empty((3, 0, 4, 5)), cupy.empty((6, 7, 0)))
>>> numpy.copyto(numpy.empty((3, 0, 4, 5)), numpy.empty((6, 7, 0)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 5, in copyto
ValueError: could not broadcast input array from shape (6,7,0) into shape (3,0,4,5)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/_manipulation/basic.py`
Content:
```
1 import numpy
2
3 from cupy import _core
4 from cupy._core import _fusion_interface
5 from cupy._core import fusion
6 from cupy._sorting import search
7 from cupy_backends.cuda.api import runtime
8
9
10 def copyto(dst, src, casting='same_kind', where=None):
11 """Copies values from one array to another with broadcasting.
12
13 This function can be called for arrays on different devices. In this case,
14 casting, ``where``, and broadcasting is not supported, and an exception is
15 raised if these are used.
16
17 Args:
18 dst (cupy.ndarray): Target array.
19 src (cupy.ndarray): Source array.
20 casting (str): Casting rule. See :func:`numpy.can_cast` for detail.
21 where (cupy.ndarray of bool): If specified, this array acts as a mask,
22 and an element is copied only if the corresponding element of
23 ``where`` is True.
24
25 .. seealso:: :func:`numpy.copyto`
26
27 """
28
29 src_type = type(src)
30 src_is_python_scalar = src_type in (
31 int, bool, float, complex,
32 fusion._FusionVarScalar, _fusion_interface._ScalarProxy)
33 if src_is_python_scalar:
34 src_dtype = numpy.dtype(type(src))
35 can_cast = numpy.can_cast(src, dst.dtype, casting)
36 else:
37 src_dtype = src.dtype
38 can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)
39
40 if not can_cast:
41 raise TypeError('Cannot cast %s to %s in %s casting mode' %
42 (src_dtype, dst.dtype, casting))
43
44 if not src_is_python_scalar and src.ndim > dst.ndim:
45 # NumPy allows stripping leading unit dimensions.
46 try:
47 src = src.squeeze(tuple(range(src.ndim - dst.ndim)))
48 except ValueError:
49 # "cannot select an axis to squeeze out
50 # which has size not equal to one"
51 pass # raise an error later
52
53 if fusion._is_fusing():
54 if where is None:
55 _core.elementwise_copy(src, dst)
56 else:
57 fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)
58 return
59
60 if where is not None:
61 _core.elementwise_copy(src, dst, _where=where)
62 return
63
64 if dst.size == 0:
65 return
66
67 if src_is_python_scalar:
68 dst.fill(src)
69 return
70
71 if _can_memcpy(dst, src):
72 dst.data.copy_from_async(src.data, src.nbytes)
73 return
74
75 device = dst.device
76 prev_device = runtime.getDevice()
77 try:
78 runtime.setDevice(device.id)
79 if src.device != device:
80 src = src.copy()
81 _core.elementwise_copy(src, dst)
82 finally:
83 runtime.setDevice(prev_device)
84
85
86 def _can_memcpy(dst, src):
87 c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous
88 f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous
89 return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \
90 dst.size == src.size
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/_manipulation/basic.py b/cupy/_manipulation/basic.py
--- a/cupy/_manipulation/basic.py
+++ b/cupy/_manipulation/basic.py
@@ -1,3 +1,5 @@
+import itertools
+
import numpy
from cupy import _core
@@ -41,22 +43,35 @@
raise TypeError('Cannot cast %s to %s in %s casting mode' %
(src_dtype, dst.dtype, casting))
- if not src_is_python_scalar and src.ndim > dst.ndim:
- # NumPy allows stripping leading unit dimensions.
- try:
- src = src.squeeze(tuple(range(src.ndim - dst.ndim)))
- except ValueError:
- # "cannot select an axis to squeeze out
- # which has size not equal to one"
- pass # raise an error later
-
if fusion._is_fusing():
+ # TODO(kataoka): NumPy allows stripping leading unit dimensions.
+ # But fusion array proxy does not currently support
+ # `shape` and `squeeze`.
+
if where is None:
_core.elementwise_copy(src, dst)
else:
fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)
return
+ if not src_is_python_scalar:
+ # Check broadcast condition
+ # - for fast-paths and
+ # - for a better error message (than ufunc's).
+ # NumPy allows stripping leading unit dimensions.
+ if not all([
+ s in (d, 1)
+ for s, d in itertools.zip_longest(
+ reversed(src.shape), reversed(dst.shape), fillvalue=1)
+ ]):
+ raise ValueError(
+ "could not broadcast input array "
+ f"from shape {src.shape} into shape {dst.shape}")
+ squeeze_ndim = src.ndim - dst.ndim
+ if squeeze_ndim > 0:
+ # always succeeds because broadcast conition is checked.
+ src = src.squeeze(tuple(range(squeeze_ndim)))
+
if where is not None:
_core.elementwise_copy(src, dst, _where=where)
return
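The heart of the fix above is an explicit broadcast-compatibility test performed before any fast path (fill or memcpy) is taken. A standalone sketch of that check, using the same trailing-dimension comparison as the patch (illustrative; the real logic lives in `cupy/_manipulation/basic.py`):

```python
import itertools

def can_broadcast_into(src_shape, dst_shape):
    # Compare shapes from the trailing dimensions; every source dimension must
    # be 1 or equal to the corresponding destination dimension.
    return all(
        s in (d, 1)
        for s, d in itertools.zip_longest(
            reversed(src_shape), reversed(dst_shape), fillvalue=1)
    )

print(can_broadcast_into((6,), (2, 3)))             # False -> copyto should raise
print(can_broadcast_into((1, 3), (2, 3)))           # True
print(can_broadcast_into((6, 7, 0), (3, 0, 4, 5)))  # False (second case in the issue)
```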
| {"golden_diff": "diff --git a/cupy/_manipulation/basic.py b/cupy/_manipulation/basic.py\n--- a/cupy/_manipulation/basic.py\n+++ b/cupy/_manipulation/basic.py\n@@ -1,3 +1,5 @@\n+import itertools\n+\n import numpy\n \n from cupy import _core\n@@ -41,22 +43,35 @@\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n \n- if not src_is_python_scalar and src.ndim > dst.ndim:\n- # NumPy allows stripping leading unit dimensions.\n- try:\n- src = src.squeeze(tuple(range(src.ndim - dst.ndim)))\n- except ValueError:\n- # \"cannot select an axis to squeeze out\n- # which has size not equal to one\"\n- pass # raise an error later\n-\n if fusion._is_fusing():\n+ # TODO(kataoka): NumPy allows stripping leading unit dimensions.\n+ # But fusion array proxy does not currently support\n+ # `shape` and `squeeze`.\n+\n if where is None:\n _core.elementwise_copy(src, dst)\n else:\n fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)\n return\n \n+ if not src_is_python_scalar:\n+ # Check broadcast condition\n+ # - for fast-paths and\n+ # - for a better error message (than ufunc's).\n+ # NumPy allows stripping leading unit dimensions.\n+ if not all([\n+ s in (d, 1)\n+ for s, d in itertools.zip_longest(\n+ reversed(src.shape), reversed(dst.shape), fillvalue=1)\n+ ]):\n+ raise ValueError(\n+ \"could not broadcast input array \"\n+ f\"from shape {src.shape} into shape {dst.shape}\")\n+ squeeze_ndim = src.ndim - dst.ndim\n+ if squeeze_ndim > 0:\n+ # always succeeds because broadcast conition is checked.\n+ src = src.squeeze(tuple(range(squeeze_ndim)))\n+\n if where is not None:\n _core.elementwise_copy(src, dst, _where=where)\n return\n", "issue": "`copyto` does not test shape for memcpy cases\n### Description\r\n\r\nThere should be a shape check before these:\r\nhttps://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L54-L55\r\nhttps://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L76-L80\r\n\r\n### To Reproduce\r\n\r\n```py\r\n>>> dst = cupy.ones((2, 3), dtype=int)\r\n>>> cupy.copyto(dst, cupy.arange(6))\r\n>>> dst\r\narray([[0, 1, 2],\r\n [3, 4, 5]])\r\n>>> numpy.copyto(numpy.ones((2, 3), dtype=int), numpy.arange(6))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"<__array_function__ internals>\", line 5, in copyto\r\nValueError: could not broadcast input array from shape (6,) into shape (2,3)\r\n```\r\n\r\n```py\r\n>>> cupy.copyto(cupy.empty((3, 0, 4, 5)), cupy.empty((6, 7, 0)))\r\n>>> numpy.copyto(numpy.empty((3, 0, 4, 5)), numpy.empty((6, 7, 0)))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"<__array_function__ internals>\", line 5, in copyto\r\nValueError: could not broadcast input array from shape (6,7,0) into shape (3,0,4,5)\r\n```\r\n\n", "before_files": [{"content": "import numpy\n\nfrom cupy import _core\nfrom cupy._core import _fusion_interface\nfrom cupy._core import fusion\nfrom cupy._sorting import search\nfrom cupy_backends.cuda.api import runtime\n\n\ndef copyto(dst, src, casting='same_kind', where=None):\n \"\"\"Copies values from one array to another with broadcasting.\n\n This function can be called for arrays on different devices. In this case,\n casting, ``where``, and broadcasting is not supported, and an exception is\n raised if these are used.\n\n Args:\n dst (cupy.ndarray): Target array.\n src (cupy.ndarray): Source array.\n casting (str): Casting rule. 
See :func:`numpy.can_cast` for detail.\n where (cupy.ndarray of bool): If specified, this array acts as a mask,\n and an element is copied only if the corresponding element of\n ``where`` is True.\n\n .. seealso:: :func:`numpy.copyto`\n\n \"\"\"\n\n src_type = type(src)\n src_is_python_scalar = src_type in (\n int, bool, float, complex,\n fusion._FusionVarScalar, _fusion_interface._ScalarProxy)\n if src_is_python_scalar:\n src_dtype = numpy.dtype(type(src))\n can_cast = numpy.can_cast(src, dst.dtype, casting)\n else:\n src_dtype = src.dtype\n can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)\n\n if not can_cast:\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n\n if not src_is_python_scalar and src.ndim > dst.ndim:\n # NumPy allows stripping leading unit dimensions.\n try:\n src = src.squeeze(tuple(range(src.ndim - dst.ndim)))\n except ValueError:\n # \"cannot select an axis to squeeze out\n # which has size not equal to one\"\n pass # raise an error later\n\n if fusion._is_fusing():\n if where is None:\n _core.elementwise_copy(src, dst)\n else:\n fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)\n return\n\n if where is not None:\n _core.elementwise_copy(src, dst, _where=where)\n return\n\n if dst.size == 0:\n return\n\n if src_is_python_scalar:\n dst.fill(src)\n return\n\n if _can_memcpy(dst, src):\n dst.data.copy_from_async(src.data, src.nbytes)\n return\n\n device = dst.device\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(device.id)\n if src.device != device:\n src = src.copy()\n _core.elementwise_copy(src, dst)\n finally:\n runtime.setDevice(prev_device)\n\n\ndef _can_memcpy(dst, src):\n c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous\n f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous\n return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \\\n dst.size == src.size\n", "path": "cupy/_manipulation/basic.py"}], "after_files": [{"content": "import itertools\n\nimport numpy\n\nfrom cupy import _core\nfrom cupy._core import _fusion_interface\nfrom cupy._core import fusion\nfrom cupy._sorting import search\nfrom cupy_backends.cuda.api import runtime\n\n\ndef copyto(dst, src, casting='same_kind', where=None):\n \"\"\"Copies values from one array to another with broadcasting.\n\n This function can be called for arrays on different devices. In this case,\n casting, ``where``, and broadcasting is not supported, and an exception is\n raised if these are used.\n\n Args:\n dst (cupy.ndarray): Target array.\n src (cupy.ndarray): Source array.\n casting (str): Casting rule. See :func:`numpy.can_cast` for detail.\n where (cupy.ndarray of bool): If specified, this array acts as a mask,\n and an element is copied only if the corresponding element of\n ``where`` is True.\n\n .. 
seealso:: :func:`numpy.copyto`\n\n \"\"\"\n\n src_type = type(src)\n src_is_python_scalar = src_type in (\n int, bool, float, complex,\n fusion._FusionVarScalar, _fusion_interface._ScalarProxy)\n if src_is_python_scalar:\n src_dtype = numpy.dtype(type(src))\n can_cast = numpy.can_cast(src, dst.dtype, casting)\n else:\n src_dtype = src.dtype\n can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)\n\n if not can_cast:\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n\n if fusion._is_fusing():\n # TODO(kataoka): NumPy allows stripping leading unit dimensions.\n # But fusion array proxy does not currently support\n # `shape` and `squeeze`.\n\n if where is None:\n _core.elementwise_copy(src, dst)\n else:\n fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)\n return\n\n if not src_is_python_scalar:\n # Check broadcast condition\n # - for fast-paths and\n # - for a better error message (than ufunc's).\n # NumPy allows stripping leading unit dimensions.\n if not all([\n s in (d, 1)\n for s, d in itertools.zip_longest(\n reversed(src.shape), reversed(dst.shape), fillvalue=1)\n ]):\n raise ValueError(\n \"could not broadcast input array \"\n f\"from shape {src.shape} into shape {dst.shape}\")\n squeeze_ndim = src.ndim - dst.ndim\n if squeeze_ndim > 0:\n # always succeeds because broadcast conition is checked.\n src = src.squeeze(tuple(range(squeeze_ndim)))\n\n if where is not None:\n _core.elementwise_copy(src, dst, _where=where)\n return\n\n if dst.size == 0:\n return\n\n if src_is_python_scalar:\n dst.fill(src)\n return\n\n if _can_memcpy(dst, src):\n dst.data.copy_from_async(src.data, src.nbytes)\n return\n\n device = dst.device\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(device.id)\n if src.device != device:\n src = src.copy()\n _core.elementwise_copy(src, dst)\n finally:\n runtime.setDevice(prev_device)\n\n\ndef _can_memcpy(dst, src):\n c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous\n f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous\n return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \\\n dst.size == src.size\n", "path": "cupy/_manipulation/basic.py"}]} | 1,538 | 488 |
gh_patches_debug_10098 | rasdani/github-patches | git_diff | liqd__a4-opin-2485 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Community debate module error messages
**expected behaviour:** Error message only shown for document upload if file is incorrect
**behaviour:** Error message is also shown when title is not added
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/communitydebate/views.py`
Content:
```
1 from django.contrib import messages
2 from django.shortcuts import render
3 from django.urls import reverse
4 from django.utils.translation import gettext as _
5 from django.views import generic
6 from rules.contrib.views import PermissionRequiredMixin
7
8 from adhocracy4.filters import views as filter_views
9 from adhocracy4.modules.models import Module
10 from euth.projects import mixins as prj_mixins
11
12 from . import forms
13 from . import models as communitydebate_models
14 from .filters import TopicFilterSet
15
16
17 class TopicListView(prj_mixins.ProjectPhaseMixin,
18 filter_views.FilteredListView):
19 model = communitydebate_models.Topic
20 paginate_by = 15
21 filter_set = TopicFilterSet
22
23 def get_queryset(self):
24 return super().get_queryset().filter(module=self.module)
25
26
27 class TopicDetailView(PermissionRequiredMixin, generic.DetailView):
28 model = communitydebate_models.Topic
29 queryset = \
30 communitydebate_models.Topic.objects\
31 .annotate_positive_rating_count() \
32 .annotate_negative_rating_count()
33 permission_required = 'euth_communitydebate.view_topic'
34
35 def get_context_data(self, **kwargs):
36 context = super().get_context_data()
37 upload_files = communitydebate_models.TopicFileUpload.objects\
38 .filter(topic=self.object)
39 context['upload_files'] = upload_files
40 return context
41
42
43 class TopicCreateView(PermissionRequiredMixin, generic.CreateView):
44 model = communitydebate_models.Topic
45 form_class = forms.TopicForm
46 permission_required = 'euth_communitydebate.propose_topic'
47 template_name = 'euth_communitydebate/topic_form.html'
48
49 @property
50 def raise_exception(self):
51 return self.request.user.is_authenticated
52
53 def dispatch(self, *args, **kwargs):
54 mod_slug = self.kwargs[self.slug_url_kwarg]
55 self.module = Module.objects.get(slug=mod_slug)
56 self.project = self.module.project
57 return super().dispatch(*args, **kwargs)
58
59 def get_permission_object(self, *args, **kwargs):
60 return self.module
61
62 def get_context_data(self, upload_forms=None, **kwargs):
63 context = super().get_context_data(**kwargs)
64 context['project'] = self.project
65 context['mode'] = 'create'
66 if not upload_forms:
67 upload_forms = forms.TopicFileUploadFormset()
68 context['upload_forms'] = upload_forms
69 return context
70
71 def form_valid(self, form):
72 form.instance.creator = self.request.user
73 form.instance.module = self.module
74 return super().form_valid(form)
75
76 def get_form_kwargs(self):
77 kwargs = super().get_form_kwargs()
78 kwargs['module'] = self.module
79 return kwargs
80
81 def post(self, request, *args, **kwargs):
82 self.object = None
83 form = self.get_form()
84 if form.is_valid():
85 topic = form.save(commit=False)
86 upload_forms = forms.TopicFileUploadFormset(request.POST,
87 request.FILES,
88 instance=topic)
89 if upload_forms.is_valid():
90 response = self.form_valid(form)
91 upload_forms.save()
92 messages.add_message(request,
93 messages.SUCCESS,
94 _('Topic '
95 'successfully created'))
96 return response
97
98 upload_forms = forms.TopicFileUploadFormset(request.POST,
99 request.FILES)
100 return render(request, self.template_name,
101 self.get_context_data(upload_forms=upload_forms))
102
103
104 class TopicUpdateView(PermissionRequiredMixin, generic.UpdateView):
105 model = communitydebate_models.Topic
106 form_class = forms.TopicForm
107 permission_required = 'euth_communitydebate.modify_topic'
108 template_name = 'euth_communitydebate/topic_form.html'
109
110 @property
111 def raise_exception(self):
112 return self.request.user.is_authenticated
113
114 def dispatch(self, *args, **kwargs):
115 self.object = self.get_object()
116 return super().dispatch(*args, **kwargs)
117
118 def get_context_data(self, upload_forms=None, **kwargs):
119 context = super().get_context_data(**kwargs)
120 context['project'] = self.object.project
121 context['mode'] = 'update'
122 if not upload_forms:
123 upload_forms = forms.TopicFileUploadFormset(
124 instance=self.get_object())
125 context['upload_forms'] = upload_forms
126 return context
127
128 def get_form_kwargs(self):
129 kwargs = super().get_form_kwargs()
130 kwargs['module'] = kwargs.get('instance').module
131 return kwargs
132
133 def post(self, request, *args, **kwargs):
134 form = self.get_form()
135 upload_forms = forms.TopicFileUploadFormset(request.POST,
136 request.FILES,
137 instance=self.object)
138 if upload_forms.is_valid() and form.is_valid():
139 response = self.form_valid(form)
140 upload_forms.save()
141 messages.add_message(request,
142 messages.SUCCESS,
143 _('Topic successfully '
144 'updated'))
145 else:
146 response = render(request,
147 self.template_name,
148 self.get_context_data(upload_forms=upload_forms))
149 return response
150
151
152 class TopicDeleteView(PermissionRequiredMixin, generic.DeleteView):
153 model = communitydebate_models.Topic
154 success_message = _("Your topic has been deleted")
155 permission_required = 'euth_communitydebate.modify_topic'
156
157 @property
158 def raise_exception(self):
159 return self.request.user.is_authenticated
160
161 def delete(self, request, *args, **kwargs):
162 messages.success(self.request, self.success_message)
163 return super(TopicDeleteView, self).delete(request, *args, **kwargs)
164
165 def get_success_url(self):
166 return reverse('project-detail',
167 kwargs={'slug': self.object.project.slug})
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/communitydebate/views.py b/euth/communitydebate/views.py
--- a/euth/communitydebate/views.py
+++ b/euth/communitydebate/views.py
@@ -95,8 +95,9 @@
'successfully created'))
return response
- upload_forms = forms.TopicFileUploadFormset(request.POST,
- request.FILES)
+ else:
+ upload_forms = forms.TopicFileUploadFormset(request.POST,
+ request.FILES)
return render(request, self.template_name,
self.get_context_data(upload_forms=upload_forms))
| {"golden_diff": "diff --git a/euth/communitydebate/views.py b/euth/communitydebate/views.py\n--- a/euth/communitydebate/views.py\n+++ b/euth/communitydebate/views.py\n@@ -95,8 +95,9 @@\n 'successfully created'))\n return response\n \n- upload_forms = forms.TopicFileUploadFormset(request.POST,\n- request.FILES)\n+ else:\n+ upload_forms = forms.TopicFileUploadFormset(request.POST,\n+ request.FILES)\n return render(request, self.template_name,\n self.get_context_data(upload_forms=upload_forms))\n", "issue": "Community debate module error messages\n**expected behaviour:** Error message only shown for document upload if file is incorrect\r\n**behaviour:** Error message is also shown when title is not added\r\n\r\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\nfrom django.views import generic\nfrom rules.contrib.views import PermissionRequiredMixin\n\nfrom adhocracy4.filters import views as filter_views\nfrom adhocracy4.modules.models import Module\nfrom euth.projects import mixins as prj_mixins\n\nfrom . import forms\nfrom . import models as communitydebate_models\nfrom .filters import TopicFilterSet\n\n\nclass TopicListView(prj_mixins.ProjectPhaseMixin,\n filter_views.FilteredListView):\n model = communitydebate_models.Topic\n paginate_by = 15\n filter_set = TopicFilterSet\n\n def get_queryset(self):\n return super().get_queryset().filter(module=self.module)\n\n\nclass TopicDetailView(PermissionRequiredMixin, generic.DetailView):\n model = communitydebate_models.Topic\n queryset = \\\n communitydebate_models.Topic.objects\\\n .annotate_positive_rating_count() \\\n .annotate_negative_rating_count()\n permission_required = 'euth_communitydebate.view_topic'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n upload_files = communitydebate_models.TopicFileUpload.objects\\\n .filter(topic=self.object)\n context['upload_files'] = upload_files\n return context\n\n\nclass TopicCreateView(PermissionRequiredMixin, generic.CreateView):\n model = communitydebate_models.Topic\n form_class = forms.TopicForm\n permission_required = 'euth_communitydebate.propose_topic'\n template_name = 'euth_communitydebate/topic_form.html'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n def dispatch(self, *args, **kwargs):\n mod_slug = self.kwargs[self.slug_url_kwarg]\n self.module = Module.objects.get(slug=mod_slug)\n self.project = self.module.project\n return super().dispatch(*args, **kwargs)\n\n def get_permission_object(self, *args, **kwargs):\n return self.module\n\n def get_context_data(self, upload_forms=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['project'] = self.project\n context['mode'] = 'create'\n if not upload_forms:\n upload_forms = forms.TopicFileUploadFormset()\n context['upload_forms'] = upload_forms\n return context\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.module = self.module\n return super().form_valid(form)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['module'] = self.module\n return kwargs\n\n def post(self, request, *args, **kwargs):\n self.object = None\n form = self.get_form()\n if form.is_valid():\n topic = form.save(commit=False)\n upload_forms = forms.TopicFileUploadFormset(request.POST,\n request.FILES,\n instance=topic)\n if upload_forms.is_valid():\n response = 
self.form_valid(form)\n upload_forms.save()\n messages.add_message(request,\n messages.SUCCESS,\n _('Topic '\n 'successfully created'))\n return response\n\n upload_forms = forms.TopicFileUploadFormset(request.POST,\n request.FILES)\n return render(request, self.template_name,\n self.get_context_data(upload_forms=upload_forms))\n\n\nclass TopicUpdateView(PermissionRequiredMixin, generic.UpdateView):\n model = communitydebate_models.Topic\n form_class = forms.TopicForm\n permission_required = 'euth_communitydebate.modify_topic'\n template_name = 'euth_communitydebate/topic_form.html'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n def dispatch(self, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(*args, **kwargs)\n\n def get_context_data(self, upload_forms=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['project'] = self.object.project\n context['mode'] = 'update'\n if not upload_forms:\n upload_forms = forms.TopicFileUploadFormset(\n instance=self.get_object())\n context['upload_forms'] = upload_forms\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['module'] = kwargs.get('instance').module\n return kwargs\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n upload_forms = forms.TopicFileUploadFormset(request.POST,\n request.FILES,\n instance=self.object)\n if upload_forms.is_valid() and form.is_valid():\n response = self.form_valid(form)\n upload_forms.save()\n messages.add_message(request,\n messages.SUCCESS,\n _('Topic successfully '\n 'updated'))\n else:\n response = render(request,\n self.template_name,\n self.get_context_data(upload_forms=upload_forms))\n return response\n\n\nclass TopicDeleteView(PermissionRequiredMixin, generic.DeleteView):\n model = communitydebate_models.Topic\n success_message = _(\"Your topic has been deleted\")\n permission_required = 'euth_communitydebate.modify_topic'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super(TopicDeleteView, self).delete(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('project-detail',\n kwargs={'slug': self.object.project.slug})\n", "path": "euth/communitydebate/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\nfrom django.views import generic\nfrom rules.contrib.views import PermissionRequiredMixin\n\nfrom adhocracy4.filters import views as filter_views\nfrom adhocracy4.modules.models import Module\nfrom euth.projects import mixins as prj_mixins\n\nfrom . import forms\nfrom . 
import models as communitydebate_models\nfrom .filters import TopicFilterSet\n\n\nclass TopicListView(prj_mixins.ProjectPhaseMixin,\n filter_views.FilteredListView):\n model = communitydebate_models.Topic\n paginate_by = 15\n filter_set = TopicFilterSet\n\n def get_queryset(self):\n return super().get_queryset().filter(module=self.module)\n\n\nclass TopicDetailView(PermissionRequiredMixin, generic.DetailView):\n model = communitydebate_models.Topic\n queryset = \\\n communitydebate_models.Topic.objects\\\n .annotate_positive_rating_count() \\\n .annotate_negative_rating_count()\n permission_required = 'euth_communitydebate.view_topic'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n upload_files = communitydebate_models.TopicFileUpload.objects\\\n .filter(topic=self.object)\n context['upload_files'] = upload_files\n return context\n\n\nclass TopicCreateView(PermissionRequiredMixin, generic.CreateView):\n model = communitydebate_models.Topic\n form_class = forms.TopicForm\n permission_required = 'euth_communitydebate.propose_topic'\n template_name = 'euth_communitydebate/topic_form.html'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n def dispatch(self, *args, **kwargs):\n mod_slug = self.kwargs[self.slug_url_kwarg]\n self.module = Module.objects.get(slug=mod_slug)\n self.project = self.module.project\n return super().dispatch(*args, **kwargs)\n\n def get_permission_object(self, *args, **kwargs):\n return self.module\n\n def get_context_data(self, upload_forms=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['project'] = self.project\n context['mode'] = 'create'\n if not upload_forms:\n upload_forms = forms.TopicFileUploadFormset()\n context['upload_forms'] = upload_forms\n return context\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.module = self.module\n return super().form_valid(form)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['module'] = self.module\n return kwargs\n\n def post(self, request, *args, **kwargs):\n self.object = None\n form = self.get_form()\n if form.is_valid():\n topic = form.save(commit=False)\n upload_forms = forms.TopicFileUploadFormset(request.POST,\n request.FILES,\n instance=topic)\n if upload_forms.is_valid():\n response = self.form_valid(form)\n upload_forms.save()\n messages.add_message(request,\n messages.SUCCESS,\n _('Topic '\n 'successfully created'))\n return response\n\n else:\n upload_forms = forms.TopicFileUploadFormset(request.POST,\n request.FILES)\n return render(request, self.template_name,\n self.get_context_data(upload_forms=upload_forms))\n\n\nclass TopicUpdateView(PermissionRequiredMixin, generic.UpdateView):\n model = communitydebate_models.Topic\n form_class = forms.TopicForm\n permission_required = 'euth_communitydebate.modify_topic'\n template_name = 'euth_communitydebate/topic_form.html'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n def dispatch(self, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(*args, **kwargs)\n\n def get_context_data(self, upload_forms=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['project'] = self.object.project\n context['mode'] = 'update'\n if not upload_forms:\n upload_forms = forms.TopicFileUploadFormset(\n instance=self.get_object())\n context['upload_forms'] = upload_forms\n return context\n\n def get_form_kwargs(self):\n kwargs = 
super().get_form_kwargs()\n kwargs['module'] = kwargs.get('instance').module\n return kwargs\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n upload_forms = forms.TopicFileUploadFormset(request.POST,\n request.FILES,\n instance=self.object)\n if upload_forms.is_valid() and form.is_valid():\n response = self.form_valid(form)\n upload_forms.save()\n messages.add_message(request,\n messages.SUCCESS,\n _('Topic successfully '\n 'updated'))\n else:\n response = render(request,\n self.template_name,\n self.get_context_data(upload_forms=upload_forms))\n return response\n\n\nclass TopicDeleteView(PermissionRequiredMixin, generic.DeleteView):\n model = communitydebate_models.Topic\n success_message = _(\"Your topic has been deleted\")\n permission_required = 'euth_communitydebate.modify_topic'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super(TopicDeleteView, self).delete(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('project-detail',\n kwargs={'slug': self.object.project.slug})\n", "path": "euth/communitydebate/views.py"}]} | 1,883 | 123 |
gh_patches_debug_13913 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-655 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix the repo regex to handle + in URLs
The documentation said that you could do git+https:// or git+ssh:// but that was not, in fact, true.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/main.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.main
6 -----------------
7
8 Main entry point for the `cookiecutter` command.
9
10 The code in this module is also a good example of how to use Cookiecutter as a
11 library rather than a script.
12 """
13
14 from __future__ import unicode_literals
15 import logging
16 import os
17 import re
18
19 from .config import get_user_config, USER_CONFIG_PATH
20 from .exceptions import InvalidModeException, RepositoryNotFound
21 from .prompt import prompt_for_config
22 from .generate import generate_context, generate_files
23 from .vcs import clone
24 from .replay import dump, load
25
26 logger = logging.getLogger(__name__)
27
28 builtin_abbreviations = {
29 'gh': 'https://github.com/{0}.git',
30 'bb': 'https://bitbucket.org/{0}',
31 }
32
33 REPO_REGEX = """
34 (
35 ((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.
36 | # or
37 (\w+@[\w\.]+) # something like user@...
38 )
39 .*
40 """
41
42
43 def is_repo_url(value):
44 """Return True if value is a repository URL."""
45 return bool(re.match(REPO_REGEX, value, re.VERBOSE))
46
47
48 def expand_abbreviations(template, config_dict):
49 """
50 Expand abbreviations in a template name.
51
52 :param template: The project template name.
53 :param config_dict: The user config, which will contain abbreviation
54 definitions.
55 """
56
57 abbreviations = builtin_abbreviations.copy()
58 abbreviations.update(config_dict.get('abbreviations', {}))
59
60 if template in abbreviations:
61 return abbreviations[template]
62
63 # Split on colon. If there is no colon, rest will be empty
64 # and prefix will be the whole template
65 prefix, sep, rest = template.partition(':')
66 if prefix in abbreviations:
67 return abbreviations[prefix].format(rest)
68
69 return template
70
71
72 def cookiecutter(
73 template, checkout=None, no_input=False, extra_context=None,
74 replay=False, overwrite_if_exists=False, output_dir='.',
75 config_file=USER_CONFIG_PATH):
76 """
77 API equivalent to using Cookiecutter at the command line.
78
79 :param template: A directory containing a project template directory,
80 or a URL to a git repository.
81 :param checkout: The branch, tag or commit ID to checkout after clone.
82 :param no_input: Prompt the user at command line for manual configuration?
83 :param extra_context: A dictionary of context that overrides default
84 and user configuration.
85 :param: overwrite_if_exists: Overwrite the contents of output directory
86 if it exists
87 :param output_dir: Where to output the generated project dir into.
88 :param config_file: User configuration file path.
89 """
90 if replay and ((no_input is not False) or (extra_context is not None)):
91 err_msg = (
92 "You can not use both replay and no_input or extra_context "
93 "at the same time."
94 )
95 raise InvalidModeException(err_msg)
96
97 # Get user config from ~/.cookiecutterrc or equivalent
98 # If no config file, sensible defaults from config.DEFAULT_CONFIG are used
99 config_dict = get_user_config(config_file=config_file)
100
101 template = expand_abbreviations(template, config_dict)
102
103 if is_repo_url(template):
104 repo_dir = clone(
105 repo_url=template,
106 checkout=checkout,
107 clone_to_dir=config_dict['cookiecutters_dir'],
108 no_input=no_input
109 )
110 else:
111 # If it's a local repo, no need to clone or copy to your
112 # cookiecutters_dir
113 repo_dir = template
114
115 if not os.path.isdir(repo_dir):
116 raise RepositoryNotFound(
117 'The repository {0} could not be located.'.format(template)
118 )
119
120 template_name = os.path.basename(template)
121
122 if replay:
123 context = load(config_dict['replay_dir'], template_name)
124 else:
125 context_file = os.path.join(repo_dir, 'cookiecutter.json')
126 logging.debug('context_file is {0}'.format(context_file))
127
128 context = generate_context(
129 context_file=context_file,
130 default_context=config_dict['default_context'],
131 extra_context=extra_context,
132 )
133
134 # prompt the user to manually configure at the command line.
135 # except when 'no-input' flag is set
136 context['cookiecutter'] = prompt_for_config(context, no_input)
137
138 dump(config_dict['replay_dir'], template_name, context)
139
140 # Create project from local context and project template.
141 return generate_files(
142 repo_dir=repo_dir,
143 context=context,
144 overwrite_if_exists=overwrite_if_exists,
145 output_dir=output_dir
146 )
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/main.py b/cookiecutter/main.py
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -30,19 +30,18 @@
'bb': 'https://bitbucket.org/{0}',
}
-REPO_REGEX = """
-(
-((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.
- | # or
- (\w+@[\w\.]+) # something like user@...
+REPO_REGEX = re.compile(r"""
+(?x)
+((((git|hg)\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.
+ | # or
+ (\w+@[\w\.]+) # something like user@...
)
-.*
-"""
+""")
def is_repo_url(value):
"""Return True if value is a repository URL."""
- return bool(re.match(REPO_REGEX, value, re.VERBOSE))
+ return bool(REPO_REGEX.match(value))
def expand_abbreviations(template, config_dict):
| {"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -30,19 +30,18 @@\n 'bb': 'https://bitbucket.org/{0}',\n }\n \n-REPO_REGEX = \"\"\"\n-(\n-((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.\n- | # or\n- (\\w+@[\\w\\.]+) # something like user@...\n+REPO_REGEX = re.compile(r\"\"\"\n+(?x)\n+((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n+ | # or\n+ (\\w+@[\\w\\.]+) # something like user@...\n )\n-.*\n-\"\"\"\n+\"\"\")\n \n \n def is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n- return bool(re.match(REPO_REGEX, value, re.VERBOSE))\n+ return bool(REPO_REGEX.match(value))\n \n \n def expand_abbreviations(template, config_dict):\n", "issue": "Fix the repo regex to handle + in URLs\nThe documentation said that you could do git+https:// or git+ssh:// but that was not, in fact, true.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException, RepositoryNotFound\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = \"\"\"\n(\n((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n.*\n\"\"\"\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(re.match(REPO_REGEX, value, re.VERBOSE))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. 
If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if not os.path.isdir(repo_dir):\n raise RepositoryNotFound(\n 'The repository {0} could not be located.'.format(template)\n )\n\n template_name = os.path.basename(template)\n\n if replay:\n context = load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n", "path": "cookiecutter/main.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException, RepositoryNotFound\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 
'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = re.compile(r\"\"\"\n(?x)\n((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n\"\"\")\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(REPO_REGEX.match(value))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if not os.path.isdir(repo_dir):\n raise RepositoryNotFound(\n 'The repository {0} could not be located.'.format(template)\n )\n\n template_name = os.path.basename(template)\n\n if replay:\n context = load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n", "path": 
"cookiecutter/main.py"}]} | 1,655 | 246 |
gh_patches_debug_20096 | rasdani/github-patches | git_diff | liqd__adhocracy4-1005 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
comments: make sure blocked comments are not searched or filtered for their category
**URL:**
**user:**
**expected behaviour:** when I search for sth. in the comments, I do not expect to find blocked comments, of which I cannot see the comment text
**behaviour:** because the search happens in the backend, blocked comments are also searched, while the comment list from the serializer does not show the comment text of searched comments
**important screensize:**
**device & browser:**
**Comment/Question:**
Screenshot?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `adhocracy4/comments_async/filters.py`
Content:
```
1 from django.db import models
2 from rest_framework.filters import BaseFilterBackend
3 from rest_framework.filters import SearchFilter
4
5
6 class CommentCategoryFilterBackend(BaseFilterBackend):
7 """Filter the comments for the categories."""
8
9 def filter_queryset(self, request, queryset, view):
10
11 if 'comment_category' in request.GET:
12 category = request.GET['comment_category']
13 return queryset.filter(comment_categories__contains=category)
14
15 return queryset
16
17
18 class CommentOrderingFilterBackend(BaseFilterBackend):
19 """Order the comments."""
20
21 def filter_queryset(self, request, queryset, view):
22
23 if 'ordering' in request.GET:
24 ordering = request.GET['ordering']
25
26 if ordering == 'new':
27 return queryset.order_by('-created')
28 elif ordering == 'ans':
29 queryset = queryset\
30 .annotate(comment_count=models.Count(
31 'child_comments', distinct=True))
32 return queryset.order_by('-comment_count', '-created')
33 elif ordering == 'pos':
34 queryset = queryset\
35 .annotate(positive_rating_count=models.Count(
36 models.Case(
37 models.When(
38 ratings__value=1,
39 then=models.F('ratings__id')
40 ),
41 output_field=models.IntegerField()
42 ),
43 distinct=True))
44 return queryset.order_by('-positive_rating_count', '-created')
45 elif ordering == 'neg':
46 queryset = queryset\
47 .annotate(negative_rating_count=models.Count(
48 models.Case(
49 models.When(
50 ratings__value=-1,
51 then=models.F('ratings__id')
52 ),
53 output_field=models.IntegerField()
54 ),
55 distinct=True))
56 return queryset.order_by('-negative_rating_count', '-created')
57 elif ordering == 'dis':
58 return queryset.order_by(
59 models.F('last_discussed').desc(nulls_last=True),
60 '-created'
61 )
62 elif ordering == 'mom':
63 return queryset.order_by('-is_moderator_marked', '-created')
64
65 return queryset
66
67
68 class CustomSearchFilter(SearchFilter):
69
70 def filter_queryset(self, request, queryset, view):
71 qs = super().filter_queryset(request, queryset, view)
72 if self.get_search_terms(request):
73 return qs.filter(is_removed=False, is_censored=False)
74 return qs
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/adhocracy4/comments_async/filters.py b/adhocracy4/comments_async/filters.py
--- a/adhocracy4/comments_async/filters.py
+++ b/adhocracy4/comments_async/filters.py
@@ -8,9 +8,12 @@
def filter_queryset(self, request, queryset, view):
- if 'comment_category' in request.GET:
+ if ('comment_category' in request.GET
+ and request.GET['comment_category'] != ''):
category = request.GET['comment_category']
- return queryset.filter(comment_categories__contains=category)
+ return queryset.filter(
+ comment_categories__contains=category,
+ is_blocked=False)
return queryset
@@ -70,5 +73,6 @@
def filter_queryset(self, request, queryset, view):
qs = super().filter_queryset(request, queryset, view)
if self.get_search_terms(request):
- return qs.filter(is_removed=False, is_censored=False)
+ return qs.filter(is_removed=False, is_censored=False,
+ is_blocked=False)
return qs
| {"golden_diff": "diff --git a/adhocracy4/comments_async/filters.py b/adhocracy4/comments_async/filters.py\n--- a/adhocracy4/comments_async/filters.py\n+++ b/adhocracy4/comments_async/filters.py\n@@ -8,9 +8,12 @@\n \n def filter_queryset(self, request, queryset, view):\n \n- if 'comment_category' in request.GET:\n+ if ('comment_category' in request.GET\n+ and request.GET['comment_category'] != ''):\n category = request.GET['comment_category']\n- return queryset.filter(comment_categories__contains=category)\n+ return queryset.filter(\n+ comment_categories__contains=category,\n+ is_blocked=False)\n \n return queryset\n \n@@ -70,5 +73,6 @@\n def filter_queryset(self, request, queryset, view):\n qs = super().filter_queryset(request, queryset, view)\n if self.get_search_terms(request):\n- return qs.filter(is_removed=False, is_censored=False)\n+ return qs.filter(is_removed=False, is_censored=False,\n+ is_blocked=False)\n return qs\n", "issue": "comments: make sure blocked comments are not searched or filtered for their category\n**URL:** \r\n**user:** \r\n**expected behaviour:** when I search for sth. in the comments, I do not expect to find blocked comments, of which I cannot see the comment text\r\n**behaviour:** because the search happens in the backend, blocked comments are also searched, while the comment list from the serializer does not show the comment text of searched comments\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.db import models\nfrom rest_framework.filters import BaseFilterBackend\nfrom rest_framework.filters import SearchFilter\n\n\nclass CommentCategoryFilterBackend(BaseFilterBackend):\n \"\"\"Filter the comments for the categories.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n\n if 'comment_category' in request.GET:\n category = request.GET['comment_category']\n return queryset.filter(comment_categories__contains=category)\n\n return queryset\n\n\nclass CommentOrderingFilterBackend(BaseFilterBackend):\n \"\"\"Order the comments.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n\n if 'ordering' in request.GET:\n ordering = request.GET['ordering']\n\n if ordering == 'new':\n return queryset.order_by('-created')\n elif ordering == 'ans':\n queryset = queryset\\\n .annotate(comment_count=models.Count(\n 'child_comments', distinct=True))\n return queryset.order_by('-comment_count', '-created')\n elif ordering == 'pos':\n queryset = queryset\\\n .annotate(positive_rating_count=models.Count(\n models.Case(\n models.When(\n ratings__value=1,\n then=models.F('ratings__id')\n ),\n output_field=models.IntegerField()\n ),\n distinct=True))\n return queryset.order_by('-positive_rating_count', '-created')\n elif ordering == 'neg':\n queryset = queryset\\\n .annotate(negative_rating_count=models.Count(\n models.Case(\n models.When(\n ratings__value=-1,\n then=models.F('ratings__id')\n ),\n output_field=models.IntegerField()\n ),\n distinct=True))\n return queryset.order_by('-negative_rating_count', '-created')\n elif ordering == 'dis':\n return queryset.order_by(\n models.F('last_discussed').desc(nulls_last=True),\n '-created'\n )\n elif ordering == 'mom':\n return queryset.order_by('-is_moderator_marked', '-created')\n\n return queryset\n\n\nclass CustomSearchFilter(SearchFilter):\n\n def filter_queryset(self, request, queryset, view):\n qs = super().filter_queryset(request, queryset, view)\n if self.get_search_terms(request):\n return 
qs.filter(is_removed=False, is_censored=False)\n return qs\n", "path": "adhocracy4/comments_async/filters.py"}], "after_files": [{"content": "from django.db import models\nfrom rest_framework.filters import BaseFilterBackend\nfrom rest_framework.filters import SearchFilter\n\n\nclass CommentCategoryFilterBackend(BaseFilterBackend):\n \"\"\"Filter the comments for the categories.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n\n if ('comment_category' in request.GET\n and request.GET['comment_category'] != ''):\n category = request.GET['comment_category']\n return queryset.filter(\n comment_categories__contains=category,\n is_blocked=False)\n\n return queryset\n\n\nclass CommentOrderingFilterBackend(BaseFilterBackend):\n \"\"\"Order the comments.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n\n if 'ordering' in request.GET:\n ordering = request.GET['ordering']\n\n if ordering == 'new':\n return queryset.order_by('-created')\n elif ordering == 'ans':\n queryset = queryset\\\n .annotate(comment_count=models.Count(\n 'child_comments', distinct=True))\n return queryset.order_by('-comment_count', '-created')\n elif ordering == 'pos':\n queryset = queryset\\\n .annotate(positive_rating_count=models.Count(\n models.Case(\n models.When(\n ratings__value=1,\n then=models.F('ratings__id')\n ),\n output_field=models.IntegerField()\n ),\n distinct=True))\n return queryset.order_by('-positive_rating_count', '-created')\n elif ordering == 'neg':\n queryset = queryset\\\n .annotate(negative_rating_count=models.Count(\n models.Case(\n models.When(\n ratings__value=-1,\n then=models.F('ratings__id')\n ),\n output_field=models.IntegerField()\n ),\n distinct=True))\n return queryset.order_by('-negative_rating_count', '-created')\n elif ordering == 'dis':\n return queryset.order_by(\n models.F('last_discussed').desc(nulls_last=True),\n '-created'\n )\n elif ordering == 'mom':\n return queryset.order_by('-is_moderator_marked', '-created')\n\n return queryset\n\n\nclass CustomSearchFilter(SearchFilter):\n\n def filter_queryset(self, request, queryset, view):\n qs = super().filter_queryset(request, queryset, view)\n if self.get_search_terms(request):\n return qs.filter(is_removed=False, is_censored=False,\n is_blocked=False)\n return qs\n", "path": "adhocracy4/comments_async/filters.py"}]} | 982 | 237 |
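For orientation, these filter backends are meant to be plugged into a Django REST Framework list view. The wiring below is a hypothetical sketch, not code from adhocracy4: the view class, the omitted queryset/serializer and the `comment` search field name are assumptions.

```python
from rest_framework import generics

from adhocracy4.comments_async.filters import (
    CommentCategoryFilterBackend,
    CommentOrderingFilterBackend,
    CustomSearchFilter,
)


class CommentListView(generics.ListAPIView):
    """Hypothetical endpoint; queryset and serializer_class omitted here,
    they would come from the comments app."""

    filter_backends = [
        CommentCategoryFilterBackend,   # honours ?comment_category=...
        CommentOrderingFilterBackend,   # honours ?ordering=new|ans|pos|neg|dis|mom
        CustomSearchFilter,             # honours ?search=..., now skipping blocked comments
    ]
    search_fields = ["comment"]         # assumed name of the comment text field
```

With a wiring like this, every search or category request passes through the two code paths changed by the diff above, which is why both needed the `is_blocked=False` condition.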
gh_patches_debug_10648 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5449 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
On-demand export does not work
More precisely, the Markdown export does not work when clicking the "Exporter le contenu" ("Export the content") button, whereas it does work at publication time.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/tutorialv2/api/views.py`
Content:
```
1 import contextlib
2 from pathlib import Path
3
4 from django.http import Http404
5 from django.utils import translation
6 from django.utils.translation import gettext as _
7 from rest_framework import status
8 from rest_framework.fields import empty
9 from rest_framework.generics import UpdateAPIView, ListCreateAPIView, get_object_or_404
10 from rest_framework.response import Response
11 from rest_framework.serializers import Serializer, CharField, BooleanField
12 from rest_framework.permissions import IsAuthenticatedOrReadOnly
13
14 from zds.member.api.permissions import CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly, IsAuthorOrStaff
15 from zds.tutorialv2.publication_utils import PublicatorRegistry
16 from zds.tutorialv2.utils import search_container_or_404
17 from zds.utils.api.views import KarmaView
18 from zds.tutorialv2.models.database import ContentReaction, PublishableContent, PublicationEvent
19
20
21 class ContainerReadinessSerializer(Serializer):
22 parent_container_slug = CharField(allow_blank=True, allow_null=True, required=False)
23 container_slug = CharField(required=True)
24 ready_to_publish = BooleanField(required=True)
25
26 def run_validation(self, data=empty):
27 init = super().run_validation(data)
28 if not init:
29 return init
30 if not data.get('parent_container_slug', ''):
31 init.pop('parent_container_slug', '')
32 return init
33
34 def save(self, **kwargs):
35 if not self.validated_data:
36 self.is_valid(True)
37 versioned = self.instance.load_version()
38 container = search_container_or_404(versioned, self.validated_data)
39 container.ready_to_publish = self.validated_data['ready_to_publish']
40 sha = versioned.repo_update(versioned.title, versioned.get_introduction(), versioned.get_conclusion(),
41 commit_message=_('{} est {} à la publication.').format(
42 container.get_path(True),
43 _('prêt') if container.ready_to_publish else _('ignoré')))
44 PublishableContent.objects.filter(pk=self.instance.pk).update(sha_draft=sha)
45
46 def to_representation(self, instance):
47 return {}
48
49
50 class ContentReactionKarmaView(KarmaView):
51 queryset = ContentReaction.objects.all()
52 permission_classes = (IsAuthenticatedOrReadOnly, CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly)
53
54
55 class ContainerPublicationReadinessView(UpdateAPIView):
56 permission_classes = (IsAuthorOrStaff, )
57 serializer_class = ContainerReadinessSerializer
58
59 def get_object(self):
60 content = PublishableContent.objects.prefetch_related('authors')\
61 .filter(pk=int(self.kwargs.get('pk', 0)))\
62 .first()
63 if not content:
64 raise Http404()
65 self.check_object_permissions(self.request, object)
66 return content
67
68
69 class ExportView(ListCreateAPIView):
70 permission_classes = (IsAuthorOrStaff,)
71 serializer_class = Serializer
72
73 def get_queryset(self):
74 return PublicationEvent.objects.filter(published_object__content__pk=self.kwargs.get('pk', 0))
75
76 def ensure_directories(self, content: PublishableContent):
77 final_directory = Path(content.public_version.get_extra_contents_directory())
78 building_directory = Path(str(final_directory.parent) + '__building', final_directory.name)
79 with contextlib.suppress(FileExistsError):
80 final_directory.mkdir(parents=True)
81 with contextlib.suppress(FileExistsError):
82 building_directory.mkdir(parents=True)
83 return building_directory, final_directory
84
85 def create(self, request, *args, **kwargs):
86 try:
87 publishable_content = get_object_or_404(PublishableContent.objects, pk=int(kwargs.get('pk')))
88 if not publishable_content.public_version:
89 raise Http404('Not public content')
90 tmp_dir, _ = self.ensure_directories(publishable_content)
91 versioned = publishable_content.public_version.load_public_version()
92 base_name = str(Path(tmp_dir, versioned.slug))
93 md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))
94
95 PublicatorRegistry.get('md').publish(md_file_path, base_name,
96 versioned=versioned,
97 cur_language=translation.get_language())
98 PublicatorRegistry.get('watchdog').publish_from_published_content(publishable_content.public_version)
99 except ValueError:
100 return Response({}, status=status.HTTP_400_BAD_REQUEST, headers={})
101 else:
102 return Response({}, status=status.HTTP_201_CREATED, headers={})
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/tutorialv2/api/views.py b/zds/tutorialv2/api/views.py
--- a/zds/tutorialv2/api/views.py
+++ b/zds/tutorialv2/api/views.py
@@ -88,7 +88,7 @@
if not publishable_content.public_version:
raise Http404('Not public content')
tmp_dir, _ = self.ensure_directories(publishable_content)
- versioned = publishable_content.public_version.load_public_version()
+ versioned = publishable_content.load_version(public=True)
base_name = str(Path(tmp_dir, versioned.slug))
md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))
| {"golden_diff": "diff --git a/zds/tutorialv2/api/views.py b/zds/tutorialv2/api/views.py\n--- a/zds/tutorialv2/api/views.py\n+++ b/zds/tutorialv2/api/views.py\n@@ -88,7 +88,7 @@\n if not publishable_content.public_version:\n raise Http404('Not public content')\n tmp_dir, _ = self.ensure_directories(publishable_content)\n- versioned = publishable_content.public_version.load_public_version()\n+ versioned = publishable_content.load_version(public=True)\n base_name = str(Path(tmp_dir, versioned.slug))\n md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))\n", "issue": "L'export \u00e0 la demande ne fonctionne pas\nPlus pr\u00e9cisement, l'export Markdown ne fonctionne pas quand on clique sur le bouton \"Exporter le contenu\" alors qu'il fonctionne lors de la publication.\n", "before_files": [{"content": "import contextlib\nfrom pathlib import Path\n\nfrom django.http import Http404\nfrom django.utils import translation\nfrom django.utils.translation import gettext as _\nfrom rest_framework import status\nfrom rest_framework.fields import empty\nfrom rest_framework.generics import UpdateAPIView, ListCreateAPIView, get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import Serializer, CharField, BooleanField\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\n\nfrom zds.member.api.permissions import CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly, IsAuthorOrStaff\nfrom zds.tutorialv2.publication_utils import PublicatorRegistry\nfrom zds.tutorialv2.utils import search_container_or_404\nfrom zds.utils.api.views import KarmaView\nfrom zds.tutorialv2.models.database import ContentReaction, PublishableContent, PublicationEvent\n\n\nclass ContainerReadinessSerializer(Serializer):\n parent_container_slug = CharField(allow_blank=True, allow_null=True, required=False)\n container_slug = CharField(required=True)\n ready_to_publish = BooleanField(required=True)\n\n def run_validation(self, data=empty):\n init = super().run_validation(data)\n if not init:\n return init\n if not data.get('parent_container_slug', ''):\n init.pop('parent_container_slug', '')\n return init\n\n def save(self, **kwargs):\n if not self.validated_data:\n self.is_valid(True)\n versioned = self.instance.load_version()\n container = search_container_or_404(versioned, self.validated_data)\n container.ready_to_publish = self.validated_data['ready_to_publish']\n sha = versioned.repo_update(versioned.title, versioned.get_introduction(), versioned.get_conclusion(),\n commit_message=_('{} est {} \u00e0 la publication.').format(\n container.get_path(True),\n _('pr\u00eat') if container.ready_to_publish else _('ignor\u00e9')))\n PublishableContent.objects.filter(pk=self.instance.pk).update(sha_draft=sha)\n\n def to_representation(self, instance):\n return {}\n\n\nclass ContentReactionKarmaView(KarmaView):\n queryset = ContentReaction.objects.all()\n permission_classes = (IsAuthenticatedOrReadOnly, CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly)\n\n\nclass ContainerPublicationReadinessView(UpdateAPIView):\n permission_classes = (IsAuthorOrStaff, )\n serializer_class = ContainerReadinessSerializer\n\n def get_object(self):\n content = PublishableContent.objects.prefetch_related('authors')\\\n .filter(pk=int(self.kwargs.get('pk', 0)))\\\n .first()\n if not content:\n raise Http404()\n self.check_object_permissions(self.request, object)\n return content\n\n\nclass ExportView(ListCreateAPIView):\n permission_classes = (IsAuthorOrStaff,)\n serializer_class = Serializer\n\n def 
get_queryset(self):\n return PublicationEvent.objects.filter(published_object__content__pk=self.kwargs.get('pk', 0))\n\n def ensure_directories(self, content: PublishableContent):\n final_directory = Path(content.public_version.get_extra_contents_directory())\n building_directory = Path(str(final_directory.parent) + '__building', final_directory.name)\n with contextlib.suppress(FileExistsError):\n final_directory.mkdir(parents=True)\n with contextlib.suppress(FileExistsError):\n building_directory.mkdir(parents=True)\n return building_directory, final_directory\n\n def create(self, request, *args, **kwargs):\n try:\n publishable_content = get_object_or_404(PublishableContent.objects, pk=int(kwargs.get('pk')))\n if not publishable_content.public_version:\n raise Http404('Not public content')\n tmp_dir, _ = self.ensure_directories(publishable_content)\n versioned = publishable_content.public_version.load_public_version()\n base_name = str(Path(tmp_dir, versioned.slug))\n md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))\n\n PublicatorRegistry.get('md').publish(md_file_path, base_name,\n versioned=versioned,\n cur_language=translation.get_language())\n PublicatorRegistry.get('watchdog').publish_from_published_content(publishable_content.public_version)\n except ValueError:\n return Response({}, status=status.HTTP_400_BAD_REQUEST, headers={})\n else:\n return Response({}, status=status.HTTP_201_CREATED, headers={})\n", "path": "zds/tutorialv2/api/views.py"}], "after_files": [{"content": "import contextlib\nfrom pathlib import Path\n\nfrom django.http import Http404\nfrom django.utils import translation\nfrom django.utils.translation import gettext as _\nfrom rest_framework import status\nfrom rest_framework.fields import empty\nfrom rest_framework.generics import UpdateAPIView, ListCreateAPIView, get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import Serializer, CharField, BooleanField\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\n\nfrom zds.member.api.permissions import CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly, IsAuthorOrStaff\nfrom zds.tutorialv2.publication_utils import PublicatorRegistry\nfrom zds.tutorialv2.utils import search_container_or_404\nfrom zds.utils.api.views import KarmaView\nfrom zds.tutorialv2.models.database import ContentReaction, PublishableContent, PublicationEvent\n\n\nclass ContainerReadinessSerializer(Serializer):\n parent_container_slug = CharField(allow_blank=True, allow_null=True, required=False)\n container_slug = CharField(required=True)\n ready_to_publish = BooleanField(required=True)\n\n def run_validation(self, data=empty):\n init = super().run_validation(data)\n if not init:\n return init\n if not data.get('parent_container_slug', ''):\n init.pop('parent_container_slug', '')\n return init\n\n def save(self, **kwargs):\n if not self.validated_data:\n self.is_valid(True)\n versioned = self.instance.load_version()\n container = search_container_or_404(versioned, self.validated_data)\n container.ready_to_publish = self.validated_data['ready_to_publish']\n sha = versioned.repo_update(versioned.title, versioned.get_introduction(), versioned.get_conclusion(),\n commit_message=_('{} est {} \u00e0 la publication.').format(\n container.get_path(True),\n _('pr\u00eat') if container.ready_to_publish else _('ignor\u00e9')))\n PublishableContent.objects.filter(pk=self.instance.pk).update(sha_draft=sha)\n\n def to_representation(self, instance):\n return {}\n\n\nclass 
ContentReactionKarmaView(KarmaView):\n queryset = ContentReaction.objects.all()\n permission_classes = (IsAuthenticatedOrReadOnly, CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly)\n\n\nclass ContainerPublicationReadinessView(UpdateAPIView):\n permission_classes = (IsAuthorOrStaff, )\n serializer_class = ContainerReadinessSerializer\n\n def get_object(self):\n content = PublishableContent.objects.prefetch_related('authors')\\\n .filter(pk=int(self.kwargs.get('pk', 0)))\\\n .first()\n if not content:\n raise Http404()\n self.check_object_permissions(self.request, object)\n return content\n\n\nclass ExportView(ListCreateAPIView):\n permission_classes = (IsAuthorOrStaff,)\n serializer_class = Serializer\n\n def get_queryset(self):\n return PublicationEvent.objects.filter(published_object__content__pk=self.kwargs.get('pk', 0))\n\n def ensure_directories(self, content: PublishableContent):\n final_directory = Path(content.public_version.get_extra_contents_directory())\n building_directory = Path(str(final_directory.parent) + '__building', final_directory.name)\n with contextlib.suppress(FileExistsError):\n final_directory.mkdir(parents=True)\n with contextlib.suppress(FileExistsError):\n building_directory.mkdir(parents=True)\n return building_directory, final_directory\n\n def create(self, request, *args, **kwargs):\n try:\n publishable_content = get_object_or_404(PublishableContent.objects, pk=int(kwargs.get('pk')))\n if not publishable_content.public_version:\n raise Http404('Not public content')\n tmp_dir, _ = self.ensure_directories(publishable_content)\n versioned = publishable_content.load_version(public=True)\n base_name = str(Path(tmp_dir, versioned.slug))\n md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))\n\n PublicatorRegistry.get('md').publish(md_file_path, base_name,\n versioned=versioned,\n cur_language=translation.get_language())\n PublicatorRegistry.get('watchdog').publish_from_published_content(publishable_content.public_version)\n except ValueError:\n return Response({}, status=status.HTTP_400_BAD_REQUEST, headers={})\n else:\n return Response({}, status=status.HTTP_201_CREATED, headers={})\n", "path": "zds/tutorialv2/api/views.py"}]} | 1,446 | 148 |
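One small aside on the `ensure_directories` helper that appears in this entry: wrapping `mkdir(parents=True)` in `contextlib.suppress(FileExistsError)` can be written more directly with `exist_ok=True`. A self-contained sketch (the paths are made up for the demo):

```python
from pathlib import Path

building = Path("/tmp/zds-export-demo__building/content-slug")
final = Path("/tmp/zds-export-demo/content-slug")

# Equivalent to the contextlib.suppress(FileExistsError) blocks above:
# create missing parents and ignore directories that already exist.
for directory in (building, final):
    directory.mkdir(parents=True, exist_ok=True)

print(building.is_dir(), final.is_dir())  # True True
```

Either spelling behaves the same; the patch itself only changes how the public version is loaded, not this directory handling.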
gh_patches_debug_10271 | rasdani/github-patches | git_diff | PaddlePaddle__Paddle2ONNX-15 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need check whether protoc existed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `variables.py`
Content:
```
1 # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from onnx import helper, onnx_pb2, TensorProto
16 import paddle.fluid.core as core
17
18
19 def paddle_variable_to_onnx_tensor(paddle_var_name, block):
20 # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.
21 paddle_var = block.var(paddle_var_name)
22 return helper.make_tensor_value_info(
23 paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],
24 paddle_var.shape)
25
26
27 PADDLE_TO_ONNX_DTYPE = {
28 core.VarDesc.VarType.FP32: onnx_pb2.TensorProto.FLOAT,
29 core.VarDesc.VarType.FP64: onnx_pb2.TensorProto.FLOAT16,
30 # '': onnx_pb2.TensorProto.DOUBLE,
31 core.VarDesc.VarType.INT32: onnx_pb2.TensorProto.INT32,
32 core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.INT16,
33 # '': onnx_pb2.TensorProto.INT8,
34 # '': onnx_pb2.TensorProto.UINT8,
35 core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.UINT16,
36 core.VarDesc.VarType.INT64: onnx_pb2.TensorProto.INT64,
37 # '': onnx_pb2.TensorProto.STRING,
38 # '': onnx_pb2.TensorProto.COMPLEX64,
39 # '': onnx_pb2.TensorProto.COMPLEX128,
40 core.VarDesc.VarType.BOOL: onnx_pb2.TensorProto.BOOL
41 }
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/variables.py b/variables.py
--- a/variables.py
+++ b/variables.py
@@ -19,9 +19,9 @@
def paddle_variable_to_onnx_tensor(paddle_var_name, block):
# TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.
paddle_var = block.var(paddle_var_name)
- return helper.make_tensor_value_info(
- paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],
- paddle_var.shape)
+ return helper.make_tensor_value_info(paddle_var_name,
+ PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],
+ paddle_var.shape)
PADDLE_TO_ONNX_DTYPE = {
| {"golden_diff": "diff --git a/variables.py b/variables.py\n--- a/variables.py\n+++ b/variables.py\n@@ -19,9 +19,9 @@\n def paddle_variable_to_onnx_tensor(paddle_var_name, block):\n # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.\n paddle_var = block.var(paddle_var_name)\n- return helper.make_tensor_value_info(\n- paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n- paddle_var.shape)\n+ return helper.make_tensor_value_info(paddle_var_name,\n+ PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n+ paddle_var.shape)\n \n \n PADDLE_TO_ONNX_DTYPE = {\n", "issue": "Need check whether protoc existed.\n\n", "before_files": [{"content": "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom onnx import helper, onnx_pb2, TensorProto\nimport paddle.fluid.core as core\n\n\ndef paddle_variable_to_onnx_tensor(paddle_var_name, block):\n # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.\n paddle_var = block.var(paddle_var_name)\n return helper.make_tensor_value_info(\n paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n paddle_var.shape)\n\n\nPADDLE_TO_ONNX_DTYPE = {\n core.VarDesc.VarType.FP32: onnx_pb2.TensorProto.FLOAT,\n core.VarDesc.VarType.FP64: onnx_pb2.TensorProto.FLOAT16,\n # '': onnx_pb2.TensorProto.DOUBLE,\n core.VarDesc.VarType.INT32: onnx_pb2.TensorProto.INT32,\n core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.INT16,\n # '': onnx_pb2.TensorProto.INT8,\n # '': onnx_pb2.TensorProto.UINT8,\n core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.UINT16,\n core.VarDesc.VarType.INT64: onnx_pb2.TensorProto.INT64,\n # '': onnx_pb2.TensorProto.STRING,\n # '': onnx_pb2.TensorProto.COMPLEX64,\n # '': onnx_pb2.TensorProto.COMPLEX128,\n core.VarDesc.VarType.BOOL: onnx_pb2.TensorProto.BOOL\n}\n", "path": "variables.py"}], "after_files": [{"content": "# Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom onnx import helper, onnx_pb2, TensorProto\nimport paddle.fluid.core as core\n\n\ndef paddle_variable_to_onnx_tensor(paddle_var_name, block):\n # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.\n paddle_var = block.var(paddle_var_name)\n return helper.make_tensor_value_info(paddle_var_name,\n PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n paddle_var.shape)\n\n\nPADDLE_TO_ONNX_DTYPE = {\n core.VarDesc.VarType.FP32: onnx_pb2.TensorProto.FLOAT,\n core.VarDesc.VarType.FP64: onnx_pb2.TensorProto.FLOAT16,\n # '': onnx_pb2.TensorProto.DOUBLE,\n core.VarDesc.VarType.INT32: onnx_pb2.TensorProto.INT32,\n core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.INT16,\n # '': onnx_pb2.TensorProto.INT8,\n # '': onnx_pb2.TensorProto.UINT8,\n core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.UINT16,\n core.VarDesc.VarType.INT64: onnx_pb2.TensorProto.INT64,\n # '': onnx_pb2.TensorProto.STRING,\n # '': onnx_pb2.TensorProto.COMPLEX64,\n # '': onnx_pb2.TensorProto.COMPLEX128,\n core.VarDesc.VarType.BOOL: onnx_pb2.TensorProto.BOOL\n}\n", "path": "variables.py"}]} | 817 | 163 |
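The helper reformatted in this entry is a thin wrapper around `onnx.helper.make_tensor_value_info`. The snippet below exercises that ONNX call on its own; the variable name and shape are made-up stand-ins for a real Paddle variable:

```python
from onnx import TensorProto, helper

# Build the ONNX value info that a Paddle FP32 variable would map to via
# PADDLE_TO_ONNX_DTYPE[core.VarDesc.VarType.FP32].
value_info = helper.make_tensor_value_info(
    "fc_0.tmp_2",       # variable name (illustrative only)
    TensorProto.FLOAT,  # element type
    [1, 1000],          # variable shape
)
print(value_info)
```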
gh_patches_debug_9971 | rasdani/github-patches | git_diff | pretix__pretix-882 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rich_text: tel schema
When providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretix/base/templatetags/rich_text.py`
Content:
```
1 import urllib.parse
2
3 import bleach
4 import markdown
5 from bleach import DEFAULT_CALLBACKS
6 from django import template
7 from django.conf import settings
8 from django.core import signing
9 from django.urls import reverse
10 from django.utils.http import is_safe_url
11 from django.utils.safestring import mark_safe
12
13 register = template.Library()
14
15 ALLOWED_TAGS = [
16 'a',
17 'abbr',
18 'acronym',
19 'b',
20 'blockquote',
21 'br',
22 'code',
23 'em',
24 'i',
25 'li',
26 'ol',
27 'strong',
28 'ul',
29 'p',
30 'table',
31 'tbody',
32 'thead',
33 'tr',
34 'td',
35 'th',
36 'div',
37 'span',
38 'hr',
39 'h1',
40 'h2',
41 'h3',
42 'h4',
43 'h5',
44 'h6',
45 'pre',
46 # Update doc/user/markdown.rst if you change this!
47 ]
48
49 ALLOWED_ATTRIBUTES = {
50 'a': ['href', 'title'],
51 'abbr': ['title'],
52 'acronym': ['title'],
53 'table': ['width'],
54 'td': ['width', 'align'],
55 'div': ['class'],
56 'p': ['class'],
57 'span': ['class'],
58 # Update doc/user/markdown.rst if you change this!
59 }
60
61
62 def safelink_callback(attrs, new=False):
63 url = attrs.get((None, 'href'), '/')
64 if not is_safe_url(url) and not url.startswith('mailto:'):
65 signer = signing.Signer(salt='safe-redirect')
66 attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))
67 attrs[None, 'target'] = '_blank'
68 attrs[None, 'rel'] = 'noopener'
69 return attrs
70
71
72 def abslink_callback(attrs, new=False):
73 attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))
74 attrs[None, 'target'] = '_blank'
75 attrs[None, 'rel'] = 'noopener'
76 return attrs
77
78
79 def markdown_compile(source):
80 return bleach.clean(
81 markdown.markdown(
82 source,
83 extensions=[
84 'markdown.extensions.sane_lists',
85 # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails
86 ]
87 ),
88 tags=ALLOWED_TAGS,
89 attributes=ALLOWED_ATTRIBUTES
90 )
91
92
93 @register.filter
94 def rich_text(text: str, **kwargs):
95 """
96 Processes markdown and cleans HTML in a text input.
97 """
98 text = str(text)
99 body_md = bleach.linkify(
100 markdown_compile(text),
101 callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])
102 )
103 return mark_safe(body_md)
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py
--- a/src/pretix/base/templatetags/rich_text.py
+++ b/src/pretix/base/templatetags/rich_text.py
@@ -58,6 +58,8 @@
# Update doc/user/markdown.rst if you change this!
}
+ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']
+
def safelink_callback(attrs, new=False):
url = attrs.get((None, 'href'), '/')
@@ -86,7 +88,8 @@
]
),
tags=ALLOWED_TAGS,
- attributes=ALLOWED_ATTRIBUTES
+ attributes=ALLOWED_ATTRIBUTES,
+ protocols=ALLOWED_PROTOCOLS,
)
| {"golden_diff": "diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py\n--- a/src/pretix/base/templatetags/rich_text.py\n+++ b/src/pretix/base/templatetags/rich_text.py\n@@ -58,6 +58,8 @@\n # Update doc/user/markdown.rst if you change this!\n }\n \n+ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']\n+\n \n def safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n@@ -86,7 +88,8 @@\n ]\n ),\n tags=ALLOWED_TAGS,\n- attributes=ALLOWED_ATTRIBUTES\n+ attributes=ALLOWED_ATTRIBUTES,\n+ protocols=ALLOWED_PROTOCOLS,\n )\n", "issue": "rich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\n", "before_files": [{"content": "import urllib.parse\n\nimport bleach\nimport markdown\nfrom bleach import DEFAULT_CALLBACKS\nfrom django import template\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.urls import reverse\nfrom django.utils.http import is_safe_url\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'br',\n 'code',\n 'em',\n 'i',\n 'li',\n 'ol',\n 'strong',\n 'ul',\n 'p',\n 'table',\n 'tbody',\n 'thead',\n 'tr',\n 'td',\n 'th',\n 'div',\n 'span',\n 'hr',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'pre',\n # Update doc/user/markdown.rst if you change this!\n]\n\nALLOWED_ATTRIBUTES = {\n 'a': ['href', 'title'],\n 'abbr': ['title'],\n 'acronym': ['title'],\n 'table': ['width'],\n 'td': ['width', 'align'],\n 'div': ['class'],\n 'p': ['class'],\n 'span': ['class'],\n # Update doc/user/markdown.rst if you change this!\n}\n\n\ndef safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n if not is_safe_url(url) and not url.startswith('mailto:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef abslink_callback(attrs, new=False):\n attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef markdown_compile(source):\n return bleach.clean(\n markdown.markdown(\n source,\n extensions=[\n 'markdown.extensions.sane_lists',\n # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. 
with mails\n ]\n ),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n body_md = bleach.linkify(\n markdown_compile(text),\n callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])\n )\n return mark_safe(body_md)\n", "path": "src/pretix/base/templatetags/rich_text.py"}], "after_files": [{"content": "import urllib.parse\n\nimport bleach\nimport markdown\nfrom bleach import DEFAULT_CALLBACKS\nfrom django import template\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.urls import reverse\nfrom django.utils.http import is_safe_url\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'br',\n 'code',\n 'em',\n 'i',\n 'li',\n 'ol',\n 'strong',\n 'ul',\n 'p',\n 'table',\n 'tbody',\n 'thead',\n 'tr',\n 'td',\n 'th',\n 'div',\n 'span',\n 'hr',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'pre',\n # Update doc/user/markdown.rst if you change this!\n]\n\nALLOWED_ATTRIBUTES = {\n 'a': ['href', 'title'],\n 'abbr': ['title'],\n 'acronym': ['title'],\n 'table': ['width'],\n 'td': ['width', 'align'],\n 'div': ['class'],\n 'p': ['class'],\n 'span': ['class'],\n # Update doc/user/markdown.rst if you change this!\n}\n\nALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']\n\n\ndef safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n if not is_safe_url(url) and not url.startswith('mailto:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef abslink_callback(attrs, new=False):\n attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef markdown_compile(source):\n return bleach.clean(\n markdown.markdown(\n source,\n extensions=[\n 'markdown.extensions.sane_lists',\n # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails\n ]\n ),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n body_md = bleach.linkify(\n markdown_compile(text),\n callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])\n )\n return mark_safe(body_md)\n", "path": "src/pretix/base/templatetags/rich_text.py"}]} | 1,247 | 188 |
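The heart of the patch above is bleach's `protocols` argument: during sanitisation, `href` values whose scheme is not whitelisted are stripped. A minimal check, independent of pretix's markdown pipeline:

```python
import bleach

html = '<a href="tel:+1-202-555-0102">Call us!</a>'

# bleach's default protocols are http, https and mailto, so the tel: href is dropped.
print(bleach.clean(html, tags=["a"], attributes={"a": ["href"]}))
# -> <a>Call us!</a>

# Whitelisting "tel" keeps the link intact.
print(bleach.clean(
    html,
    tags=["a"],
    attributes={"a": ["href"]},
    protocols=["http", "https", "mailto", "tel"],
))
# -> <a href="tel:+1-202-555-0102">Call us!</a>
```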
gh_patches_debug_30999 | rasdani/github-patches | git_diff | apache__airflow-28953 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support telegram-bot v20+
### Body
Currently our telegram integration uses Telegram v13 telegram-bot library. On 1st of Jan 2023 a new, backwards incompatible version of Telegram-bot has been released : https://pypi.org/project/python-telegram-bot/20.0/#history and at least as reported by MyPy and our test suite test failures, Telegram 20 needs some changes to work:
Here is a transition guide that might be helpful.
Transition guide is here: https://github.com/python-telegram-bot/python-telegram-bot/wiki/Transition-guide-to-Version-20.0
In the meantime we limit telegram to < 20.0.0
### Committer
- [X] I acknowledge that I am a maintainer/committer of the Apache Airflow project.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/providers/telegram/hooks/telegram.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """Hook for Telegram"""
19 from __future__ import annotations
20
21 import telegram
22 import tenacity
23
24 from airflow.exceptions import AirflowException
25 from airflow.hooks.base import BaseHook
26
27
28 class TelegramHook(BaseHook):
29 """
30 This hook allows you to post messages to Telegram using the telegram python-telegram-bot library.
31
32 The library can be found here: https://github.com/python-telegram-bot/python-telegram-bot
33 It accepts both telegram bot API token directly or connection that has telegram bot API token.
34 If both supplied, token parameter will be given precedence, otherwise 'password' field in the connection
35 from telegram_conn_id will be used.
36 chat_id can also be provided in the connection using 'host' field in connection.
37 Following is the details of a telegram_connection:
38 name: 'telegram-connection-name'
39 conn_type: 'http'
40 password: 'TELEGRAM_TOKEN'
41 host: 'chat_id' (optional)
42 Examples:
43 .. code-block:: python
44
45 # Create hook
46 telegram_hook = TelegramHook(telegram_conn_id="telegram_default")
47 # or telegram_hook = TelegramHook(telegram_conn_id='telegram_default', chat_id='-1xxx')
48 # or telegram_hook = TelegramHook(token='xxx:xxx', chat_id='-1xxx')
49
50 # Call method from telegram bot client
51 telegram_hook.send_message(None, {"text": "message", "chat_id": "-1xxx"})
52 # or telegram_hook.send_message(None', {"text": "message"})
53
54 :param telegram_conn_id: connection that optionally has Telegram API token in the password field
55 :param token: optional telegram API token
56 :param chat_id: optional chat_id of the telegram chat/channel/group
57 """
58
59 def __init__(
60 self,
61 telegram_conn_id: str | None = None,
62 token: str | None = None,
63 chat_id: str | None = None,
64 ) -> None:
65 super().__init__()
66 self.token = self.__get_token(token, telegram_conn_id)
67 self.chat_id = self.__get_chat_id(chat_id, telegram_conn_id)
68 self.connection = self.get_conn()
69
70 def get_conn(self) -> telegram.bot.Bot:
71 """
72 Returns the telegram bot client
73
74 :return: telegram bot client
75 """
76 return telegram.bot.Bot(token=self.token)
77
78 def __get_token(self, token: str | None, telegram_conn_id: str | None) -> str:
79 """
80 Returns the telegram API token
81
82 :param token: telegram API token
83 :param telegram_conn_id: telegram connection name
84 :return: telegram API token
85 """
86 if token is not None:
87 return token
88
89 if telegram_conn_id is not None:
90 conn = self.get_connection(telegram_conn_id)
91
92 if not conn.password:
93 raise AirflowException("Missing token(password) in Telegram connection")
94
95 return conn.password
96
97 raise AirflowException("Cannot get token: No valid Telegram connection supplied.")
98
99 def __get_chat_id(self, chat_id: str | None, telegram_conn_id: str | None) -> str | None:
100 """
101 Returns the telegram chat ID for a chat/channel/group
102
103 :param chat_id: optional chat ID
104 :param telegram_conn_id: telegram connection name
105 :return: telegram chat ID
106 """
107 if chat_id is not None:
108 return chat_id
109
110 if telegram_conn_id is not None:
111 conn = self.get_connection(telegram_conn_id)
112 return conn.host
113
114 return None
115
116 @tenacity.retry(
117 retry=tenacity.retry_if_exception_type(telegram.error.TelegramError),
118 stop=tenacity.stop_after_attempt(5),
119 wait=tenacity.wait_fixed(1),
120 )
121 def send_message(self, api_params: dict) -> None:
122 """
123 Sends the message to a telegram channel or chat.
124
125 :param api_params: params for telegram_instance.send_message. It can also be used to override chat_id
126 """
127 kwargs = {
128 "chat_id": self.chat_id,
129 "parse_mode": telegram.parsemode.ParseMode.HTML,
130 "disable_web_page_preview": True,
131 }
132 kwargs.update(api_params)
133
134 if "text" not in kwargs or kwargs["text"] is None:
135 raise AirflowException("'text' must be provided for telegram message")
136
137 if kwargs["chat_id"] is None:
138 raise AirflowException("'chat_id' must be provided for telegram message")
139
140 response = self.connection.send_message(**kwargs)
141 self.log.debug(response)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/providers/telegram/hooks/telegram.py b/airflow/providers/telegram/hooks/telegram.py
--- a/airflow/providers/telegram/hooks/telegram.py
+++ b/airflow/providers/telegram/hooks/telegram.py
@@ -18,6 +18,8 @@
"""Hook for Telegram"""
from __future__ import annotations
+import asyncio
+
import telegram
import tenacity
@@ -67,13 +69,13 @@
self.chat_id = self.__get_chat_id(chat_id, telegram_conn_id)
self.connection = self.get_conn()
- def get_conn(self) -> telegram.bot.Bot:
+ def get_conn(self) -> telegram.Bot:
"""
Returns the telegram bot client
:return: telegram bot client
"""
- return telegram.bot.Bot(token=self.token)
+ return telegram.Bot(self.token)
def __get_token(self, token: str | None, telegram_conn_id: str | None) -> str:
"""
@@ -126,7 +128,7 @@
"""
kwargs = {
"chat_id": self.chat_id,
- "parse_mode": telegram.parsemode.ParseMode.HTML,
+ "parse_mode": telegram.constants.ParseMode.HTML,
"disable_web_page_preview": True,
}
kwargs.update(api_params)
@@ -137,5 +139,5 @@
if kwargs["chat_id"] is None:
raise AirflowException("'chat_id' must be provided for telegram message")
- response = self.connection.send_message(**kwargs)
+ response = asyncio.run(self.connection.send_message(**kwargs))
self.log.debug(response)
| {"golden_diff": "diff --git a/airflow/providers/telegram/hooks/telegram.py b/airflow/providers/telegram/hooks/telegram.py\n--- a/airflow/providers/telegram/hooks/telegram.py\n+++ b/airflow/providers/telegram/hooks/telegram.py\n@@ -18,6 +18,8 @@\n \"\"\"Hook for Telegram\"\"\"\n from __future__ import annotations\n \n+import asyncio\n+\n import telegram\n import tenacity\n \n@@ -67,13 +69,13 @@\n self.chat_id = self.__get_chat_id(chat_id, telegram_conn_id)\n self.connection = self.get_conn()\n \n- def get_conn(self) -> telegram.bot.Bot:\n+ def get_conn(self) -> telegram.Bot:\n \"\"\"\n Returns the telegram bot client\n \n :return: telegram bot client\n \"\"\"\n- return telegram.bot.Bot(token=self.token)\n+ return telegram.Bot(self.token)\n \n def __get_token(self, token: str | None, telegram_conn_id: str | None) -> str:\n \"\"\"\n@@ -126,7 +128,7 @@\n \"\"\"\n kwargs = {\n \"chat_id\": self.chat_id,\n- \"parse_mode\": telegram.parsemode.ParseMode.HTML,\n+ \"parse_mode\": telegram.constants.ParseMode.HTML,\n \"disable_web_page_preview\": True,\n }\n kwargs.update(api_params)\n@@ -137,5 +139,5 @@\n if kwargs[\"chat_id\"] is None:\n raise AirflowException(\"'chat_id' must be provided for telegram message\")\n \n- response = self.connection.send_message(**kwargs)\n+ response = asyncio.run(self.connection.send_message(**kwargs))\n self.log.debug(response)\n", "issue": "Support telegram-bot v20+\n### Body\n\nCurrently our telegram integration uses Telegram v13 telegram-bot library. On 1st of Jan 2023 a new, backwards incompatible version of Telegram-bot has been released : https://pypi.org/project/python-telegram-bot/20.0/#history and at least as reported by MyPy and our test suite test failures, Telegram 20 needs some changes to work:\r\n\r\nHere is a transition guide that might be helpful. \r\n\r\nTransition guide is here: https://github.com/python-telegram-bot/python-telegram-bot/wiki/Transition-guide-to-Version-20.0\r\n\r\nIn the meantime we limit telegram to < 20.0.0\n\n### Committer\n\n- [X] I acknowledge that I am a maintainer/committer of the Apache Airflow project.\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Hook for Telegram\"\"\"\nfrom __future__ import annotations\n\nimport telegram\nimport tenacity\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.base import BaseHook\n\n\nclass TelegramHook(BaseHook):\n \"\"\"\n This hook allows you to post messages to Telegram using the telegram python-telegram-bot library.\n\n The library can be found here: https://github.com/python-telegram-bot/python-telegram-bot\n It accepts both telegram bot API token directly or connection that has telegram bot API token.\n If both supplied, token parameter will be given precedence, otherwise 'password' field in the connection\n from telegram_conn_id will be used.\n chat_id can also be provided in the connection using 'host' field in connection.\n Following is the details of a telegram_connection:\n name: 'telegram-connection-name'\n conn_type: 'http'\n password: 'TELEGRAM_TOKEN'\n host: 'chat_id' (optional)\n Examples:\n .. code-block:: python\n\n # Create hook\n telegram_hook = TelegramHook(telegram_conn_id=\"telegram_default\")\n # or telegram_hook = TelegramHook(telegram_conn_id='telegram_default', chat_id='-1xxx')\n # or telegram_hook = TelegramHook(token='xxx:xxx', chat_id='-1xxx')\n\n # Call method from telegram bot client\n telegram_hook.send_message(None, {\"text\": \"message\", \"chat_id\": \"-1xxx\"})\n # or telegram_hook.send_message(None', {\"text\": \"message\"})\n\n :param telegram_conn_id: connection that optionally has Telegram API token in the password field\n :param token: optional telegram API token\n :param chat_id: optional chat_id of the telegram chat/channel/group\n \"\"\"\n\n def __init__(\n self,\n telegram_conn_id: str | None = None,\n token: str | None = None,\n chat_id: str | None = None,\n ) -> None:\n super().__init__()\n self.token = self.__get_token(token, telegram_conn_id)\n self.chat_id = self.__get_chat_id(chat_id, telegram_conn_id)\n self.connection = self.get_conn()\n\n def get_conn(self) -> telegram.bot.Bot:\n \"\"\"\n Returns the telegram bot client\n\n :return: telegram bot client\n \"\"\"\n return telegram.bot.Bot(token=self.token)\n\n def __get_token(self, token: str | None, telegram_conn_id: str | None) -> str:\n \"\"\"\n Returns the telegram API token\n\n :param token: telegram API token\n :param telegram_conn_id: telegram connection name\n :return: telegram API token\n \"\"\"\n if token is not None:\n return token\n\n if telegram_conn_id is not None:\n conn = self.get_connection(telegram_conn_id)\n\n if not conn.password:\n raise AirflowException(\"Missing token(password) in Telegram connection\")\n\n return conn.password\n\n raise AirflowException(\"Cannot get token: No valid Telegram connection supplied.\")\n\n def __get_chat_id(self, chat_id: str | None, telegram_conn_id: str | None) -> str | None:\n \"\"\"\n Returns the telegram chat ID for a chat/channel/group\n\n :param chat_id: optional chat ID\n :param telegram_conn_id: telegram connection name\n :return: telegram chat ID\n \"\"\"\n if chat_id is not None:\n return chat_id\n\n if telegram_conn_id is not None:\n conn = self.get_connection(telegram_conn_id)\n return conn.host\n\n return None\n\n @tenacity.retry(\n retry=tenacity.retry_if_exception_type(telegram.error.TelegramError),\n stop=tenacity.stop_after_attempt(5),\n wait=tenacity.wait_fixed(1),\n )\n def send_message(self, api_params: dict) -> None:\n \"\"\"\n Sends the message to a telegram channel or chat.\n\n :param 
api_params: params for telegram_instance.send_message. It can also be used to override chat_id\n \"\"\"\n kwargs = {\n \"chat_id\": self.chat_id,\n \"parse_mode\": telegram.parsemode.ParseMode.HTML,\n \"disable_web_page_preview\": True,\n }\n kwargs.update(api_params)\n\n if \"text\" not in kwargs or kwargs[\"text\"] is None:\n raise AirflowException(\"'text' must be provided for telegram message\")\n\n if kwargs[\"chat_id\"] is None:\n raise AirflowException(\"'chat_id' must be provided for telegram message\")\n\n response = self.connection.send_message(**kwargs)\n self.log.debug(response)\n", "path": "airflow/providers/telegram/hooks/telegram.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Hook for Telegram\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\n\nimport telegram\nimport tenacity\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.base import BaseHook\n\n\nclass TelegramHook(BaseHook):\n \"\"\"\n This hook allows you to post messages to Telegram using the telegram python-telegram-bot library.\n\n The library can be found here: https://github.com/python-telegram-bot/python-telegram-bot\n It accepts both telegram bot API token directly or connection that has telegram bot API token.\n If both supplied, token parameter will be given precedence, otherwise 'password' field in the connection\n from telegram_conn_id will be used.\n chat_id can also be provided in the connection using 'host' field in connection.\n Following is the details of a telegram_connection:\n name: 'telegram-connection-name'\n conn_type: 'http'\n password: 'TELEGRAM_TOKEN'\n host: 'chat_id' (optional)\n Examples:\n .. 
code-block:: python\n\n # Create hook\n telegram_hook = TelegramHook(telegram_conn_id=\"telegram_default\")\n # or telegram_hook = TelegramHook(telegram_conn_id='telegram_default', chat_id='-1xxx')\n # or telegram_hook = TelegramHook(token='xxx:xxx', chat_id='-1xxx')\n\n # Call method from telegram bot client\n telegram_hook.send_message(None, {\"text\": \"message\", \"chat_id\": \"-1xxx\"})\n # or telegram_hook.send_message(None', {\"text\": \"message\"})\n\n :param telegram_conn_id: connection that optionally has Telegram API token in the password field\n :param token: optional telegram API token\n :param chat_id: optional chat_id of the telegram chat/channel/group\n \"\"\"\n\n def __init__(\n self,\n telegram_conn_id: str | None = None,\n token: str | None = None,\n chat_id: str | None = None,\n ) -> None:\n super().__init__()\n self.token = self.__get_token(token, telegram_conn_id)\n self.chat_id = self.__get_chat_id(chat_id, telegram_conn_id)\n self.connection = self.get_conn()\n\n def get_conn(self) -> telegram.Bot:\n \"\"\"\n Returns the telegram bot client\n\n :return: telegram bot client\n \"\"\"\n return telegram.Bot(self.token)\n\n def __get_token(self, token: str | None, telegram_conn_id: str | None) -> str:\n \"\"\"\n Returns the telegram API token\n\n :param token: telegram API token\n :param telegram_conn_id: telegram connection name\n :return: telegram API token\n \"\"\"\n if token is not None:\n return token\n\n if telegram_conn_id is not None:\n conn = self.get_connection(telegram_conn_id)\n\n if not conn.password:\n raise AirflowException(\"Missing token(password) in Telegram connection\")\n\n return conn.password\n\n raise AirflowException(\"Cannot get token: No valid Telegram connection supplied.\")\n\n def __get_chat_id(self, chat_id: str | None, telegram_conn_id: str | None) -> str | None:\n \"\"\"\n Returns the telegram chat ID for a chat/channel/group\n\n :param chat_id: optional chat ID\n :param telegram_conn_id: telegram connection name\n :return: telegram chat ID\n \"\"\"\n if chat_id is not None:\n return chat_id\n\n if telegram_conn_id is not None:\n conn = self.get_connection(telegram_conn_id)\n return conn.host\n\n return None\n\n @tenacity.retry(\n retry=tenacity.retry_if_exception_type(telegram.error.TelegramError),\n stop=tenacity.stop_after_attempt(5),\n wait=tenacity.wait_fixed(1),\n )\n def send_message(self, api_params: dict) -> None:\n \"\"\"\n Sends the message to a telegram channel or chat.\n\n :param api_params: params for telegram_instance.send_message. It can also be used to override chat_id\n \"\"\"\n kwargs = {\n \"chat_id\": self.chat_id,\n \"parse_mode\": telegram.constants.ParseMode.HTML,\n \"disable_web_page_preview\": True,\n }\n kwargs.update(api_params)\n\n if \"text\" not in kwargs or kwargs[\"text\"] is None:\n raise AirflowException(\"'text' must be provided for telegram message\")\n\n if kwargs[\"chat_id\"] is None:\n raise AirflowException(\"'chat_id' must be provided for telegram message\")\n\n response = asyncio.run(self.connection.send_message(**kwargs))\n self.log.debug(response)\n", "path": "airflow/providers/telegram/hooks/telegram.py"}]} | 1,898 | 357 |
gh_patches_debug_9859 | rasdani/github-patches | git_diff | aimhubio__aim-3112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[fix] Serve files linked into the static files directory
In certain conda environments, the files in the static files root directory can be symlinked from a different place. The fixed implementation only resolves relative .. segments in the request path without resolving any symlinks. This way, it still prevents reading arbitrary files through the web server while allowing the reading of symlinked files.
--- END ISSUE ---
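The containment check the issue describes fits in a few lines; this is a minimal standalone sketch (not the exact view code) that only normalizes the path, so symlinked files under the root stay readable:
```python
# Sketch: reject paths that escape the static root, without resolving symlinks.
import os
from pathlib import Path


def is_within_root(static_root: Path, requested: str) -> bool:
    # normpath collapses ".." segments but leaves symlinks untouched.
    candidate = os.path.normpath(static_root / requested)
    common = Path(os.path.commonpath([static_root, candidate]))
    return common == static_root  # True only if the file stays under the root
```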
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aim/web/api/views.py`
Content:
```
1 import os
2 from pathlib import Path
3
4 from fastapi import HTTPException, Request
5 from fastapi.responses import FileResponse, HTMLResponse
6
7 from aim.web.api.utils import APIRouter # wrapper for fastapi.APIRouter
8 from aim.web.configs import AIM_UI_BASE_PATH
9
10 statics_router = APIRouter()
11
12
13 @statics_router.get('/static-files/{path:path}/')
14 async def serve_static_files(path):
15 import aim_ui
16
17 static_files_root = Path(aim_ui.__file__).parent / 'build'
18 # Normalize to resolve any .. segments
19 static_file_name = os.path.normpath(static_files_root / path)
20
21 # Ensure that no paths outside the root directory are accessed by checking that the
22 # root directory is a prefix of the file path
23 common_prefix = Path(os.path.commonpath([static_files_root, static_file_name]))
24 if common_prefix == static_files_root:
25 raise HTTPException(status_code=404)
26
27 compressed_file_name = Path(f'{static_file_name}.gz')
28 if compressed_file_name.exists():
29 return FileResponse(compressed_file_name, headers={'Content-Encoding': 'gzip'})
30 return FileResponse(static_file_name)
31
32
33 # do not change the placement of this method
34 # as it also serves as a fallback for wrong url routes
35 @statics_router.get('/{path:path}/', response_class=HTMLResponse)
36 async def serve_index_html(request: Request):
37 import aim_ui
38 from jinja2 import Environment, FileSystemLoader
39
40 template_files_dir = os.path.join(os.path.dirname(aim_ui.__file__), 'build')
41 env = Environment(
42 loader=FileSystemLoader(template_files_dir),
43 autoescape=True
44 )
45 template = env.get_template('index-template.html')
46 base_path = os.environ.get(AIM_UI_BASE_PATH, '')
47 return template.render(base_path=base_path)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aim/web/api/views.py b/aim/web/api/views.py
--- a/aim/web/api/views.py
+++ b/aim/web/api/views.py
@@ -21,7 +21,7 @@
# Ensure that no paths outside the root directory are accessed by checking that the
# root directory is a prefix of the file path
common_prefix = Path(os.path.commonpath([static_files_root, static_file_name]))
- if common_prefix == static_files_root:
+ if common_prefix != static_files_root:
raise HTTPException(status_code=404)
compressed_file_name = Path(f'{static_file_name}.gz')
| {"golden_diff": "diff --git a/aim/web/api/views.py b/aim/web/api/views.py\n--- a/aim/web/api/views.py\n+++ b/aim/web/api/views.py\n@@ -21,7 +21,7 @@\n # Ensure that no paths outside the root directory are accessed by checking that the\n # root directory is a prefix of the file path\n common_prefix = Path(os.path.commonpath([static_files_root, static_file_name]))\n- if common_prefix == static_files_root:\n+ if common_prefix != static_files_root:\n raise HTTPException(status_code=404)\n \n compressed_file_name = Path(f'{static_file_name}.gz')\n", "issue": "[fix] Serve files linked into the static files directory\nIn certain conda environments, the files in the static files root directory can be symlinked from a different place. The fixed implementation only resolves relative .. segments in the request path without resolving any symlinks. This way, it still prevents reading arbitrary files through the web server while allowing the reading of symlinked files.\n", "before_files": [{"content": "import os\nfrom pathlib import Path\n\nfrom fastapi import HTTPException, Request\nfrom fastapi.responses import FileResponse, HTMLResponse\n\nfrom aim.web.api.utils import APIRouter # wrapper for fastapi.APIRouter\nfrom aim.web.configs import AIM_UI_BASE_PATH\n\nstatics_router = APIRouter()\n\n\n@statics_router.get('/static-files/{path:path}/')\nasync def serve_static_files(path):\n import aim_ui\n\n static_files_root = Path(aim_ui.__file__).parent / 'build'\n # Normalize to resolve any .. segments\n static_file_name = os.path.normpath(static_files_root / path)\n\n # Ensure that no paths outside the root directory are accessed by checking that the\n # root directory is a prefix of the file path\n common_prefix = Path(os.path.commonpath([static_files_root, static_file_name]))\n if common_prefix == static_files_root:\n raise HTTPException(status_code=404)\n\n compressed_file_name = Path(f'{static_file_name}.gz')\n if compressed_file_name.exists():\n return FileResponse(compressed_file_name, headers={'Content-Encoding': 'gzip'})\n return FileResponse(static_file_name)\n\n\n# do not change the placement of this method\n# as it also serves as a fallback for wrong url routes\n@statics_router.get('/{path:path}/', response_class=HTMLResponse)\nasync def serve_index_html(request: Request):\n import aim_ui\n from jinja2 import Environment, FileSystemLoader\n\n template_files_dir = os.path.join(os.path.dirname(aim_ui.__file__), 'build')\n env = Environment(\n loader=FileSystemLoader(template_files_dir),\n autoescape=True\n )\n template = env.get_template('index-template.html')\n base_path = os.environ.get(AIM_UI_BASE_PATH, '')\n return template.render(base_path=base_path)\n", "path": "aim/web/api/views.py"}], "after_files": [{"content": "import os\nfrom pathlib import Path\n\nfrom fastapi import HTTPException, Request\nfrom fastapi.responses import FileResponse, HTMLResponse\n\nfrom aim.web.api.utils import APIRouter # wrapper for fastapi.APIRouter\nfrom aim.web.configs import AIM_UI_BASE_PATH\n\nstatics_router = APIRouter()\n\n\n@statics_router.get('/static-files/{path:path}/')\nasync def serve_static_files(path):\n import aim_ui\n\n static_files_root = Path(aim_ui.__file__).parent / 'build'\n # Normalize to resolve any .. 
segments\n static_file_name = os.path.normpath(static_files_root / path)\n\n # Ensure that no paths outside the root directory are accessed by checking that the\n # root directory is a prefix of the file path\n common_prefix = Path(os.path.commonpath([static_files_root, static_file_name]))\n if common_prefix != static_files_root:\n raise HTTPException(status_code=404)\n\n compressed_file_name = Path(f'{static_file_name}.gz')\n if compressed_file_name.exists():\n return FileResponse(compressed_file_name, headers={'Content-Encoding': 'gzip'})\n return FileResponse(static_file_name)\n\n\n# do not change the placement of this method\n# as it also serves as a fallback for wrong url routes\n@statics_router.get('/{path:path}/', response_class=HTMLResponse)\nasync def serve_index_html(request: Request):\n import aim_ui\n from jinja2 import Environment, FileSystemLoader\n\n template_files_dir = os.path.join(os.path.dirname(aim_ui.__file__), 'build')\n env = Environment(\n loader=FileSystemLoader(template_files_dir),\n autoescape=True\n )\n template = env.get_template('index-template.html')\n base_path = os.environ.get(AIM_UI_BASE_PATH, '')\n return template.render(base_path=base_path)\n", "path": "aim/web/api/views.py"}]} | 816 | 141 |
gh_patches_debug_2972 | rasdani/github-patches | git_diff | pyodide__pyodide-325 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError: invalid __array_struct__ when using js arrays of arrays and numpy
When using a matrix (an array of arrays of numbers) in JavaScript and trying to convert it to a numpy array, it fails with the error `ValueError: invalid __array_struct__`.
To reproduce:
JavaScript:
```
window.A = [[1,2,3],[4,5,6]];
```
Python:
```
import numpy
from js import A
m = numpy.array(A)
```
--- END ISSUE ---
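One workaround is to copy the JavaScript array-of-arrays into plain Python lists before handing it to numpy. The sketch below assumes a recursive `as_nested_list` helper is available from the `pyodide` module; the `A` global matches the reproduction above:
```python
# Sketch: convert a JS array of arrays into nested Python lists first.
import numpy
from js import A                     # JS side: window.A = [[1, 2, 3], [4, 5, 6]]
from pyodide import as_nested_list   # assumed helper exposed by pyodide

m = numpy.array(as_nested_list(A))   # m.shape == (2, 3)
```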
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyodide.py`
Content:
```
1 """
2 A library of helper utilities for connecting Python to the browser environment.
3 """
4
5 import ast
6 import io
7 from textwrap import dedent
8
9 __version__ = '0.8.2'
10
11
12 def open_url(url):
13 """
14 Fetches a given *url* and returns a io.StringIO to access its contents.
15 """
16 from js import XMLHttpRequest
17
18 req = XMLHttpRequest.new()
19 req.open('GET', url, False)
20 req.send(None)
21 return io.StringIO(req.response)
22
23
24 def eval_code(code, ns):
25 """
26 Runs a string of code, the last part of which may be an expression.
27 """
28 # handle mis-indented input from multi-line strings
29 code = dedent(code)
30
31 mod = ast.parse(code)
32 if len(mod.body) == 0:
33 return None
34
35 if isinstance(mod.body[-1], ast.Expr):
36 expr = ast.Expression(mod.body[-1].value)
37 del mod.body[-1]
38 else:
39 expr = None
40
41 if len(mod.body):
42 exec(compile(mod, '<exec>', mode='exec'), ns, ns)
43 if expr is not None:
44 return eval(compile(expr, '<eval>', mode='eval'), ns, ns)
45 else:
46 return None
47
48
49 def find_imports(code):
50 """
51 Finds the imports in a string of code and returns a list of their package
52 names.
53 """
54 # handle mis-indented input from multi-line strings
55 code = dedent(code)
56
57 mod = ast.parse(code)
58 imports = set()
59 for node in ast.walk(mod):
60 if isinstance(node, ast.Import):
61 for name in node.names:
62 name = name.name
63 imports.add(name.split('.')[0])
64 elif isinstance(node, ast.ImportFrom):
65 name = node.module
66 imports.add(name.split('.')[0])
67 return list(imports)
68
69
70 __all__ = ['open_url', 'eval_code', 'find_imports']
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyodide.py b/src/pyodide.py
--- a/src/pyodide.py
+++ b/src/pyodide.py
@@ -67,4 +67,16 @@
return list(imports)
-__all__ = ['open_url', 'eval_code', 'find_imports']
+def as_nested_list(obj):
+ """
+ Assumes a Javascript object is made of (possibly nested) arrays and
+ converts them to nested Python lists.
+ """
+ try:
+ it = iter(obj)
+ return [as_nested_list(x) for x in it]
+ except TypeError:
+ return obj
+
+
+__all__ = ['open_url', 'eval_code', 'find_imports', 'as_nested_list']
| {"golden_diff": "diff --git a/src/pyodide.py b/src/pyodide.py\n--- a/src/pyodide.py\n+++ b/src/pyodide.py\n@@ -67,4 +67,16 @@\n return list(imports)\n \n \n-__all__ = ['open_url', 'eval_code', 'find_imports']\n+def as_nested_list(obj):\n+ \"\"\"\n+ Assumes a Javascript object is made of (possibly nested) arrays and\n+ converts them to nested Python lists.\n+ \"\"\"\n+ try:\n+ it = iter(obj)\n+ return [as_nested_list(x) for x in it]\n+ except TypeError:\n+ return obj\n+\n+\n+__all__ = ['open_url', 'eval_code', 'find_imports', 'as_nested_list']\n", "issue": "ValueError: invalid __array_struct__ when using js arrays of arrays and numpy\nWhen using a matrix (array of array of numbers) in javascript and trying to convert that to a numpy array, it fails with the error `ValueError: invalid __array_struct__`\r\n\r\nTo reproduce:\r\nJavaScript:\r\n```\r\nwindow.A = [[1,2,3],[4,5,6]];\r\n```\r\nPython:\r\n```\r\nimport numpy\r\nfrom js import A\r\nm = numpy.array(A)\r\n```\n", "before_files": [{"content": "\"\"\"\nA library of helper utilities for connecting Python to the browser environment.\n\"\"\"\n\nimport ast\nimport io\nfrom textwrap import dedent\n\n__version__ = '0.8.2'\n\n\ndef open_url(url):\n \"\"\"\n Fetches a given *url* and returns a io.StringIO to access its contents.\n \"\"\"\n from js import XMLHttpRequest\n\n req = XMLHttpRequest.new()\n req.open('GET', url, False)\n req.send(None)\n return io.StringIO(req.response)\n\n\ndef eval_code(code, ns):\n \"\"\"\n Runs a string of code, the last part of which may be an expression.\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n if len(mod.body) == 0:\n return None\n\n if isinstance(mod.body[-1], ast.Expr):\n expr = ast.Expression(mod.body[-1].value)\n del mod.body[-1]\n else:\n expr = None\n\n if len(mod.body):\n exec(compile(mod, '<exec>', mode='exec'), ns, ns)\n if expr is not None:\n return eval(compile(expr, '<eval>', mode='eval'), ns, ns)\n else:\n return None\n\n\ndef find_imports(code):\n \"\"\"\n Finds the imports in a string of code and returns a list of their package\n names.\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n imports = set()\n for node in ast.walk(mod):\n if isinstance(node, ast.Import):\n for name in node.names:\n name = name.name\n imports.add(name.split('.')[0])\n elif isinstance(node, ast.ImportFrom):\n name = node.module\n imports.add(name.split('.')[0])\n return list(imports)\n\n\n__all__ = ['open_url', 'eval_code', 'find_imports']\n", "path": "src/pyodide.py"}], "after_files": [{"content": "\"\"\"\nA library of helper utilities for connecting Python to the browser environment.\n\"\"\"\n\nimport ast\nimport io\nfrom textwrap import dedent\n\n__version__ = '0.8.2'\n\n\ndef open_url(url):\n \"\"\"\n Fetches a given *url* and returns a io.StringIO to access its contents.\n \"\"\"\n from js import XMLHttpRequest\n\n req = XMLHttpRequest.new()\n req.open('GET', url, False)\n req.send(None)\n return io.StringIO(req.response)\n\n\ndef eval_code(code, ns):\n \"\"\"\n Runs a string of code, the last part of which may be an expression.\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n if len(mod.body) == 0:\n return None\n\n if isinstance(mod.body[-1], ast.Expr):\n expr = ast.Expression(mod.body[-1].value)\n del mod.body[-1]\n else:\n expr = None\n\n if len(mod.body):\n exec(compile(mod, '<exec>', mode='exec'), ns, ns)\n if expr 
is not None:\n return eval(compile(expr, '<eval>', mode='eval'), ns, ns)\n else:\n return None\n\n\ndef find_imports(code):\n \"\"\"\n Finds the imports in a string of code and returns a list of their package\n names.\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n imports = set()\n for node in ast.walk(mod):\n if isinstance(node, ast.Import):\n for name in node.names:\n name = name.name\n imports.add(name.split('.')[0])\n elif isinstance(node, ast.ImportFrom):\n name = node.module\n imports.add(name.split('.')[0])\n return list(imports)\n\n\ndef as_nested_list(obj):\n \"\"\"\n Assumes a Javascript object is made of (possibly nested) arrays and\n converts them to nested Python lists.\n \"\"\"\n try:\n it = iter(obj)\n return [as_nested_list(x) for x in it]\n except TypeError:\n return obj\n\n\n__all__ = ['open_url', 'eval_code', 'find_imports', 'as_nested_list']\n", "path": "src/pyodide.py"}]} | 916 | 168 |
gh_patches_debug_23525 | rasdani/github-patches | git_diff | pretalx__pretalx-626 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
email sender address fallback is incorrect
## Current Behavior

the UI says emails will be sent from "[email protected]", but they are sent from "admin@localhost" (the value in the global `MAIL_FROM`).
## Expected Behavior
the sender should be "[email protected]"
## Steps to Reproduce
1. create event
2. set an orga email address `[email protected]`
3. do _not_ set a "Sender address" in mail settings, but observe the text below
4. send email
5. email has `From: event <admin@localhost>`, not `From: event <[email protected]>`
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used: 5a5ab5482dd9a7a3b19b91563946c535fe5abe1e
* Environment name and version (e.g. Chrome 39, python 3.5):
* Operating System and version (desktop or mobile):
* Link to your instance, if in production:
--- END ISSUE ---
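The expected behaviour amounts to a three-step fallback for the sender address. A minimal sketch of that logic (a hypothetical `effective_sender` helper, assuming `event.email` holds the organiser address):
```python
# Sketch: prefer the configured sender, then the event's organiser address,
# and only then the global MAIL_FROM default.
def effective_sender(event, global_default: str) -> str:
    configured = event.settings.get("mail_from")
    if configured and configured != "[email protected]":
        return configured
    return event.email or global_default
```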
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretalx/common/mail.py`
Content:
```
1 import logging
2 from email.utils import formataddr
3 from smtplib import SMTPResponseException, SMTPSenderRefused
4 from typing import Any, Dict, Union
5
6 from django.conf import settings
7 from django.core.mail import EmailMultiAlternatives, get_connection
8 from django.core.mail.backends.smtp import EmailBackend
9 from django.utils.translation import override
10 from i18nfield.strings import LazyI18nString
11 from inlinestyler.utils import inline_css
12
13 from pretalx.celery_app import app
14 from pretalx.event.models import Event
15 from pretalx.person.models import User
16
17 logger = logging.getLogger(__name__)
18
19
20 class CustomSMTPBackend(EmailBackend):
21 def test(self, from_addr):
22 try:
23 self.open()
24 self.connection.ehlo_or_helo_if_needed()
25 (code, resp) = self.connection.mail(from_addr, [])
26 if code != 250:
27 logger.warning(
28 f'Error testing mail settings, code {code}, resp: {resp}'
29 )
30 raise SMTPSenderRefused(code, resp)
31 (code, resp) = self.connection.rcpt('[email protected]')
32 if code not in (250, 251):
33 logger.warning(
34 f'Error testing mail settings, code {code}, resp: {resp}'
35 )
36 raise SMTPSenderRefused(code, resp)
37 finally:
38 self.close()
39
40
41 class TolerantDict(dict):
42 def __missing__(self, key):
43 """Don't fail when formatting strings with a dict with missing keys."""
44 return key
45
46
47 class SendMailException(Exception):
48 pass
49
50
51 def mail(
52 user: User,
53 subject: str,
54 template: Union[str, LazyI18nString],
55 context: Dict[str, Any] = None,
56 event: Event = None,
57 locale: str = None,
58 headers: dict = None,
59 ):
60 from pretalx.mail.models import QueuedMail
61
62 headers = headers or {}
63
64 with override(locale):
65 body = str(template)
66 if context:
67 body = body.format_map(TolerantDict(context))
68 reply_to = headers.get('reply-to')
69 if reply_to and isinstance(reply_to, list):
70 reply_to = ','.join(reply_to)
71 QueuedMail(
72 event=event,
73 to=user.email,
74 subject=str(subject),
75 text=body,
76 reply_to=reply_to,
77 bcc=headers.get('bcc'),
78 ).send()
79
80
81 @app.task(bind=True)
82 def mail_send_task(
83 self,
84 to: str,
85 subject: str,
86 body: str,
87 html: str,
88 reply_to: list = None,
89 event: int = None,
90 cc: list = None,
91 bcc: list = None,
92 headers: dict = None,
93 ):
94 headers = headers or dict()
95 if event:
96 event = Event.objects.filter(id=event).first()
97 if event:
98 sender = event.settings.get('mail_from')
99 if sender == '[email protected]' or not sender:
100 sender = settings.MAIL_FROM
101 if reply_to:
102 headers['reply-to'] = reply_to.split(',') if isinstance(reply_to, str) else reply_to
103 backend = event.get_mail_backend()
104 sender = formataddr((str(event.name), sender))
105 else:
106 sender = formataddr(('pretalx', settings.MAIL_FROM))
107 backend = get_connection(fail_silently=False)
108
109 email = EmailMultiAlternatives(
110 subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers
111 )
112
113 if html is not None:
114 email.attach_alternative(inline_css(html), 'text/html')
115
116 try:
117 backend.send_messages([email])
118 except SMTPResponseException as exception:
119 # Retry on external problems: Connection issues (101, 111), timeouts (421), filled-up mailboxes (422),
120 # out of memory (431), network issues (442), another timeout (447), or too many mails sent (452)
121 if exception.smtp_code in (101, 111, 421, 422, 431, 442, 447, 452):
122 self.retry(max_retries=5, countdown=2 ** (self.request.retries * 2))
123 logger.exception('Error sending email')
124 raise SendMailException('Failed to send an email to {}.'.format(to))
125 except Exception:
126 logger.exception('Error sending email')
127 raise SendMailException('Failed to send an email to {}.'.format(to))
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pretalx/common/mail.py b/src/pretalx/common/mail.py
--- a/src/pretalx/common/mail.py
+++ b/src/pretalx/common/mail.py
@@ -92,14 +92,14 @@
headers: dict = None,
):
headers = headers or dict()
+ if reply_to and isinstance(reply_to, str):
+ reply_to = reply_to.split(',')
if event:
event = Event.objects.filter(id=event).first()
if event:
sender = event.settings.get('mail_from')
if sender == '[email protected]' or not sender:
- sender = settings.MAIL_FROM
- if reply_to:
- headers['reply-to'] = reply_to.split(',') if isinstance(reply_to, str) else reply_to
+ sender = event.email
backend = event.get_mail_backend()
sender = formataddr((str(event.name), sender))
else:
@@ -107,7 +107,7 @@
backend = get_connection(fail_silently=False)
email = EmailMultiAlternatives(
- subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers
+ subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers, reply_to=reply_to
)
if html is not None:
| {"golden_diff": "diff --git a/src/pretalx/common/mail.py b/src/pretalx/common/mail.py\n--- a/src/pretalx/common/mail.py\n+++ b/src/pretalx/common/mail.py\n@@ -92,14 +92,14 @@\n headers: dict = None,\n ):\n headers = headers or dict()\n+ if reply_to and isinstance(reply_to, str):\n+ reply_to = reply_to.split(',')\n if event:\n event = Event.objects.filter(id=event).first()\n if event:\n sender = event.settings.get('mail_from')\n if sender == '[email protected]' or not sender:\n- sender = settings.MAIL_FROM\n- if reply_to:\n- headers['reply-to'] = reply_to.split(',') if isinstance(reply_to, str) else reply_to\n+ sender = event.email\n backend = event.get_mail_backend()\n sender = formataddr((str(event.name), sender))\n else:\n@@ -107,7 +107,7 @@\n backend = get_connection(fail_silently=False)\n \n email = EmailMultiAlternatives(\n- subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers\n+ subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers, reply_to=reply_to\n )\n \n if html is not None:\n", "issue": "email sender address fallback is incorrect\n## Current Behavior\r\n\r\n\r\n\r\nthe UI says emails will be sent from \"[email protected]\", but they are sent from \"admin@localhost\" (the value in the global `MAIL_FROM`).\r\n\r\n## Expected Behavior\r\n\r\nthe sender should be \"[email protected]\"\r\n\r\n## Steps to Reproduce\r\n\r\n1. create event\r\n2. set an orga email address `[email protected]`\r\n3. do _not_ set a \"Sender address\" in mail settings, but observe the text below\r\n4. send email\r\n5. email is has `From: event <admin@localhost>`, not `From: event <[email protected]>`\r\n\r\n## Context\r\n\r\n<!--- How has this issue affected you? What are you trying to accomplish? -->\r\n<!--- Providing context helps us come up with a solution that is most useful in the real world -->\r\n\r\n## Your Environment\r\n\r\n<!--- Include as many relevant details about the environment you experienced the bug in -->\r\n* Version used: 5a5ab5482dd9a7a3b19b91563946c535fe5abe1e\r\n* Environment name and version (e.g. 
Chrome 39, python 3.5):\r\n* Operating System and version (desktop or mobile):\r\n* Link to your instance, if in production:\r\n\n", "before_files": [{"content": "import logging\nfrom email.utils import formataddr\nfrom smtplib import SMTPResponseException, SMTPSenderRefused\nfrom typing import Any, Dict, Union\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives, get_connection\nfrom django.core.mail.backends.smtp import EmailBackend\nfrom django.utils.translation import override\nfrom i18nfield.strings import LazyI18nString\nfrom inlinestyler.utils import inline_css\n\nfrom pretalx.celery_app import app\nfrom pretalx.event.models import Event\nfrom pretalx.person.models import User\n\nlogger = logging.getLogger(__name__)\n\n\nclass CustomSMTPBackend(EmailBackend):\n def test(self, from_addr):\n try:\n self.open()\n self.connection.ehlo_or_helo_if_needed()\n (code, resp) = self.connection.mail(from_addr, [])\n if code != 250:\n logger.warning(\n f'Error testing mail settings, code {code}, resp: {resp}'\n )\n raise SMTPSenderRefused(code, resp)\n (code, resp) = self.connection.rcpt('[email protected]')\n if code not in (250, 251):\n logger.warning(\n f'Error testing mail settings, code {code}, resp: {resp}'\n )\n raise SMTPSenderRefused(code, resp)\n finally:\n self.close()\n\n\nclass TolerantDict(dict):\n def __missing__(self, key):\n \"\"\"Don't fail when formatting strings with a dict with missing keys.\"\"\"\n return key\n\n\nclass SendMailException(Exception):\n pass\n\n\ndef mail(\n user: User,\n subject: str,\n template: Union[str, LazyI18nString],\n context: Dict[str, Any] = None,\n event: Event = None,\n locale: str = None,\n headers: dict = None,\n):\n from pretalx.mail.models import QueuedMail\n\n headers = headers or {}\n\n with override(locale):\n body = str(template)\n if context:\n body = body.format_map(TolerantDict(context))\n reply_to = headers.get('reply-to')\n if reply_to and isinstance(reply_to, list):\n reply_to = ','.join(reply_to)\n QueuedMail(\n event=event,\n to=user.email,\n subject=str(subject),\n text=body,\n reply_to=reply_to,\n bcc=headers.get('bcc'),\n ).send()\n\n\[email protected](bind=True)\ndef mail_send_task(\n self,\n to: str,\n subject: str,\n body: str,\n html: str,\n reply_to: list = None,\n event: int = None,\n cc: list = None,\n bcc: list = None,\n headers: dict = None,\n):\n headers = headers or dict()\n if event:\n event = Event.objects.filter(id=event).first()\n if event:\n sender = event.settings.get('mail_from')\n if sender == '[email protected]' or not sender:\n sender = settings.MAIL_FROM\n if reply_to:\n headers['reply-to'] = reply_to.split(',') if isinstance(reply_to, str) else reply_to\n backend = event.get_mail_backend()\n sender = formataddr((str(event.name), sender))\n else:\n sender = formataddr(('pretalx', settings.MAIL_FROM))\n backend = get_connection(fail_silently=False)\n\n email = EmailMultiAlternatives(\n subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers\n )\n\n if html is not None:\n email.attach_alternative(inline_css(html), 'text/html')\n\n try:\n backend.send_messages([email])\n except SMTPResponseException as exception:\n # Retry on external problems: Connection issues (101, 111), timeouts (421), filled-up mailboxes (422),\n # out of memory (431), network issues (442), another timeout (447), or too many mails sent (452)\n if exception.smtp_code in (101, 111, 421, 422, 431, 442, 447, 452):\n self.retry(max_retries=5, countdown=2 ** (self.request.retries * 2))\n 
logger.exception('Error sending email')\n raise SendMailException('Failed to send an email to {}.'.format(to))\n except Exception:\n logger.exception('Error sending email')\n raise SendMailException('Failed to send an email to {}.'.format(to))\n", "path": "src/pretalx/common/mail.py"}], "after_files": [{"content": "import logging\nfrom email.utils import formataddr\nfrom smtplib import SMTPResponseException, SMTPSenderRefused\nfrom typing import Any, Dict, Union\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives, get_connection\nfrom django.core.mail.backends.smtp import EmailBackend\nfrom django.utils.translation import override\nfrom i18nfield.strings import LazyI18nString\nfrom inlinestyler.utils import inline_css\n\nfrom pretalx.celery_app import app\nfrom pretalx.event.models import Event\nfrom pretalx.person.models import User\n\nlogger = logging.getLogger(__name__)\n\n\nclass CustomSMTPBackend(EmailBackend):\n def test(self, from_addr):\n try:\n self.open()\n self.connection.ehlo_or_helo_if_needed()\n (code, resp) = self.connection.mail(from_addr, [])\n if code != 250:\n logger.warning(\n f'Error testing mail settings, code {code}, resp: {resp}'\n )\n raise SMTPSenderRefused(code, resp)\n (code, resp) = self.connection.rcpt('[email protected]')\n if code not in (250, 251):\n logger.warning(\n f'Error testing mail settings, code {code}, resp: {resp}'\n )\n raise SMTPSenderRefused(code, resp)\n finally:\n self.close()\n\n\nclass TolerantDict(dict):\n def __missing__(self, key):\n \"\"\"Don't fail when formatting strings with a dict with missing keys.\"\"\"\n return key\n\n\nclass SendMailException(Exception):\n pass\n\n\ndef mail(\n user: User,\n subject: str,\n template: Union[str, LazyI18nString],\n context: Dict[str, Any] = None,\n event: Event = None,\n locale: str = None,\n headers: dict = None,\n):\n from pretalx.mail.models import QueuedMail\n\n headers = headers or {}\n\n with override(locale):\n body = str(template)\n if context:\n body = body.format_map(TolerantDict(context))\n reply_to = headers.get('reply-to')\n if reply_to and isinstance(reply_to, list):\n reply_to = ','.join(reply_to)\n QueuedMail(\n event=event,\n to=user.email,\n subject=str(subject),\n text=body,\n reply_to=reply_to,\n bcc=headers.get('bcc'),\n ).send()\n\n\[email protected](bind=True)\ndef mail_send_task(\n self,\n to: str,\n subject: str,\n body: str,\n html: str,\n reply_to: list = None,\n event: int = None,\n cc: list = None,\n bcc: list = None,\n headers: dict = None,\n):\n headers = headers or dict()\n if reply_to and isinstance(reply_to, str):\n reply_to = reply_to.split(',')\n if event:\n event = Event.objects.filter(id=event).first()\n if event:\n sender = event.settings.get('mail_from')\n if sender == '[email protected]' or not sender:\n sender = event.email\n backend = event.get_mail_backend()\n sender = formataddr((str(event.name), sender))\n else:\n sender = formataddr(('pretalx', settings.MAIL_FROM))\n backend = get_connection(fail_silently=False)\n\n email = EmailMultiAlternatives(\n subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers, reply_to=reply_to\n )\n\n if html is not None:\n email.attach_alternative(inline_css(html), 'text/html')\n\n try:\n backend.send_messages([email])\n except SMTPResponseException as exception:\n # Retry on external problems: Connection issues (101, 111), timeouts (421), filled-up mailboxes (422),\n # out of memory (431), network issues (442), another timeout (447), or too many mails sent (452)\n if 
exception.smtp_code in (101, 111, 421, 422, 431, 442, 447, 452):\n self.retry(max_retries=5, countdown=2 ** (self.request.retries * 2))\n logger.exception('Error sending email')\n raise SendMailException('Failed to send an email to {}.'.format(to))\n except Exception:\n logger.exception('Error sending email')\n raise SendMailException('Failed to send an email to {}.'.format(to))\n", "path": "src/pretalx/common/mail.py"}]} | 1,883 | 304 |
gh_patches_debug_25799 | rasdani/github-patches | git_diff | mlflow__mlflow-5121 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Service metrics endpoint excludes many important routes
### Willingness to contribute
The MLflow Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the MLflow code base?
- [x] Yes. I can contribute a fix for this bug independently.
- [ ] Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community.
- [ ] No. I cannot contribute a bug fix at this time.
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: no
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Debian GNU/Linux 11
- **MLflow installed from (source or binary)**: binary
- **MLflow version (run ``mlflow --version``)**: 1.21.0
- **Python version**: 3.8
- **npm version, if running the dev UI**:
- **Exact command to reproduce**: see section below
### Describe the problem
The mlflow server option to expose a Prometheus metrics endpoint is a great observability feature for MLflow. Unfortunately, the current implementation gives only an incomplete view of the server's health and performance: mlflow currently records metrics for just a [subset of endpoints](https://github.com/mlflow/mlflow/blob/master/mlflow/server/prometheus_exporter.py#L18).
As of MLflow version 1.21, the following routes are not being included in the service metrics:
```
['static', '_get_experiment_by_name', '_create_experiment', '_list_experiments', '_get_experiment',
'_delete_experiment', '_restore_experiment', '_update_experiment', '_update_run', '_delete_run',
'_restore_run', '_set_experiment_tag', '_delete_tag', '_get_run', '_list_artifacts', '_get_metric_history',
'_log_batch', '_log_model', '_create_registered_model', '_rename_registered_model',
'_update_registered_model', '_delete_registered_model', '_get_registered_model', '_search_registered_models',
'_list_registered_models', '_get_latest_versions', '_create_model_version', '_update_model_version',
'_transition_stage', '_delete_model_version', '_get_model_version', '_search_model_versions',
'_get_model_version_download_uri', '_set_registered_model_tag', '_set_model_version_tag',
'_delete_registered_model_tag', '_delete_model_version_tag', 'health', 'serve_artifacts',
'serve_model_version_artifact', 'serve_static_file', 'serve']
```
(see full list of endpoints)
```
from mlflow.server import app
app.view_functions.keys()
```
Filtering the set of routes to be included in the metrics endpoint seems like a potentially fragile approach as new routes are added in later versions of mlflow. It's especially problematic that the list of filtered routes cannot be configured. We currently have no way to monitor the health of the overall service given that many key routes (e.g. `log_batch`) are not included in the service metrics.
### Code to reproduce issue
Dockerfile for mlflow server
```
FROM python:3.8
RUN pip install mlflow==1.21.0
ENTRYPOINT mlflow server \
--backend-store-uri sqlite:///mlflow.sqlite \
--default-artifact-root file:///artifacts \
--host 0.0.0.0 \
--port 5000 \
--expose-prometheus /prometheus
```
Build and run the Docker container
```
docker build -t mlflow_example -f Dockerfile .
docker run -p 5000:5000 mlflow_example
```
Script with incomplete representation in metrics endpoint
```
import mlflow
import random
mlflow.set_tracking_uri("http://127.0.0.1:5000")
mlflow.set_experiment("service_metrics")
with mlflow.start_run(run_name="test"):
for _ in range(100):
mlflow.log_metrics({
'loss_a': random.random(),
'loss_b': random.random(),
'loss_c': random.random(),
})
mlflow.log_params({'a': 1, 'b': 2, 'c': 3})
```
See how metrics for these endpoints **_do not_** appear at http://127.0.0.1:5000/metrics
---
Script with expected representation in metrics endpoint
```
import mlflow
import random
mlflow.set_tracking_uri("http://127.0.0.1:5000")
mlflow.set_experiment("service_metrics")
with mlflow.start_run(run_name="test"):
for _ in range(100):
mlflow.log_metric('loss', random.random())
mlflow.log_param('param', 'test')
```
See how metrics for these endpoints appear at http://127.0.0.1:5000/metrics
### Other info / logs
Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached.
### What component(s), interfaces, languages, and integrations does this bug affect?
Components
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs
- [x] `area/server-infra`: MLflow Tracking server backend
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
Interface
- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
Language
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
Integrations
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
--- END ISSUE ---
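Rather than maintaining a hand-picked allow-list of view functions, one direction is to let the exporter instrument every Flask endpoint by default and exclude only the probes you do not care about. A trimmed-down sketch (option names as exposed by prometheus_flask_exporter; excluding the health check is an assumption):
```python
# Sketch: instrument all endpoints instead of a hard-coded subset.
from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics


def activate_prometheus_exporter(app):
    return GunicornInternalPrometheusMetrics(
        app,
        export_defaults=True,          # default per-endpoint latency/count metrics
        defaults_prefix="mlflow",
        excluded_paths=["/health"],    # skip the health probe
    )
```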
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/server/prometheus_exporter.py`
Content:
```
1 from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics
2 from flask import request
3
4
5 def activate_prometheus_exporter(app):
6 metrics = GunicornInternalPrometheusMetrics(app, export_defaults=False)
7
8 endpoint = app.view_functions
9 histogram = metrics.histogram(
10 "mlflow_requests_by_status_and_path",
11 "Request latencies and count by status and path",
12 labels={
13 "status": lambda r: r.status_code,
14 "path": lambda: change_path_for_metric(request.path),
15 },
16 )
17 for func_name, func in endpoint.items():
18 if func_name in ["_search_runs", "_log_metric", "_log_param", "_set_tag", "_create_run"]:
19 app.view_functions[func_name] = histogram(func)
20
21 return app
22
23
24 def change_path_for_metric(path):
25 """
26 Replace the '/' in the metric path by '_' so grafana can correctly use it.
27 :param path: path of the metric (example: runs/search)
28 :return: path with '_' instead of '/'
29 """
30 if "mlflow/" in path:
31 path = path.split("mlflow/")[-1]
32 return path.replace("/", "_")
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlflow/server/prometheus_exporter.py b/mlflow/server/prometheus_exporter.py
--- a/mlflow/server/prometheus_exporter.py
+++ b/mlflow/server/prometheus_exporter.py
@@ -1,32 +1,19 @@
from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics
from flask import request
+from mlflow.version import VERSION
+
def activate_prometheus_exporter(app):
- metrics = GunicornInternalPrometheusMetrics(app, export_defaults=False)
+ def mlflow_version(_: request):
+ return VERSION
- endpoint = app.view_functions
- histogram = metrics.histogram(
- "mlflow_requests_by_status_and_path",
- "Request latencies and count by status and path",
- labels={
- "status": lambda r: r.status_code,
- "path": lambda: change_path_for_metric(request.path),
- },
+ metrics = GunicornInternalPrometheusMetrics(
+ app,
+ export_defaults=True,
+ defaults_prefix="mlflow",
+ excluded_paths=["/health"],
+ group_by=mlflow_version,
)
- for func_name, func in endpoint.items():
- if func_name in ["_search_runs", "_log_metric", "_log_param", "_set_tag", "_create_run"]:
- app.view_functions[func_name] = histogram(func)
-
- return app
-
-def change_path_for_metric(path):
- """
- Replace the '/' in the metric path by '_' so grafana can correctly use it.
- :param path: path of the metric (example: runs/search)
- :return: path with '_' instead of '/'
- """
- if "mlflow/" in path:
- path = path.split("mlflow/")[-1]
- return path.replace("/", "_")
+ return metrics
| {"golden_diff": "diff --git a/mlflow/server/prometheus_exporter.py b/mlflow/server/prometheus_exporter.py\n--- a/mlflow/server/prometheus_exporter.py\n+++ b/mlflow/server/prometheus_exporter.py\n@@ -1,32 +1,19 @@\n from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics\r\n from flask import request\r\n \r\n+from mlflow.version import VERSION\r\n+\r\n \r\n def activate_prometheus_exporter(app):\r\n- metrics = GunicornInternalPrometheusMetrics(app, export_defaults=False)\r\n+ def mlflow_version(_: request):\r\n+ return VERSION\r\n \r\n- endpoint = app.view_functions\r\n- histogram = metrics.histogram(\r\n- \"mlflow_requests_by_status_and_path\",\r\n- \"Request latencies and count by status and path\",\r\n- labels={\r\n- \"status\": lambda r: r.status_code,\r\n- \"path\": lambda: change_path_for_metric(request.path),\r\n- },\r\n+ metrics = GunicornInternalPrometheusMetrics(\r\n+ app,\r\n+ export_defaults=True,\r\n+ defaults_prefix=\"mlflow\",\r\n+ excluded_paths=[\"/health\"],\r\n+ group_by=mlflow_version,\r\n )\r\n- for func_name, func in endpoint.items():\r\n- if func_name in [\"_search_runs\", \"_log_metric\", \"_log_param\", \"_set_tag\", \"_create_run\"]:\r\n- app.view_functions[func_name] = histogram(func)\r\n-\r\n- return app\r\n-\r\n \r\n-def change_path_for_metric(path):\r\n- \"\"\"\r\n- Replace the '/' in the metric path by '_' so grafana can correctly use it.\r\n- :param path: path of the metric (example: runs/search)\r\n- :return: path with '_' instead of '/'\r\n- \"\"\"\r\n- if \"mlflow/\" in path:\r\n- path = path.split(\"mlflow/\")[-1]\r\n- return path.replace(\"/\", \"_\")\r\n+ return metrics\n", "issue": "[BUG] Service metrics endpoint excludes many important routes\n### Willingness to contribute\r\nThe MLflow Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the MLflow code base?\r\n\r\n- [x] Yes. I can contribute a fix for this bug independently.\r\n- [ ] Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community.\r\n- [ ] No. I cannot contribute a bug fix at this time.\r\n\r\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: no\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Debian GNU/Linux 11\r\n- **MLflow installed from (source or binary)**: binary\r\n- **MLflow version (run ``mlflow --version``)**: 1.21.0\r\n- **Python version**: 3.8\r\n- **npm version, if running the dev UI**:\r\n- **Exact command to reproduce**: see section below\r\n\r\n### Describe the problem\r\n\r\nThe mlflow server option to expose a Prometheus metrics endpoint is a great observability feature for MLflow. Unfortunately, the current implementation leaves an incomplete view of the server health/performance. 
Currently, mlflow only logs metrics to a [subset of endpoints](https://github.com/mlflow/mlflow/blob/master/mlflow/server/prometheus_exporter.py#L18).\r\n\r\nAs of MLflow version 1.21, the following routes are not being included in the service metrics:\r\n\r\n```\r\n['static', '_get_experiment_by_name', '_create_experiment', '_list_experiments', '_get_experiment',\r\n'_delete_experiment', '_restore_experiment', '_update_experiment', '_update_run', '_delete_run', \r\n'_restore_run', '_set_experiment_tag', '_delete_tag', '_get_run', '_list_artifacts', '_get_metric_history',\r\n'_log_batch', '_log_model', '_create_registered_model', '_rename_registered_model', \r\n'_update_registered_model', '_delete_registered_model', '_get_registered_model', '_search_registered_models', \r\n'_list_registered_models', '_get_latest_versions', '_create_model_version', '_update_model_version', \r\n'_transition_stage', '_delete_model_version', '_get_model_version', '_search_model_versions', \r\n'_get_model_version_download_uri', '_set_registered_model_tag', '_set_model_version_tag', \r\n'_delete_registered_model_tag', '_delete_model_version_tag', 'health', 'serve_artifacts', \r\n'serve_model_version_artifact', 'serve_static_file', 'serve']\r\n```\r\n\r\n(see full list of endpoints)\r\n```\r\nfrom mlflow.server import app\r\n\r\napp.view_functions.keys()\r\n```\r\n\r\nFiltering the set of routes to be included in the metrics endpoint seems like a potentially fragile approach as new routes are added in later versions of mlflow. It's especially problematic that the list of filtered routes cannot be configured. We currently have no way to monitor the health of the overall service given that many key routes (e.g. `log_batch`) are not included in the service metrics.\r\n\r\n### Code to reproduce issue\r\n\r\nDockerfile for mlflow server\r\n```\r\nFROM python:3.8\r\nRUN pip install mlflow==1.21.0\r\n\r\nENTRYPOINT mlflow server \\\r\n --backend-store-uri sqlite:///mlflow.sqlite \\\r\n --default-artifact-root file:///artifacts \\\r\n --host 0.0.0.0 \\\r\n --port 5000 \\\r\n --expose-prometheus /prometheus\r\n```\r\n\r\nBuild and run the Docker container\r\n```\r\ndocker build -t mlflow_example -f Dockerfile .\r\ndocker run -p 5000:5000 mlflow_example\r\n```\r\n\r\n\r\nScript with incomplete representation in metrics endpoint\r\n```\r\nimport mlflow\r\nimport random\r\n\r\nmlflow.set_tracking_uri(\"http://127.0.0.1:5000\")\r\nmlflow.set_experiment(\"service_metrics\")\r\n\r\nwith mlflow.start_run(run_name=\"test\"):\r\n\r\n for _ in range(100):\r\n mlflow.log_metrics({\r\n 'loss_a': random.random(),\r\n 'loss_b': random.random(),\r\n 'loss_c': random.random(),\r\n })\r\n\r\n mlflow.log_params({'a': 1, 'b': 2, 'c': 3})\r\n```\r\nSee how metrics for these endpoints **_do not_** appear at http://127.0.0.1:5000/metrics\r\n\r\n---\r\n\r\nScript with expected representation in metrics endpoint\r\n```\r\nimport mlflow\r\nimport random\r\n\r\nmlflow.set_tracking_uri(\"http://127.0.0.1:5000\")\r\nmlflow.set_experiment(\"service_metrics\")\r\n\r\nwith mlflow.start_run(run_name=\"test\"):\r\n for _ in range(100):\r\n mlflow.log_metric('loss', random.random())\r\n\r\n mlflow.log_param('param', 'test')\r\n```\r\nSee how metrics for these endpoints appear at http://127.0.0.1:5000/metrics\r\n\r\n### Other info / logs\r\nInclude any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. 
Large logs and files should be attached.\r\n\r\n\r\n### What component(s), interfaces, languages, and integrations does this bug affect?\r\nComponents \r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\r\n- [x] `area/server-infra`: MLflow Tracking server backend\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguage \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\n", "before_files": [{"content": "from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics\r\nfrom flask import request\r\n\r\n\r\ndef activate_prometheus_exporter(app):\r\n metrics = GunicornInternalPrometheusMetrics(app, export_defaults=False)\r\n\r\n endpoint = app.view_functions\r\n histogram = metrics.histogram(\r\n \"mlflow_requests_by_status_and_path\",\r\n \"Request latencies and count by status and path\",\r\n labels={\r\n \"status\": lambda r: r.status_code,\r\n \"path\": lambda: change_path_for_metric(request.path),\r\n },\r\n )\r\n for func_name, func in endpoint.items():\r\n if func_name in [\"_search_runs\", \"_log_metric\", \"_log_param\", \"_set_tag\", \"_create_run\"]:\r\n app.view_functions[func_name] = histogram(func)\r\n\r\n return app\r\n\r\n\r\ndef change_path_for_metric(path):\r\n \"\"\"\r\n Replace the '/' in the metric path by '_' so grafana can correctly use it.\r\n :param path: path of the metric (example: runs/search)\r\n :return: path with '_' instead of '/'\r\n \"\"\"\r\n if \"mlflow/\" in path:\r\n path = path.split(\"mlflow/\")[-1]\r\n return path.replace(\"/\", \"_\")\r\n", "path": "mlflow/server/prometheus_exporter.py"}], "after_files": [{"content": "from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics\r\nfrom flask import request\r\n\r\nfrom mlflow.version import VERSION\r\n\r\n\r\ndef activate_prometheus_exporter(app):\r\n def mlflow_version(_: request):\r\n return VERSION\r\n\r\n metrics = GunicornInternalPrometheusMetrics(\r\n app,\r\n export_defaults=True,\r\n defaults_prefix=\"mlflow\",\r\n excluded_paths=[\"/health\"],\r\n group_by=mlflow_version,\r\n )\r\n\r\n return metrics\r\n", "path": "mlflow/server/prometheus_exporter.py"}]} | 2,043 | 407 |
gh_patches_debug_13249 | rasdani/github-patches | git_diff | streamlit__streamlit-7256 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pydeck_chart: Error (not a valid JSON) when data contains NaN
### Checklist
- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
If the data used in pydeck contains NaN (even if it is not used), the application fails with

### Reproducible Code Example
```Python
import math, pandas, pydeck, streamlit
streamlit.set_page_config(layout="wide")
data = pandas.DataFrame({"lng": [-109.037673], "lat": [36.994672], "weight": [math.nan]})
layer = pydeck.Layer("ScatterplotLayer", data=data, get_position=["lng", "lat"], radius_min_pixels=4)
deck = pydeck.Deck(layers=[layer], map_style=pydeck.map_styles.CARTO_LIGHT, tooltip={"text": "weight: {weight}"})
deck.to_html("test.html")
streamlit.pydeck_chart(deck, use_container_width=True)
```
### Steps To Reproduce
Run the code as usual.
### Expected Behavior
No error, just as with the generated ```test.html``` file
### Current Behavior
SyntaxError:
Unexpected token 'N', ...""weight": NaN "... is not valid JSON
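The offending token can be reproduced with the standard library alone; this is a sketch of the general failure mode, not necessarily the exact serialization path pydeck/Streamlit take:
```python
import json
import math

payload = {"weight": math.nan}

# The default encoder emits a bare NaN token, which JSON.parse in the browser rejects:
print(json.dumps(payload))                # {"weight": NaN}

# Requesting strict JSON fails loudly instead of producing an invalid document:
try:
    json.dumps(payload, allow_nan=False)
except ValueError as err:
    print(err)                            # Out of range float values are not JSON compliant
```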
### Is this a regression?
- [ ] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: Streamlit v1.15.1
- Python version: 3.10.4 (tags/v3.10.4:9d38120, Mar 23 2022, 23:13:41) [MSC v.1929 64 bit (AMD64)]
- Operating System: Windows 10.0.19045.2251
- Browser: Chome, Opera, Edge, Firefox
- Virtual environment: poetry
### Additional Information
_No response_
### Are you willing to submit a PR?
- [ ] Yes, I am willing to submit a PR!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `e2e/scripts/st_pydeck_chart.py`
Content:
```
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, cast
16
17 import numpy as np
18 import pandas as pd
19 import pydeck as pdk
20
21 import streamlit as st
22
23 # Empty chart.
24
25 st.pydeck_chart()
26
27 # Basic chart.
28
29 np.random.seed(12345)
30
31 df = pd.DataFrame(
32 cast(Any, np.random.randn(1000, 2) / [50, 50]) + [37.76, -122.4],
33 columns=["lat", "lon"],
34 )
35
36 st.pydeck_chart(
37 pdk.Deck(
38 map_style="mapbox://styles/mapbox/light-v9",
39 initial_view_state=pdk.ViewState(
40 latitude=37.76,
41 longitude=-122.4,
42 zoom=11,
43 pitch=50,
44 ),
45 layers=[
46 pdk.Layer(
47 "HexagonLayer",
48 data=df,
49 get_position="[lon, lat]",
50 radius=200,
51 elevation_scale=4,
52 elevation_range=[0, 1000],
53 pickable=True,
54 extruded=True,
55 ),
56 pdk.Layer(
57 "ScatterplotLayer",
58 data=df,
59 get_position="[lon, lat]",
60 get_color="[200, 30, 0, 160]",
61 get_radius=200,
62 ),
63 ],
64 )
65 )
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/e2e/scripts/st_pydeck_chart.py b/e2e/scripts/st_pydeck_chart.py
--- a/e2e/scripts/st_pydeck_chart.py
+++ b/e2e/scripts/st_pydeck_chart.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import math
from typing import Any, cast
import numpy as np
@@ -63,3 +64,15 @@
],
)
)
+
+# Chart w/ invalid JSON - issue #5799.
+data = pd.DataFrame({"lng": [-109.037673], "lat": [36.994672], "weight": [math.nan]})
+layer = pdk.Layer(
+ "ScatterplotLayer", data=data, get_position=["lng", "lat"], radius_min_pixels=4
+)
+deck = pdk.Deck(
+ layers=[layer],
+ map_style=pdk.map_styles.CARTO_LIGHT,
+ tooltip={"text": "weight: {weight}"},
+)
+st.pydeck_chart(deck, use_container_width=True)
| {"golden_diff": "diff --git a/e2e/scripts/st_pydeck_chart.py b/e2e/scripts/st_pydeck_chart.py\n--- a/e2e/scripts/st_pydeck_chart.py\n+++ b/e2e/scripts/st_pydeck_chart.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import math\n from typing import Any, cast\n \n import numpy as np\n@@ -63,3 +64,15 @@\n ],\n )\n )\n+\n+# Chart w/ invalid JSON - issue #5799.\n+data = pd.DataFrame({\"lng\": [-109.037673], \"lat\": [36.994672], \"weight\": [math.nan]})\n+layer = pdk.Layer(\n+ \"ScatterplotLayer\", data=data, get_position=[\"lng\", \"lat\"], radius_min_pixels=4\n+)\n+deck = pdk.Deck(\n+ layers=[layer],\n+ map_style=pdk.map_styles.CARTO_LIGHT,\n+ tooltip={\"text\": \"weight: {weight}\"},\n+)\n+st.pydeck_chart(deck, use_container_width=True)\n", "issue": "pydeck_chart: Error (not a valid JSON) when data contains NaN\n### Checklist\r\n\r\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\r\n- [X] I added a very descriptive title to this issue.\r\n- [X] I have provided sufficient information below to help reproduce this issue.\r\n\r\n### Summary\r\n\r\nIf the data used in pydeck contains NaN (even if it is not used), the application fails with \r\n\r\n\r\n\r\n### Reproducible Code Example\r\n\r\n```Python\r\nimport math, pandas, pydeck, streamlit\r\n\r\nstreamlit.set_page_config(layout=\"wide\")\r\ndata = pandas.DataFrame({\"lng\": [-109.037673], \"lat\": [36.994672], \"weight\": [math.nan]})\r\nlayer = pydeck.Layer(\"ScatterplotLayer\", data=data, get_position=[\"lng\", \"lat\"], radius_min_pixels=4)\r\ndeck = pydeck.Deck(layers=[layer], map_style=pydeck.map_styles.CARTO_LIGHT, tooltip={\"text\": \"weight: {weight}\"})\r\ndeck.to_html(\"test.html\")\r\nstreamlit.pydeck_chart(deck, use_container_width=True)\r\n```\r\n\r\n\r\n### Steps To Reproduce\r\n\r\nRun the code as usual.\r\n\r\n### Expected Behavior\r\n\r\nNo error, as in the file ```test.html``` generated\r\n\r\n### Current Behavior\r\n\r\nSyntaxError:\r\nUnexpected token 'N', ...\"\"weight\": NaN \"... is not valid JSON\r\n\r\n\r\n### Is this a regression?\r\n\r\n- [ ] Yes, this used to work in a previous version.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: Streamlit v1.15.1\r\n- Python version: 3.10.4 (tags/v3.10.4:9d38120, Mar 23 2022, 23:13:41) [MSC v.1929 64 bit (AMD64)]\r\n- Operating System: Windows 10.0.19045.2251\r\n- Browser: Chome, Opera, Edge, Firefox\r\n- Virtual environment: poetry\r\n\r\n\r\n### Additional Information\r\n\r\n_No response_\r\n\r\n### Are you willing to submit a PR?\r\n\r\n- [ ] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, cast\n\nimport numpy as np\nimport pandas as pd\nimport pydeck as pdk\n\nimport streamlit as st\n\n# Empty chart.\n\nst.pydeck_chart()\n\n# Basic chart.\n\nnp.random.seed(12345)\n\ndf = pd.DataFrame(\n cast(Any, np.random.randn(1000, 2) / [50, 50]) + [37.76, -122.4],\n columns=[\"lat\", \"lon\"],\n)\n\nst.pydeck_chart(\n pdk.Deck(\n map_style=\"mapbox://styles/mapbox/light-v9\",\n initial_view_state=pdk.ViewState(\n latitude=37.76,\n longitude=-122.4,\n zoom=11,\n pitch=50,\n ),\n layers=[\n pdk.Layer(\n \"HexagonLayer\",\n data=df,\n get_position=\"[lon, lat]\",\n radius=200,\n elevation_scale=4,\n elevation_range=[0, 1000],\n pickable=True,\n extruded=True,\n ),\n pdk.Layer(\n \"ScatterplotLayer\",\n data=df,\n get_position=\"[lon, lat]\",\n get_color=\"[200, 30, 0, 160]\",\n get_radius=200,\n ),\n ],\n )\n)\n", "path": "e2e/scripts/st_pydeck_chart.py"}], "after_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom typing import Any, cast\n\nimport numpy as np\nimport pandas as pd\nimport pydeck as pdk\n\nimport streamlit as st\n\n# Empty chart.\n\nst.pydeck_chart()\n\n# Basic chart.\n\nnp.random.seed(12345)\n\ndf = pd.DataFrame(\n cast(Any, np.random.randn(1000, 2) / [50, 50]) + [37.76, -122.4],\n columns=[\"lat\", \"lon\"],\n)\n\nst.pydeck_chart(\n pdk.Deck(\n map_style=\"mapbox://styles/mapbox/light-v9\",\n initial_view_state=pdk.ViewState(\n latitude=37.76,\n longitude=-122.4,\n zoom=11,\n pitch=50,\n ),\n layers=[\n pdk.Layer(\n \"HexagonLayer\",\n data=df,\n get_position=\"[lon, lat]\",\n radius=200,\n elevation_scale=4,\n elevation_range=[0, 1000],\n pickable=True,\n extruded=True,\n ),\n pdk.Layer(\n \"ScatterplotLayer\",\n data=df,\n get_position=\"[lon, lat]\",\n get_color=\"[200, 30, 0, 160]\",\n get_radius=200,\n ),\n ],\n )\n)\n\n# Chart w/ invalid JSON - issue #5799.\ndata = pd.DataFrame({\"lng\": [-109.037673], \"lat\": [36.994672], \"weight\": [math.nan]})\nlayer = pdk.Layer(\n \"ScatterplotLayer\", data=data, get_position=[\"lng\", \"lat\"], radius_min_pixels=4\n)\ndeck = pdk.Deck(\n layers=[layer],\n map_style=pdk.map_styles.CARTO_LIGHT,\n tooltip={\"text\": \"weight: {weight}\"},\n)\nst.pydeck_chart(deck, use_container_width=True)\n", "path": "e2e/scripts/st_pydeck_chart.py"}]} | 1,386 | 257 |
gh_patches_debug_50331 | rasdani/github-patches | git_diff | pypi__warehouse-13706 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong key name used for PEP 658 metadata files in the JSON index
**Describe the bug**
[PEP 691](https://peps.python.org/pep-0691/#project-detail) states that the key name for metadata files in the JSON index should be `dist-info-metadata`:
> `dist-info-metadata`: An optional key that indicates that metadata for this file is available, via the same location as specified in [PEP 658](https://peps.python.org/pep-0658) (`{file_url}.metadata`).
However, warehouse is providing it under the `data-dist-info-metadata` key instead:
```
$ curl -H 'Accept: application/vnd.pypi.simple.v1+json' https://pypi.org/simple/fluffy-server/ | jq .files
[...]
{
"data-dist-info-metadata": {
"sha256": "4db99543165cbdeef42ccb6257545911ccd7865d65e304e3e056f383a25f309c"
},
"filename": "fluffy_server-1.39.2-py3-none-any.whl",
[...]
```
This is causing pip to not use the metadata files as it is looking for the `dist-info-metadata` key only:
https://github.com/pypa/pip/blob/f25f8fffbbd16fdb13a4f8977946afe9a3248453/src/pip/_internal/models/link.py#L265
**Additional context**
There are two bugs discovered recently in pip which may make this tricky to fix:
* https://github.com/pypa/pip/issues/12042
* https://github.com/pypa/pip/issues/12038
I believe if we simply fix the key name in pypi.org, it will break existing pip versions as it will cause users to encounter these bugs. It may be necessary to coordinate this fix with fixes to the above bugs in pip to avoid disruption?
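For clarity, the mismatch can be shown in isolation. This is an illustrative sketch (not pip's actual code), using the file entry from the `curl` output above with a truncated hash:
```python
file_entry = {
    "filename": "fluffy_server-1.39.2-py3-none-any.whl",
    "data-dist-info-metadata": {"sha256": "4db99543..."},
}

# A PEP 691 consumer looks up the key the spec names and finds nothing:
print(file_entry.get("dist-info-metadata"))       # None -> the .metadata file is ignored
print(file_entry.get("data-dist-info-metadata"))  # {'sha256': '4db99543...'}
```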
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/packaging/utils.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import hashlib
14 import os.path
15 import tempfile
16
17 import packaging_legacy.version
18
19 from pyramid_jinja2 import IJinja2Environment
20 from sqlalchemy.orm import joinedload
21
22 from warehouse.packaging.interfaces import ISimpleStorage
23 from warehouse.packaging.models import File, Project, Release
24
25 API_VERSION = "1.1"
26
27
28 def _simple_index(request, serial):
29 # Fetch the name and normalized name for all of our projects
30 projects = (
31 request.db.query(Project.name, Project.normalized_name, Project.last_serial)
32 .order_by(Project.normalized_name)
33 .all()
34 )
35
36 return {
37 "meta": {"api-version": API_VERSION, "_last-serial": serial},
38 "projects": [{"name": p.name, "_last-serial": p.last_serial} for p in projects],
39 }
40
41
42 def _simple_detail(project, request):
43 # Get all of the files for this project.
44 files = sorted(
45 request.db.query(File)
46 .options(joinedload(File.release))
47 .join(Release)
48 .filter(Release.project == project)
49 .all(),
50 key=lambda f: (packaging_legacy.version.parse(f.release.version), f.filename),
51 )
52 versions = sorted(
53 {f.release.version for f in files}, key=packaging_legacy.version.parse
54 )
55
56 return {
57 "meta": {"api-version": API_VERSION, "_last-serial": project.last_serial},
58 "name": project.normalized_name,
59 "versions": versions,
60 "files": [
61 {
62 "filename": file.filename,
63 "url": request.route_url("packaging.file", path=file.path),
64 "hashes": {
65 "sha256": file.sha256_digest,
66 },
67 "requires-python": file.release.requires_python,
68 "size": file.size,
69 "upload-time": file.upload_time.isoformat() + "Z",
70 "yanked": file.release.yanked_reason
71 if file.release.yanked and file.release.yanked_reason
72 else file.release.yanked,
73 "data-dist-info-metadata": {"sha256": file.metadata_file_sha256_digest}
74 if file.metadata_file_sha256_digest
75 else False,
76 }
77 for file in files
78 ],
79 }
80
81
82 def render_simple_detail(project, request, store=False):
83 context = _simple_detail(project, request)
84
85 env = request.registry.queryUtility(IJinja2Environment, name=".jinja2")
86 template = env.get_template("templates/api/simple/detail.html")
87 content = template.render(**context, request=request)
88
89 content_hasher = hashlib.blake2b(digest_size=256 // 8)
90 content_hasher.update(content.encode("utf-8"))
91 content_hash = content_hasher.hexdigest().lower()
92
93 simple_detail_path = (
94 f"{project.normalized_name}/{content_hash}.{project.normalized_name}.html"
95 )
96
97 if store:
98 storage = request.find_service(ISimpleStorage)
99 with tempfile.NamedTemporaryFile() as f:
100 f.write(content.encode("utf-8"))
101 f.flush()
102
103 storage.store(
104 simple_detail_path,
105 f.name,
106 meta={
107 "project": project.normalized_name,
108 "pypi-last-serial": project.last_serial,
109 "hash": content_hash,
110 },
111 )
112 storage.store(
113 os.path.join(project.normalized_name, "index.html"),
114 f.name,
115 meta={
116 "project": project.normalized_name,
117 "pypi-last-serial": project.last_serial,
118 "hash": content_hash,
119 },
120 )
121
122 return (content_hash, simple_detail_path)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/packaging/utils.py b/warehouse/packaging/utils.py
--- a/warehouse/packaging/utils.py
+++ b/warehouse/packaging/utils.py
@@ -73,6 +73,9 @@
"data-dist-info-metadata": {"sha256": file.metadata_file_sha256_digest}
if file.metadata_file_sha256_digest
else False,
+ "core-metadata": {"sha256": file.metadata_file_sha256_digest}
+ if file.metadata_file_sha256_digest
+ else False,
}
for file in files
],
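With the patch applied, a per-file entry in the JSON index carries both keys with the same digest — the existing key for clients that already read it, plus the newly added `core-metadata`. A sketch of the resulting shape (other keys such as `url`, `size` and `upload-time` omitted, hash values elided):
```python
entry = {
    "filename": "fluffy_server-1.39.2-py3-none-any.whl",
    "hashes": {"sha256": "..."},
    "data-dist-info-metadata": {"sha256": "<metadata-file-sha256>"},
    "core-metadata": {"sha256": "<metadata-file-sha256>"},
}
```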
| {"golden_diff": "diff --git a/warehouse/packaging/utils.py b/warehouse/packaging/utils.py\n--- a/warehouse/packaging/utils.py\n+++ b/warehouse/packaging/utils.py\n@@ -73,6 +73,9 @@\n \"data-dist-info-metadata\": {\"sha256\": file.metadata_file_sha256_digest}\n if file.metadata_file_sha256_digest\n else False,\n+ \"core-metadata\": {\"sha256\": file.metadata_file_sha256_digest}\n+ if file.metadata_file_sha256_digest\n+ else False,\n }\n for file in files\n ],\n", "issue": "Wrong key name used for PEP 658 metadata files in the JSON index\n**Describe the bug**\r\n\r\n[PEP 691](https://peps.python.org/pep-0691/#project-detail) states that the key name for metadata files in the JSON index should be `dist-info-metadata`:\r\n\r\n> `dist-info-metadata`: An optional key that indicates that metadata for this file is available, via the same location as specified in [PEP 658](https://peps.python.org/pep-0658) (`{file_url}.metadata`).\r\n\r\nHowever, warehouse is providing it under the `data-dist-info-metadata` key instead:\r\n\r\n```\r\n$ curl -H 'Accept: application/vnd.pypi.simple.v1+json' https://pypi.org/simple/fluffy-server/ | jq .files\r\n[...]\r\n {\r\n \"data-dist-info-metadata\": {\r\n \"sha256\": \"4db99543165cbdeef42ccb6257545911ccd7865d65e304e3e056f383a25f309c\"\r\n },\r\n \"filename\": \"fluffy_server-1.39.2-py3-none-any.whl\",\r\n [...]\r\n```\r\n\r\nThis is causing pip to not use the metadata files as it is looking for the `dist-info-metadata` key only:\r\nhttps://github.com/pypa/pip/blob/f25f8fffbbd16fdb13a4f8977946afe9a3248453/src/pip/_internal/models/link.py#L265\r\n\r\n\r\n**Additional context**\r\n\r\nThere are two bugs discovered recently in pip which may make this tricky to fix:\r\n\r\n* https://github.com/pypa/pip/issues/12042\r\n* https://github.com/pypa/pip/issues/12038\r\n\r\nI believe if we simply fix the key name in pypi.org, it will break existing pip versions as it will cause users to encounter these bugs. 
It may be necessary to coordinate this fix with fixes to the above bugs in pip to avoid disruption?\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport hashlib\nimport os.path\nimport tempfile\n\nimport packaging_legacy.version\n\nfrom pyramid_jinja2 import IJinja2Environment\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.packaging.interfaces import ISimpleStorage\nfrom warehouse.packaging.models import File, Project, Release\n\nAPI_VERSION = \"1.1\"\n\n\ndef _simple_index(request, serial):\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name, Project.last_serial)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\n \"meta\": {\"api-version\": API_VERSION, \"_last-serial\": serial},\n \"projects\": [{\"name\": p.name, \"_last-serial\": p.last_serial} for p in projects],\n }\n\n\ndef _simple_detail(project, request):\n # Get all of the files for this project.\n files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .join(Release)\n .filter(Release.project == project)\n .all(),\n key=lambda f: (packaging_legacy.version.parse(f.release.version), f.filename),\n )\n versions = sorted(\n {f.release.version for f in files}, key=packaging_legacy.version.parse\n )\n\n return {\n \"meta\": {\"api-version\": API_VERSION, \"_last-serial\": project.last_serial},\n \"name\": project.normalized_name,\n \"versions\": versions,\n \"files\": [\n {\n \"filename\": file.filename,\n \"url\": request.route_url(\"packaging.file\", path=file.path),\n \"hashes\": {\n \"sha256\": file.sha256_digest,\n },\n \"requires-python\": file.release.requires_python,\n \"size\": file.size,\n \"upload-time\": file.upload_time.isoformat() + \"Z\",\n \"yanked\": file.release.yanked_reason\n if file.release.yanked and file.release.yanked_reason\n else file.release.yanked,\n \"data-dist-info-metadata\": {\"sha256\": file.metadata_file_sha256_digest}\n if file.metadata_file_sha256_digest\n else False,\n }\n for file in files\n ],\n }\n\n\ndef render_simple_detail(project, request, store=False):\n context = _simple_detail(project, request)\n\n env = request.registry.queryUtility(IJinja2Environment, name=\".jinja2\")\n template = env.get_template(\"templates/api/simple/detail.html\")\n content = template.render(**context, request=request)\n\n content_hasher = hashlib.blake2b(digest_size=256 // 8)\n content_hasher.update(content.encode(\"utf-8\"))\n content_hash = content_hasher.hexdigest().lower()\n\n simple_detail_path = (\n f\"{project.normalized_name}/{content_hash}.{project.normalized_name}.html\"\n )\n\n if store:\n storage = request.find_service(ISimpleStorage)\n with tempfile.NamedTemporaryFile() as f:\n f.write(content.encode(\"utf-8\"))\n f.flush()\n\n storage.store(\n simple_detail_path,\n f.name,\n meta={\n \"project\": project.normalized_name,\n \"pypi-last-serial\": project.last_serial,\n \"hash\": content_hash,\n },\n )\n storage.store(\n 
os.path.join(project.normalized_name, \"index.html\"),\n f.name,\n meta={\n \"project\": project.normalized_name,\n \"pypi-last-serial\": project.last_serial,\n \"hash\": content_hash,\n },\n )\n\n return (content_hash, simple_detail_path)\n", "path": "warehouse/packaging/utils.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport hashlib\nimport os.path\nimport tempfile\n\nimport packaging_legacy.version\n\nfrom pyramid_jinja2 import IJinja2Environment\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.packaging.interfaces import ISimpleStorage\nfrom warehouse.packaging.models import File, Project, Release\n\nAPI_VERSION = \"1.1\"\n\n\ndef _simple_index(request, serial):\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name, Project.last_serial)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\n \"meta\": {\"api-version\": API_VERSION, \"_last-serial\": serial},\n \"projects\": [{\"name\": p.name, \"_last-serial\": p.last_serial} for p in projects],\n }\n\n\ndef _simple_detail(project, request):\n # Get all of the files for this project.\n files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .join(Release)\n .filter(Release.project == project)\n .all(),\n key=lambda f: (packaging_legacy.version.parse(f.release.version), f.filename),\n )\n versions = sorted(\n {f.release.version for f in files}, key=packaging_legacy.version.parse\n )\n\n return {\n \"meta\": {\"api-version\": API_VERSION, \"_last-serial\": project.last_serial},\n \"name\": project.normalized_name,\n \"versions\": versions,\n \"files\": [\n {\n \"filename\": file.filename,\n \"url\": request.route_url(\"packaging.file\", path=file.path),\n \"hashes\": {\n \"sha256\": file.sha256_digest,\n },\n \"requires-python\": file.release.requires_python,\n \"size\": file.size,\n \"upload-time\": file.upload_time.isoformat() + \"Z\",\n \"yanked\": file.release.yanked_reason\n if file.release.yanked and file.release.yanked_reason\n else file.release.yanked,\n \"data-dist-info-metadata\": {\"sha256\": file.metadata_file_sha256_digest}\n if file.metadata_file_sha256_digest\n else False,\n \"core-metadata\": {\"sha256\": file.metadata_file_sha256_digest}\n if file.metadata_file_sha256_digest\n else False,\n }\n for file in files\n ],\n }\n\n\ndef render_simple_detail(project, request, store=False):\n context = _simple_detail(project, request)\n\n env = request.registry.queryUtility(IJinja2Environment, name=\".jinja2\")\n template = env.get_template(\"templates/api/simple/detail.html\")\n content = template.render(**context, request=request)\n\n content_hasher = hashlib.blake2b(digest_size=256 // 8)\n content_hasher.update(content.encode(\"utf-8\"))\n content_hash = content_hasher.hexdigest().lower()\n\n simple_detail_path = (\n f\"{project.normalized_name}/{content_hash}.{project.normalized_name}.html\"\n )\n\n if store:\n storage = request.find_service(ISimpleStorage)\n with 
tempfile.NamedTemporaryFile() as f:\n f.write(content.encode(\"utf-8\"))\n f.flush()\n\n storage.store(\n simple_detail_path,\n f.name,\n meta={\n \"project\": project.normalized_name,\n \"pypi-last-serial\": project.last_serial,\n \"hash\": content_hash,\n },\n )\n storage.store(\n os.path.join(project.normalized_name, \"index.html\"),\n f.name,\n meta={\n \"project\": project.normalized_name,\n \"pypi-last-serial\": project.last_serial,\n \"hash\": content_hash,\n },\n )\n\n return (content_hash, simple_detail_path)\n", "path": "warehouse/packaging/utils.py"}]} | 1,884 | 137 |
gh_patches_debug_24568 | rasdani/github-patches | git_diff | mdn__kuma-6829 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
T - Fix post-sign-in redirect so that the user lands on the correct page
**Summary**
You're not taken back to where you were when you sign in. At least via the modal and GitHub.

**Steps To Reproduce (STR)**
1. Go to some page other than the home page. Be not-signed in.
2. Click sign in.
3. Click GitHub and complete your GitHub auth stuff
**Actual behavior**
I ended up back on the home page `/en-US/`. :(
**Expected behavior**
To be taken to the page I was originally on.
**Additional context**
Pretty sure this is a regression, but I don't know if it started when we switched to a modal.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/core/context_processors.py`
Content:
```
1 from urllib.parse import urlparse
2
3 from constance import config
4 from django.conf import settings
5 from django.utils import translation
6
7 from .i18n import get_language_mapping
8
9
10 def global_settings(request):
11 """Adds settings to the context."""
12
13 def clean_safe_url(url):
14 if "://" not in url:
15 # E.g. 'elasticsearch:9200'
16 url = "http://" + url
17 parsed = urlparse(url)
18 if "@" in parsed.netloc:
19 parsed = parsed._replace(
20 netloc="username:secret@" + parsed.netloc.split("@")[-1]
21 )
22 return parsed.geturl()
23
24 # TODO: Ideally, GOOGLE_ANALYTICS_ACCOUNT is only set in settings (from
25 # an environment variable) but for safe transition, we rely on
26 # constance if it hasn't been put into settings yet.
27 # Once we know with confidence, that GOOGLE_ANALYTICS_ACCOUNT is set
28 # and a valid value in the environment (for production!) then we
29 # can delete these lines of code.
30 # See https://bugzilla.mozilla.org/show_bug.cgi?id=1570076
31 google_analytics_account = getattr(settings, "GOOGLE_ANALYTICS_ACCOUNT", None)
32 if google_analytics_account is None:
33 if config.GOOGLE_ANALYTICS_ACCOUNT != "0":
34 settings.GOOGLE_ANALYTICS_ACCOUNT = config.GOOGLE_ANALYTICS_ACCOUNT
35
36 return {
37 "settings": settings,
38 # Because the 'settings.ES_URLS' might contain the username:password
39 # it's never appropriate to display in templates. So clean them up.
40 # But return it as a lambda so it only executes if really needed.
41 "safe_es_urls": lambda: [clean_safe_url(x) for x in settings.ES_URLS],
42 }
43
44
45 def i18n(request):
46 return {
47 "LANGUAGES": get_language_mapping(),
48 "LANG": (
49 settings.LANGUAGE_URL_MAP.get(translation.get_language())
50 or translation.get_language()
51 ),
52 "DIR": "rtl" if translation.get_language_bidi() else "ltr",
53 }
54
55
56 def next_url(request):
57 if (
58 hasattr(request, "path")
59 and "login" not in request.path
60 and "register" not in request.path
61 ):
62 return {"next_url": request.get_full_path()}
63 return {}
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/core/context_processors.py b/kuma/core/context_processors.py
--- a/kuma/core/context_processors.py
+++ b/kuma/core/context_processors.py
@@ -4,6 +4,8 @@
from django.conf import settings
from django.utils import translation
+from kuma.core.urlresolvers import reverse
+
from .i18n import get_language_mapping
@@ -54,10 +56,30 @@
def next_url(request):
- if (
- hasattr(request, "path")
- and "login" not in request.path
- and "register" not in request.path
- ):
- return {"next_url": request.get_full_path()}
- return {}
+ """Return a function by the same name as the context processor.
+ That means, in the jinja templates, instead of doing
+
+ {% set url = next_url %}
+
+ you just have to do:
+
+ {% set url = next_url() %}
+
+ which means that the actual context processor function isn't executed
+ every single time any jinja template is rendered. Now, only if the
+ context processor is actually needed, it gets executed.
+
+ See https://www.peterbe.com/plog/closure-django-context-processors
+ """
+
+ def inner():
+ if hasattr(request, "path"):
+ if request.GET.get("next"):
+ if "://" not in request.GET["next"]:
+ return request.GET["next"]
+ elif reverse(settings.LOGIN_URL) != request.get_full_path():
+ # The only exception is the sign-in landing page which you get to
+ # if you can't use the auth modal.
+ return request.get_full_path()
+
+ return {"next_url": inner}
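One detail worth noting in the patch is the `"://" not in request.GET["next"]` guard: only relative `next` values are honoured. A small illustrative sketch of what that check filters out:
```python
for nxt in ["/en-US/docs/Web/JavaScript", "https://evil.example/phish"]:
    if "://" not in nxt:
        print(f"{nxt!r}: safe to redirect to after sign-in")
    else:
        print(f"{nxt!r}: ignored (possible open redirect)")
```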
| {"golden_diff": "diff --git a/kuma/core/context_processors.py b/kuma/core/context_processors.py\n--- a/kuma/core/context_processors.py\n+++ b/kuma/core/context_processors.py\n@@ -4,6 +4,8 @@\n from django.conf import settings\n from django.utils import translation\n \n+from kuma.core.urlresolvers import reverse\n+\n from .i18n import get_language_mapping\n \n \n@@ -54,10 +56,30 @@\n \n \n def next_url(request):\n- if (\n- hasattr(request, \"path\")\n- and \"login\" not in request.path\n- and \"register\" not in request.path\n- ):\n- return {\"next_url\": request.get_full_path()}\n- return {}\n+ \"\"\"Return a function by the same name as the context processor.\n+ That means, in the jinja templates, instead of doing\n+\n+ {% set url = next_url %}\n+\n+ you just have to do:\n+\n+ {% set url = next_url() %}\n+\n+ which means that the actual context processor function isn't executed\n+ every single time any jinja template is rendered. Now, only if the\n+ context processor is actually needed, it gets executed.\n+\n+ See https://www.peterbe.com/plog/closure-django-context-processors\n+ \"\"\"\n+\n+ def inner():\n+ if hasattr(request, \"path\"):\n+ if request.GET.get(\"next\"):\n+ if \"://\" not in request.GET[\"next\"]:\n+ return request.GET[\"next\"]\n+ elif reverse(settings.LOGIN_URL) != request.get_full_path():\n+ # The only exception is the sign-in landing page which you get to\n+ # if you can't use the auth modal.\n+ return request.get_full_path()\n+\n+ return {\"next_url\": inner}\n", "issue": "T - Fix post-sign-ins redirect so that user lands on correct page\n**Summary**\r\nYou're not taken back to where you were when you sign in. At least via the modal and GitHub.\r\n\r\n\r\n\r\n\r\n**Steps To Reproduce (STR)**\r\n\r\n\r\n1. Go to some page other than the home page. Be not-signed in.\r\n2. Click sign in. \r\n3. Click GitHub and complete your GitHub auth stuff\r\n\r\n\r\n**Actual behavior**\r\nI ended up back on the home page `/en-US/`. :(\r\n\r\n\r\n**Expected behavior**\r\nTo be taken to the page I was originally on. \r\n\r\n\r\n**Additional context**\r\nPretty sure this is regression but don't know if it started when we switched to a modal. \r\n\n", "before_files": [{"content": "from urllib.parse import urlparse\n\nfrom constance import config\nfrom django.conf import settings\nfrom django.utils import translation\n\nfrom .i18n import get_language_mapping\n\n\ndef global_settings(request):\n \"\"\"Adds settings to the context.\"\"\"\n\n def clean_safe_url(url):\n if \"://\" not in url:\n # E.g. 'elasticsearch:9200'\n url = \"http://\" + url\n parsed = urlparse(url)\n if \"@\" in parsed.netloc:\n parsed = parsed._replace(\n netloc=\"username:secret@\" + parsed.netloc.split(\"@\")[-1]\n )\n return parsed.geturl()\n\n # TODO: Ideally, GOOGLE_ANALYTICS_ACCOUNT is only set in settings (from\n # an environment variable) but for safe transition, we rely on\n # constance if it hasn't been put into settings yet.\n # Once we know with confidence, that GOOGLE_ANALYTICS_ACCOUNT is set\n # and a valid value in the environment (for production!) 
then we\n # can delete these lines of code.\n # See https://bugzilla.mozilla.org/show_bug.cgi?id=1570076\n google_analytics_account = getattr(settings, \"GOOGLE_ANALYTICS_ACCOUNT\", None)\n if google_analytics_account is None:\n if config.GOOGLE_ANALYTICS_ACCOUNT != \"0\":\n settings.GOOGLE_ANALYTICS_ACCOUNT = config.GOOGLE_ANALYTICS_ACCOUNT\n\n return {\n \"settings\": settings,\n # Because the 'settings.ES_URLS' might contain the username:password\n # it's never appropriate to display in templates. So clean them up.\n # But return it as a lambda so it only executes if really needed.\n \"safe_es_urls\": lambda: [clean_safe_url(x) for x in settings.ES_URLS],\n }\n\n\ndef i18n(request):\n return {\n \"LANGUAGES\": get_language_mapping(),\n \"LANG\": (\n settings.LANGUAGE_URL_MAP.get(translation.get_language())\n or translation.get_language()\n ),\n \"DIR\": \"rtl\" if translation.get_language_bidi() else \"ltr\",\n }\n\n\ndef next_url(request):\n if (\n hasattr(request, \"path\")\n and \"login\" not in request.path\n and \"register\" not in request.path\n ):\n return {\"next_url\": request.get_full_path()}\n return {}\n", "path": "kuma/core/context_processors.py"}], "after_files": [{"content": "from urllib.parse import urlparse\n\nfrom constance import config\nfrom django.conf import settings\nfrom django.utils import translation\n\nfrom kuma.core.urlresolvers import reverse\n\nfrom .i18n import get_language_mapping\n\n\ndef global_settings(request):\n \"\"\"Adds settings to the context.\"\"\"\n\n def clean_safe_url(url):\n if \"://\" not in url:\n # E.g. 'elasticsearch:9200'\n url = \"http://\" + url\n parsed = urlparse(url)\n if \"@\" in parsed.netloc:\n parsed = parsed._replace(\n netloc=\"username:secret@\" + parsed.netloc.split(\"@\")[-1]\n )\n return parsed.geturl()\n\n # TODO: Ideally, GOOGLE_ANALYTICS_ACCOUNT is only set in settings (from\n # an environment variable) but for safe transition, we rely on\n # constance if it hasn't been put into settings yet.\n # Once we know with confidence, that GOOGLE_ANALYTICS_ACCOUNT is set\n # and a valid value in the environment (for production!) then we\n # can delete these lines of code.\n # See https://bugzilla.mozilla.org/show_bug.cgi?id=1570076\n google_analytics_account = getattr(settings, \"GOOGLE_ANALYTICS_ACCOUNT\", None)\n if google_analytics_account is None:\n if config.GOOGLE_ANALYTICS_ACCOUNT != \"0\":\n settings.GOOGLE_ANALYTICS_ACCOUNT = config.GOOGLE_ANALYTICS_ACCOUNT\n\n return {\n \"settings\": settings,\n # Because the 'settings.ES_URLS' might contain the username:password\n # it's never appropriate to display in templates. So clean them up.\n # But return it as a lambda so it only executes if really needed.\n \"safe_es_urls\": lambda: [clean_safe_url(x) for x in settings.ES_URLS],\n }\n\n\ndef i18n(request):\n return {\n \"LANGUAGES\": get_language_mapping(),\n \"LANG\": (\n settings.LANGUAGE_URL_MAP.get(translation.get_language())\n or translation.get_language()\n ),\n \"DIR\": \"rtl\" if translation.get_language_bidi() else \"ltr\",\n }\n\n\ndef next_url(request):\n \"\"\"Return a function by the same name as the context processor.\n That means, in the jinja templates, instead of doing\n\n {% set url = next_url %}\n\n you just have to do:\n\n {% set url = next_url() %}\n\n which means that the actual context processor function isn't executed\n every single time any jinja template is rendered. 
Now, only if the\n context processor is actually needed, it gets executed.\n\n See https://www.peterbe.com/plog/closure-django-context-processors\n \"\"\"\n\n def inner():\n if hasattr(request, \"path\"):\n if request.GET.get(\"next\"):\n if \"://\" not in request.GET[\"next\"]:\n return request.GET[\"next\"]\n elif reverse(settings.LOGIN_URL) != request.get_full_path():\n # The only exception is the sign-in landing page which you get to\n # if you can't use the auth modal.\n return request.get_full_path()\n\n return {\"next_url\": inner}\n", "path": "kuma/core/context_processors.py"}]} | 1,121 | 389 |
gh_patches_debug_9354 | rasdani/github-patches | git_diff | huggingface__text-generation-inference-579 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tied weight optimization for checkpoints doesn't work with text-generation-inference.
### System Info
Ubuntu 20.04
4 A10 NVIDIA GPU's
I think checkpoints saved after this feature was merged don't work with text-generation-inference.
https://github.com/huggingface/transformers/issues/23868
With Falcon models, model loading fails with "`lm_head` not found".
I'll add more details once I find minimal steps to reproduce.
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Save a tiiuae/falcon-40b checkpoint using transformers==4.30.2
Launch the text-generation-inference server
(using transformers==4.27.4 works without issue)
### Expected behavior
Expect the text-generation-inference weight loader to be able to find the `lm_head` weight in the checkpoint. Note this may be a safetensor issue.
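One way to check what the loader actually sees is to list the tensor names stored in the checkpoint shards. A minimal sketch, assuming the `safetensors` package and a locally downloaded shard (the filename below is illustrative); per the report, a checkpoint saved with transformers==4.30.2 would be missing the tied `lm_head.weight` entry:
```python
from safetensors import safe_open

with safe_open("model-00001-of-00009.safetensors", framework="pt") as f:
    keys = set(f.keys())

print(any(k.endswith("lm_head.weight") for k in keys))
print(any(k.endswith("word_embeddings.weight") for k in keys))
```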
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `server/text_generation_server/models/flash_rw.py`
Content:
```
1 import torch
2 import torch.distributed
3
4 from opentelemetry import trace
5 from transformers import AutoTokenizer
6 from typing import Optional
7
8 from text_generation_server.models import FlashCausalLM
9 from text_generation_server.models.custom_modeling.flash_rw_modeling import (
10 RWConfig,
11 FlashRWForCausalLM,
12 )
13 from text_generation_server.utils import (
14 initialize_torch_distributed,
15 weight_files,
16 Weights,
17 )
18
19 tracer = trace.get_tracer(__name__)
20
21
22 class FlashRWSharded(FlashCausalLM):
23 def __init__(
24 self,
25 model_id: str,
26 revision: Optional[str] = None,
27 quantize: Optional[str] = None,
28 dtype: Optional[torch.dtype] = None,
29 trust_remote_code: bool = False,
30 ):
31 self.process_group, rank, world_size = initialize_torch_distributed()
32 if torch.cuda.is_available():
33 device = torch.device(f"cuda:{rank}")
34 dtype = torch.float16 if dtype is None else dtype
35 else:
36 raise NotImplementedError("FlashRW is only available on GPU")
37
38 tokenizer = AutoTokenizer.from_pretrained(
39 model_id,
40 revision=revision,
41 padding_side="left",
42 truncation_side="left",
43 trust_remote_code=trust_remote_code,
44 )
45
46 config = RWConfig.from_pretrained(
47 model_id, revision=revision, trust_remote_code=trust_remote_code
48 )
49
50 torch.distributed.barrier(group=self.process_group)
51 filenames = weight_files(model_id, revision=revision, extension=".safetensors")
52 weights = Weights(filenames, device, dtype, process_group=self.process_group)
53
54 config.quantize = quantize
55
56 model = FlashRWForCausalLM(config, weights)
57
58 torch.distributed.barrier(group=self.process_group)
59 super(FlashRWSharded, self).__init__(
60 model=model.to(device),
61 tokenizer=tokenizer,
62 num_layers=len(model.transformer.h),
63 num_kv_heads=model.transformer.cache_size,
64 head_size=model.transformer.head_size,
65 dtype=dtype,
66 device=device,
67 rank=rank,
68 world_size=world_size,
69 )
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/server/text_generation_server/models/flash_rw.py b/server/text_generation_server/models/flash_rw.py
--- a/server/text_generation_server/models/flash_rw.py
+++ b/server/text_generation_server/models/flash_rw.py
@@ -49,7 +49,13 @@
torch.distributed.barrier(group=self.process_group)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
- weights = Weights(filenames, device, dtype, process_group=self.process_group)
+ weights = Weights(
+ filenames,
+ device,
+ dtype,
+ process_group=self.process_group,
+ aliases={"transformer.word_embeddings.weight": ["lm_head.weight"]},
+ )
config.quantize = quantize
| {"golden_diff": "diff --git a/server/text_generation_server/models/flash_rw.py b/server/text_generation_server/models/flash_rw.py\n--- a/server/text_generation_server/models/flash_rw.py\n+++ b/server/text_generation_server/models/flash_rw.py\n@@ -49,7 +49,13 @@\n \n torch.distributed.barrier(group=self.process_group)\n filenames = weight_files(model_id, revision=revision, extension=\".safetensors\")\n- weights = Weights(filenames, device, dtype, process_group=self.process_group)\n+ weights = Weights(\n+ filenames,\n+ device,\n+ dtype,\n+ process_group=self.process_group,\n+ aliases={\"transformer.word_embeddings.weight\": [\"lm_head.weight\"]},\n+ )\n \n config.quantize = quantize\n", "issue": "Tied weight optimization for checkpoints doesn't work with text-generation-inference.\n### System Info\r\nUbuntu 20.04\r\n4 A10 NVIDIA GPU's\r\n\r\nI think checkpoints saved after this feature was merged don't work with text-generation-inference.\r\nhttps://github.com/huggingface/transformers/issues/23868\r\n\r\nWith falcon models getting \"`lm_head` not found\"\r\nI'll add more details once I find minimal steps to reproduce.\r\n\r\n### Information\r\n\r\n- [X] Docker\r\n- [ ] The CLI directly\r\n\r\n### Tasks\r\n\r\n- [X] An officially supported command\r\n- [ ] My own modifications\r\n\r\n### Reproduction\r\n\r\nSave tiiuae/falcon-40b checkpoint using transformers==4.30.2\r\nlaunch text-generation-inference server\r\n\r\n(using transformers==4.27.4 works without issue)\r\n\r\n### Expected behavior\r\n\r\nExpect the text-generation-inference weight loader to be able to find the `lm_head` weight in the checkpoint. Note this may be a safetensor issue.\n", "before_files": [{"content": "import torch\nimport torch.distributed\n\nfrom opentelemetry import trace\nfrom transformers import AutoTokenizer\nfrom typing import Optional\n\nfrom text_generation_server.models import FlashCausalLM\nfrom text_generation_server.models.custom_modeling.flash_rw_modeling import (\n RWConfig,\n FlashRWForCausalLM,\n)\nfrom text_generation_server.utils import (\n initialize_torch_distributed,\n weight_files,\n Weights,\n)\n\ntracer = trace.get_tracer(__name__)\n\n\nclass FlashRWSharded(FlashCausalLM):\n def __init__(\n self,\n model_id: str,\n revision: Optional[str] = None,\n quantize: Optional[str] = None,\n dtype: Optional[torch.dtype] = None,\n trust_remote_code: bool = False,\n ):\n self.process_group, rank, world_size = initialize_torch_distributed()\n if torch.cuda.is_available():\n device = torch.device(f\"cuda:{rank}\")\n dtype = torch.float16 if dtype is None else dtype\n else:\n raise NotImplementedError(\"FlashRW is only available on GPU\")\n\n tokenizer = AutoTokenizer.from_pretrained(\n model_id,\n revision=revision,\n padding_side=\"left\",\n truncation_side=\"left\",\n trust_remote_code=trust_remote_code,\n )\n\n config = RWConfig.from_pretrained(\n model_id, revision=revision, trust_remote_code=trust_remote_code\n )\n\n torch.distributed.barrier(group=self.process_group)\n filenames = weight_files(model_id, revision=revision, extension=\".safetensors\")\n weights = Weights(filenames, device, dtype, process_group=self.process_group)\n\n config.quantize = quantize\n\n model = FlashRWForCausalLM(config, weights)\n\n torch.distributed.barrier(group=self.process_group)\n super(FlashRWSharded, self).__init__(\n model=model.to(device),\n tokenizer=tokenizer,\n num_layers=len(model.transformer.h),\n num_kv_heads=model.transformer.cache_size,\n head_size=model.transformer.head_size,\n dtype=dtype,\n 
device=device,\n rank=rank,\n world_size=world_size,\n )\n", "path": "server/text_generation_server/models/flash_rw.py"}], "after_files": [{"content": "import torch\nimport torch.distributed\n\nfrom opentelemetry import trace\nfrom transformers import AutoTokenizer\nfrom typing import Optional\n\nfrom text_generation_server.models import FlashCausalLM\nfrom text_generation_server.models.custom_modeling.flash_rw_modeling import (\n RWConfig,\n FlashRWForCausalLM,\n)\nfrom text_generation_server.utils import (\n initialize_torch_distributed,\n weight_files,\n Weights,\n)\n\ntracer = trace.get_tracer(__name__)\n\n\nclass FlashRWSharded(FlashCausalLM):\n def __init__(\n self,\n model_id: str,\n revision: Optional[str] = None,\n quantize: Optional[str] = None,\n dtype: Optional[torch.dtype] = None,\n trust_remote_code: bool = False,\n ):\n self.process_group, rank, world_size = initialize_torch_distributed()\n if torch.cuda.is_available():\n device = torch.device(f\"cuda:{rank}\")\n dtype = torch.float16 if dtype is None else dtype\n else:\n raise NotImplementedError(\"FlashRW is only available on GPU\")\n\n tokenizer = AutoTokenizer.from_pretrained(\n model_id,\n revision=revision,\n padding_side=\"left\",\n truncation_side=\"left\",\n trust_remote_code=trust_remote_code,\n )\n\n config = RWConfig.from_pretrained(\n model_id, revision=revision, trust_remote_code=trust_remote_code\n )\n\n torch.distributed.barrier(group=self.process_group)\n filenames = weight_files(model_id, revision=revision, extension=\".safetensors\")\n weights = Weights(\n filenames,\n device,\n dtype,\n process_group=self.process_group,\n aliases={\"transformer.word_embeddings.weight\": [\"lm_head.weight\"]},\n )\n\n config.quantize = quantize\n\n model = FlashRWForCausalLM(config, weights)\n\n torch.distributed.barrier(group=self.process_group)\n super(FlashRWSharded, self).__init__(\n model=model.to(device),\n tokenizer=tokenizer,\n num_layers=len(model.transformer.h),\n num_kv_heads=model.transformer.cache_size,\n head_size=model.transformer.head_size,\n dtype=dtype,\n device=device,\n rank=rank,\n world_size=world_size,\n )\n", "path": "server/text_generation_server/models/flash_rw.py"}]} | 1,066 | 166 |
gh_patches_debug_38598 | rasdani/github-patches | git_diff | apache__airflow-32382 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add DagProcessor status to health endpoint.
### Description
Add DagProcessor status, including the latest heartbeat, to the health endpoint, similar to the Triggerer status added recently. Related PRs:
https://github.com/apache/airflow/pull/31529
https://github.com/apache/airflow/pull/27755
### Use case/motivation
It helps with dag processor monitoring.
### Related issues
_No response_
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/api_connexion/schemas/health_schema.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 from __future__ import annotations
18
19 from marshmallow import Schema, fields
20
21
22 class BaseInfoSchema(Schema):
23 """Base status field for metadatabase and scheduler."""
24
25 status = fields.String(dump_only=True)
26
27
28 class MetaDatabaseInfoSchema(BaseInfoSchema):
29 """Schema for Metadatabase info."""
30
31
32 class SchedulerInfoSchema(BaseInfoSchema):
33 """Schema for Scheduler info."""
34
35 latest_scheduler_heartbeat = fields.String(dump_only=True)
36
37
38 class TriggererInfoSchema(BaseInfoSchema):
39 """Schema for Triggerer info."""
40
41 latest_triggerer_heartbeat = fields.String(dump_only=True)
42
43
44 class HealthInfoSchema(Schema):
45 """Schema for the Health endpoint."""
46
47 metadatabase = fields.Nested(MetaDatabaseInfoSchema)
48 scheduler = fields.Nested(SchedulerInfoSchema)
49 triggerer = fields.Nested(TriggererInfoSchema)
50
51
52 health_schema = HealthInfoSchema()
53
```
Path: `airflow/api/common/airflow_health.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 from __future__ import annotations
18
19 from typing import Any
20
21 from airflow.jobs.scheduler_job_runner import SchedulerJobRunner
22 from airflow.jobs.triggerer_job_runner import TriggererJobRunner
23
24 HEALTHY = "healthy"
25 UNHEALTHY = "unhealthy"
26
27
28 def get_airflow_health() -> dict[str, Any]:
29 """Get the health for Airflow metadatabase, scheduler and triggerer."""
30 metadatabase_status = HEALTHY
31 latest_scheduler_heartbeat = None
32 latest_triggerer_heartbeat = None
33 scheduler_status = UNHEALTHY
34 triggerer_status: str | None = UNHEALTHY
35
36 try:
37 latest_scheduler_job = SchedulerJobRunner.most_recent_job()
38
39 if latest_scheduler_job:
40 latest_scheduler_heartbeat = latest_scheduler_job.latest_heartbeat.isoformat()
41 if latest_scheduler_job.is_alive():
42 scheduler_status = HEALTHY
43 except Exception:
44 metadatabase_status = UNHEALTHY
45
46 try:
47 latest_triggerer_job = TriggererJobRunner.most_recent_job()
48
49 if latest_triggerer_job:
50 latest_triggerer_heartbeat = latest_triggerer_job.latest_heartbeat.isoformat()
51 if latest_triggerer_job.is_alive():
52 triggerer_status = HEALTHY
53 else:
54 triggerer_status = None
55 except Exception:
56 metadatabase_status = UNHEALTHY
57
58 airflow_health_status = {
59 "metadatabase": {"status": metadatabase_status},
60 "scheduler": {
61 "status": scheduler_status,
62 "latest_scheduler_heartbeat": latest_scheduler_heartbeat,
63 },
64 "triggerer": {
65 "status": triggerer_status,
66 "latest_triggerer_heartbeat": latest_triggerer_heartbeat,
67 },
68 }
69
70 return airflow_health_status
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/api/common/airflow_health.py b/airflow/api/common/airflow_health.py
--- a/airflow/api/common/airflow_health.py
+++ b/airflow/api/common/airflow_health.py
@@ -18,6 +18,7 @@
from typing import Any
+from airflow.jobs.dag_processor_job_runner import DagProcessorJobRunner
from airflow.jobs.scheduler_job_runner import SchedulerJobRunner
from airflow.jobs.triggerer_job_runner import TriggererJobRunner
@@ -30,8 +31,10 @@
metadatabase_status = HEALTHY
latest_scheduler_heartbeat = None
latest_triggerer_heartbeat = None
+ latest_dag_processor_heartbeat = None
scheduler_status = UNHEALTHY
triggerer_status: str | None = UNHEALTHY
+ dag_processor_status: str | None = UNHEALTHY
try:
latest_scheduler_job = SchedulerJobRunner.most_recent_job()
@@ -55,6 +58,18 @@
except Exception:
metadatabase_status = UNHEALTHY
+ try:
+ latest_dag_processor_job = DagProcessorJobRunner.most_recent_job()
+
+ if latest_dag_processor_job:
+ latest_dag_processor_heartbeat = latest_dag_processor_job.latest_heartbeat.isoformat()
+ if latest_dag_processor_job.is_alive():
+ dag_processor_status = HEALTHY
+ else:
+ dag_processor_status = None
+ except Exception:
+ metadatabase_status = UNHEALTHY
+
airflow_health_status = {
"metadatabase": {"status": metadatabase_status},
"scheduler": {
@@ -65,6 +80,10 @@
"status": triggerer_status,
"latest_triggerer_heartbeat": latest_triggerer_heartbeat,
},
+ "dag_processor": {
+ "status": dag_processor_status,
+ "latest_dag_processor_heartbeat": latest_dag_processor_heartbeat,
+ },
}
return airflow_health_status
diff --git a/airflow/api_connexion/schemas/health_schema.py b/airflow/api_connexion/schemas/health_schema.py
--- a/airflow/api_connexion/schemas/health_schema.py
+++ b/airflow/api_connexion/schemas/health_schema.py
@@ -41,12 +41,19 @@
latest_triggerer_heartbeat = fields.String(dump_only=True)
+class DagProcessorInfoSchema(BaseInfoSchema):
+ """Schema for DagProcessor info."""
+
+ latest_dag_processor_heartbeat = fields.String(dump_only=True)
+
+
class HealthInfoSchema(Schema):
"""Schema for the Health endpoint."""
metadatabase = fields.Nested(MetaDatabaseInfoSchema)
scheduler = fields.Nested(SchedulerInfoSchema)
triggerer = fields.Nested(TriggererInfoSchema)
+ dag_processor = fields.Nested(DagProcessorInfoSchema)
health_schema = HealthInfoSchema()
| {"golden_diff": "diff --git a/airflow/api/common/airflow_health.py b/airflow/api/common/airflow_health.py\n--- a/airflow/api/common/airflow_health.py\n+++ b/airflow/api/common/airflow_health.py\n@@ -18,6 +18,7 @@\n \n from typing import Any\n \n+from airflow.jobs.dag_processor_job_runner import DagProcessorJobRunner\n from airflow.jobs.scheduler_job_runner import SchedulerJobRunner\n from airflow.jobs.triggerer_job_runner import TriggererJobRunner\n \n@@ -30,8 +31,10 @@\n metadatabase_status = HEALTHY\n latest_scheduler_heartbeat = None\n latest_triggerer_heartbeat = None\n+ latest_dag_processor_heartbeat = None\n scheduler_status = UNHEALTHY\n triggerer_status: str | None = UNHEALTHY\n+ dag_processor_status: str | None = UNHEALTHY\n \n try:\n latest_scheduler_job = SchedulerJobRunner.most_recent_job()\n@@ -55,6 +58,18 @@\n except Exception:\n metadatabase_status = UNHEALTHY\n \n+ try:\n+ latest_dag_processor_job = DagProcessorJobRunner.most_recent_job()\n+\n+ if latest_dag_processor_job:\n+ latest_dag_processor_heartbeat = latest_dag_processor_job.latest_heartbeat.isoformat()\n+ if latest_dag_processor_job.is_alive():\n+ dag_processor_status = HEALTHY\n+ else:\n+ dag_processor_status = None\n+ except Exception:\n+ metadatabase_status = UNHEALTHY\n+\n airflow_health_status = {\n \"metadatabase\": {\"status\": metadatabase_status},\n \"scheduler\": {\n@@ -65,6 +80,10 @@\n \"status\": triggerer_status,\n \"latest_triggerer_heartbeat\": latest_triggerer_heartbeat,\n },\n+ \"dag_processor\": {\n+ \"status\": dag_processor_status,\n+ \"latest_dag_processor_heartbeat\": latest_dag_processor_heartbeat,\n+ },\n }\n \n return airflow_health_status\ndiff --git a/airflow/api_connexion/schemas/health_schema.py b/airflow/api_connexion/schemas/health_schema.py\n--- a/airflow/api_connexion/schemas/health_schema.py\n+++ b/airflow/api_connexion/schemas/health_schema.py\n@@ -41,12 +41,19 @@\n latest_triggerer_heartbeat = fields.String(dump_only=True)\n \n \n+class DagProcessorInfoSchema(BaseInfoSchema):\n+ \"\"\"Schema for DagProcessor info.\"\"\"\n+\n+ latest_dag_processor_heartbeat = fields.String(dump_only=True)\n+\n+\n class HealthInfoSchema(Schema):\n \"\"\"Schema for the Health endpoint.\"\"\"\n \n metadatabase = fields.Nested(MetaDatabaseInfoSchema)\n scheduler = fields.Nested(SchedulerInfoSchema)\n triggerer = fields.Nested(TriggererInfoSchema)\n+ dag_processor = fields.Nested(DagProcessorInfoSchema)\n \n \n health_schema = HealthInfoSchema()\n", "issue": "Add DagProcessor status to health endpoint.\n### Description\n\nAdd DagProcessor status including latest heartbeat to health endpoint similar to Triggerer status added recently. Related PRs.\r\n\r\nhttps://github.com/apache/airflow/pull/31529\r\nhttps://github.com/apache/airflow/pull/27755\n\n### Use case/motivation\n\nIt helps in dag processor monitoring \n\n### Related issues\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom marshmallow import Schema, fields\n\n\nclass BaseInfoSchema(Schema):\n \"\"\"Base status field for metadatabase and scheduler.\"\"\"\n\n status = fields.String(dump_only=True)\n\n\nclass MetaDatabaseInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Metadatabase info.\"\"\"\n\n\nclass SchedulerInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Scheduler info.\"\"\"\n\n latest_scheduler_heartbeat = fields.String(dump_only=True)\n\n\nclass TriggererInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Triggerer info.\"\"\"\n\n latest_triggerer_heartbeat = fields.String(dump_only=True)\n\n\nclass HealthInfoSchema(Schema):\n \"\"\"Schema for the Health endpoint.\"\"\"\n\n metadatabase = fields.Nested(MetaDatabaseInfoSchema)\n scheduler = fields.Nested(SchedulerInfoSchema)\n triggerer = fields.Nested(TriggererInfoSchema)\n\n\nhealth_schema = HealthInfoSchema()\n", "path": "airflow/api_connexion/schemas/health_schema.py"}, {"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom airflow.jobs.scheduler_job_runner import SchedulerJobRunner\nfrom airflow.jobs.triggerer_job_runner import TriggererJobRunner\n\nHEALTHY = \"healthy\"\nUNHEALTHY = \"unhealthy\"\n\n\ndef get_airflow_health() -> dict[str, Any]:\n \"\"\"Get the health for Airflow metadatabase, scheduler and triggerer.\"\"\"\n metadatabase_status = HEALTHY\n latest_scheduler_heartbeat = None\n latest_triggerer_heartbeat = None\n scheduler_status = UNHEALTHY\n triggerer_status: str | None = UNHEALTHY\n\n try:\n latest_scheduler_job = SchedulerJobRunner.most_recent_job()\n\n if latest_scheduler_job:\n latest_scheduler_heartbeat = latest_scheduler_job.latest_heartbeat.isoformat()\n if latest_scheduler_job.is_alive():\n scheduler_status = HEALTHY\n except Exception:\n metadatabase_status = UNHEALTHY\n\n try:\n latest_triggerer_job = TriggererJobRunner.most_recent_job()\n\n if latest_triggerer_job:\n latest_triggerer_heartbeat = latest_triggerer_job.latest_heartbeat.isoformat()\n if latest_triggerer_job.is_alive():\n triggerer_status = HEALTHY\n else:\n triggerer_status = None\n except Exception:\n metadatabase_status = UNHEALTHY\n\n airflow_health_status = {\n \"metadatabase\": {\"status\": metadatabase_status},\n \"scheduler\": {\n \"status\": scheduler_status,\n \"latest_scheduler_heartbeat\": latest_scheduler_heartbeat,\n },\n \"triggerer\": {\n \"status\": triggerer_status,\n \"latest_triggerer_heartbeat\": latest_triggerer_heartbeat,\n },\n }\n\n return airflow_health_status\n", "path": "airflow/api/common/airflow_health.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom marshmallow import Schema, fields\n\n\nclass BaseInfoSchema(Schema):\n \"\"\"Base status field for metadatabase and scheduler.\"\"\"\n\n status = fields.String(dump_only=True)\n\n\nclass MetaDatabaseInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Metadatabase info.\"\"\"\n\n\nclass SchedulerInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Scheduler info.\"\"\"\n\n latest_scheduler_heartbeat = fields.String(dump_only=True)\n\n\nclass TriggererInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Triggerer info.\"\"\"\n\n latest_triggerer_heartbeat = fields.String(dump_only=True)\n\n\nclass DagProcessorInfoSchema(BaseInfoSchema):\n \"\"\"Schema for DagProcessor info.\"\"\"\n\n latest_dag_processor_heartbeat = fields.String(dump_only=True)\n\n\nclass HealthInfoSchema(Schema):\n \"\"\"Schema for the Health endpoint.\"\"\"\n\n metadatabase = fields.Nested(MetaDatabaseInfoSchema)\n scheduler = fields.Nested(SchedulerInfoSchema)\n triggerer = fields.Nested(TriggererInfoSchema)\n dag_processor = fields.Nested(DagProcessorInfoSchema)\n\n\nhealth_schema = HealthInfoSchema()\n", "path": "airflow/api_connexion/schemas/health_schema.py"}, {"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom airflow.jobs.dag_processor_job_runner import DagProcessorJobRunner\nfrom airflow.jobs.scheduler_job_runner import SchedulerJobRunner\nfrom airflow.jobs.triggerer_job_runner import TriggererJobRunner\n\nHEALTHY = \"healthy\"\nUNHEALTHY = \"unhealthy\"\n\n\ndef get_airflow_health() -> dict[str, Any]:\n \"\"\"Get the health for Airflow metadatabase, scheduler and triggerer.\"\"\"\n metadatabase_status = HEALTHY\n latest_scheduler_heartbeat = None\n latest_triggerer_heartbeat = None\n latest_dag_processor_heartbeat = None\n scheduler_status = UNHEALTHY\n triggerer_status: str | None = UNHEALTHY\n dag_processor_status: str | None = UNHEALTHY\n\n try:\n latest_scheduler_job = SchedulerJobRunner.most_recent_job()\n\n if latest_scheduler_job:\n latest_scheduler_heartbeat = latest_scheduler_job.latest_heartbeat.isoformat()\n if latest_scheduler_job.is_alive():\n scheduler_status = HEALTHY\n except Exception:\n metadatabase_status = UNHEALTHY\n\n try:\n latest_triggerer_job = TriggererJobRunner.most_recent_job()\n\n if latest_triggerer_job:\n latest_triggerer_heartbeat = latest_triggerer_job.latest_heartbeat.isoformat()\n if latest_triggerer_job.is_alive():\n triggerer_status = HEALTHY\n else:\n triggerer_status = None\n except Exception:\n metadatabase_status = UNHEALTHY\n\n try:\n latest_dag_processor_job = DagProcessorJobRunner.most_recent_job()\n\n if latest_dag_processor_job:\n latest_dag_processor_heartbeat = latest_dag_processor_job.latest_heartbeat.isoformat()\n if latest_dag_processor_job.is_alive():\n dag_processor_status = HEALTHY\n else:\n dag_processor_status = None\n except Exception:\n metadatabase_status = UNHEALTHY\n\n airflow_health_status = {\n \"metadatabase\": {\"status\": metadatabase_status},\n \"scheduler\": {\n \"status\": scheduler_status,\n \"latest_scheduler_heartbeat\": latest_scheduler_heartbeat,\n },\n \"triggerer\": {\n \"status\": triggerer_status,\n \"latest_triggerer_heartbeat\": latest_triggerer_heartbeat,\n },\n \"dag_processor\": {\n \"status\": dag_processor_status,\n \"latest_dag_processor_heartbeat\": latest_dag_processor_heartbeat,\n },\n }\n\n return airflow_health_status\n", "path": "airflow/api/common/airflow_health.py"}]} | 1,584 | 652 |
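The patch above wires a DagProcessor check into `get_airflow_health` and exposes it through `HealthInfoSchema`, mirroring the existing scheduler and triggerer entries. A minimal monitoring probe for the extended payload might look like the sketch below; the base URL and the use of `requests` are assumptions for illustration, while the field names (`dag_processor`, `status`, `latest_dag_processor_heartbeat`) follow the schema in the diff.

```python
# Probe the extended /health payload; field names follow the patched schema.
import sys

import requests

BASE_URL = "http://localhost:8080"  # assumption: local Airflow webserver


def dag_processor_is_healthy(base_url: str = BASE_URL) -> bool:
    response = requests.get(f"{base_url}/health", timeout=10)
    response.raise_for_status()
    payload = response.json()
    # "status" may be null when no standalone dag processor is running.
    component = payload.get("dag_processor", {})
    print(
        "dag_processor:",
        component.get("status"),
        "last heartbeat:",
        component.get("latest_dag_processor_heartbeat"),
    )
    return component.get("status") == "healthy"


if __name__ == "__main__":
    sys.exit(0 if dag_processor_is_healthy() else 1)
```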
gh_patches_debug_31339 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1853 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Better exception handling for unmet dependencies
Some of our users update Pwndbg only by `git pull`ing the newest version and not re-running `setup.sh`. If that happens after we have updated our dependencies, the user may end up with a broken Pwndbg, as (likely) happened in https://github.com/pwndbg/pwndbg/issues/1790.
We should do the two following things:
1) When we detect/handle an exception in the [`pwndbg/exception.py:handle` function](https://github.com/pwndbg/pwndbg/blob/de4acb2f40da3b0c97353a8b680ffa6440346f7b/pwndbg/exception.py#L61-L96), we should check the installed dependency versions against the required ones and, if they do not match, inform the user to update them.
2) We may want to display the installed dependency versions within the `bugreport` command output?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/exception.py`
Content:
```
1 from __future__ import annotations
2
3 import functools
4 import sys
5 import traceback
6
7 import gdb
8
9 import pwndbg.lib.cache
10 import pwndbg.lib.stdio
11 from pwndbg.color import message
12 from pwndbg.gdblib import config
13
14 with pwndbg.lib.stdio.stdio:
15 try:
16 import ipdb as pdb
17 except ImportError:
18 import pdb
19 try:
20 from rich.console import Console
21
22 _rich_console = Console()
23 except ImportError:
24 _rich_console = None
25
26 verbose = config.add_param(
27 "exception-verbose",
28 False,
29 "whether to print a full stacktrace for exceptions raised in Pwndbg commands",
30 )
31 debug = config.add_param(
32 "exception-debugger", False, "whether to debug exceptions raised in Pwndbg commands"
33 )
34
35
36 @pwndbg.lib.cache.cache_until("forever")
37 def inform_report_issue(exception_msg) -> None:
38 """
39 Informs user that he can report an issue.
40 The use of caching makes it reporting only once for a given exception message.
41 """
42 print(
43 message.notice(
44 "If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\n"
45 "(Please don't forget to search if it hasn't been reported before)\n"
46 "To generate the report and open a browser, you may run "
47 )
48 + message.hint("`bugreport --run-browser`")
49 + message.notice("\nPS: Pull requests are welcome")
50 )
51
52
53 def inform_verbose_and_debug() -> None:
54 print(
55 message.notice("For more info invoke `")
56 + message.hint("set exception-verbose on")
57 + message.notice("` and rerun the command\nor debug it by yourself with `")
58 + message.hint("set exception-debugger on")
59 + message.notice("`")
60 )
61
62
63 def handle(name="Error"):
64 """Displays an exception to the user, optionally displaying a full traceback
65 and spawning an interactive post-moretem debugger.
66
67 Notes:
68 - ``set exception-verbose on`` enables stack traces.
69 - ``set exception-debugger on`` enables the post-mortem debugger.
70 """
71
72 # This is for unit tests so they fail on exceptions instead of displaying them.
73 if getattr(sys, "_pwndbg_unittest_run", False) is True:
74 E, V, T = sys.exc_info()
75 e = E(V)
76 e.__traceback__ = T
77 raise e
78
79 # Display the error
80 if debug or verbose:
81 exception_msg = traceback.format_exc()
82 if _rich_console:
83 _rich_console.print_exception()
84 else:
85 print(exception_msg)
86 inform_report_issue(exception_msg)
87
88 else:
89 exc_type, exc_value, exc_traceback = sys.exc_info()
90
91 print(message.error(f"Exception occurred: {name}: {exc_value} ({exc_type})"))
92
93 inform_verbose_and_debug()
94
95 # Break into the interactive debugger
96 if debug:
97 with pwndbg.lib.stdio.stdio:
98 pdb.post_mortem()
99
100
101 @functools.wraps(pdb.set_trace)
102 def set_trace() -> None:
103 """Enable sane debugging in Pwndbg by switching to the "real" stdio."""
104 debugger = pdb.Pdb(
105 stdin=sys.__stdin__, stdout=sys.__stdout__, skip=["pwndbg.lib.stdio", "pwndbg.exception"]
106 )
107 debugger.set_trace()
108
109
110 pdb.set_trace = set_trace
111
112
113 @config.trigger(verbose, debug)
114 def update() -> None:
115 if verbose or debug:
116 command = "set python print-stack full"
117 else:
118 command = "set python print-stack message"
119
120 gdb.execute(command, from_tty=True, to_string=True)
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/exception.py b/pwndbg/exception.py
--- a/pwndbg/exception.py
+++ b/pwndbg/exception.py
@@ -1,10 +1,12 @@
from __future__ import annotations
import functools
+import os
import sys
import traceback
import gdb
+import pkg_resources
import pwndbg.lib.cache
import pwndbg.lib.stdio
@@ -33,6 +35,41 @@
)
+def inform_unmet_dependencies(errors) -> None:
+ """
+ Informs user about unmet dependencies
+ """
+ msg = message.error("You appear to have unmet Pwndbg dependencies.\n")
+ for e in errors:
+ if isinstance(e, pkg_resources.DistributionNotFound):
+ msg += message.notice(f"- required {e.args[0]}, but not installed\n")
+ else:
+ msg += message.notice(f"- required {e.args[1]}, installed: {e.args[0]}\n")
+ msg += message.notice("Consider running: ")
+ msg += message.hint("`setup.sh` ")
+ msg += message.notice("from Pwndbg project directory.\n")
+ print(msg)
+
+
[email protected]_until("forever")
+def check_dependencies():
+ """
+ Checks if there are any unmet dependencies in requirements.txt
+ """
+ project_path = os.path.dirname(os.path.abspath(__file__))
+ requirements_path = os.path.join(project_path, os.pardir, "requirements.txt")
+ with open(requirements_path, "r") as f:
+ errors = []
+ for line in f.readlines():
+ try:
+ pkg_resources.require(line)
+ except (pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as e:
+ errors.append(e)
+
+ if errors:
+ inform_unmet_dependencies(errors)
+
+
@pwndbg.lib.cache.cache_until("forever")
def inform_report_issue(exception_msg) -> None:
"""
@@ -76,6 +113,9 @@
e.__traceback__ = T
raise e
+ # Check dependencies against requirements.txt and warn user
+ check_dependencies()
+
# Display the error
if debug or verbose:
exception_msg = traceback.format_exc()
| {"golden_diff": "diff --git a/pwndbg/exception.py b/pwndbg/exception.py\n--- a/pwndbg/exception.py\n+++ b/pwndbg/exception.py\n@@ -1,10 +1,12 @@\n from __future__ import annotations\n \n import functools\n+import os\n import sys\n import traceback\n \n import gdb\n+import pkg_resources\n \n import pwndbg.lib.cache\n import pwndbg.lib.stdio\n@@ -33,6 +35,41 @@\n )\n \n \n+def inform_unmet_dependencies(errors) -> None:\n+ \"\"\"\n+ Informs user about unmet dependencies\n+ \"\"\"\n+ msg = message.error(\"You appear to have unmet Pwndbg dependencies.\\n\")\n+ for e in errors:\n+ if isinstance(e, pkg_resources.DistributionNotFound):\n+ msg += message.notice(f\"- required {e.args[0]}, but not installed\\n\")\n+ else:\n+ msg += message.notice(f\"- required {e.args[1]}, installed: {e.args[0]}\\n\")\n+ msg += message.notice(\"Consider running: \")\n+ msg += message.hint(\"`setup.sh` \")\n+ msg += message.notice(\"from Pwndbg project directory.\\n\")\n+ print(msg)\n+\n+\[email protected]_until(\"forever\")\n+def check_dependencies():\n+ \"\"\"\n+ Checks if there are any unmet dependencies in requirements.txt\n+ \"\"\"\n+ project_path = os.path.dirname(os.path.abspath(__file__))\n+ requirements_path = os.path.join(project_path, os.pardir, \"requirements.txt\")\n+ with open(requirements_path, \"r\") as f:\n+ errors = []\n+ for line in f.readlines():\n+ try:\n+ pkg_resources.require(line)\n+ except (pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as e:\n+ errors.append(e)\n+\n+ if errors:\n+ inform_unmet_dependencies(errors)\n+\n+\n @pwndbg.lib.cache.cache_until(\"forever\")\n def inform_report_issue(exception_msg) -> None:\n \"\"\"\n@@ -76,6 +113,9 @@\n e.__traceback__ = T\n raise e\n \n+ # Check dependencies against requirements.txt and warn user\n+ check_dependencies()\n+\n # Display the error\n if debug or verbose:\n exception_msg = traceback.format_exc()\n", "issue": "Better exception handling for unmet dependencies\nSome of our users update Pwndbg only by `git pull`ing the newest version and not re-running `setup.sh`. If that happens and when we updated our dependencies recently, the user may end up with broken Pwndbg such as (likely) happened in https://github.com/pwndbg/pwndbg/issues/1790.\r\n\r\nWe should do the two following things:\r\n1) When we detect/handle an exception in the [`pwndbg/exception.py:handle` function]( https://github.com/pwndbg/pwndbg/blob/de4acb2f40da3b0c97353a8b680ffa6440346f7b/pwndbg/exception.py#L61-L96), we should check the installed dependencies versions against the required ones and if they do not match, we should inform the user to update them.\r\n2) We may want to display installed dependencies versions within the `bugreport` command output? 
\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport sys\nimport traceback\n\nimport gdb\n\nimport pwndbg.lib.cache\nimport pwndbg.lib.stdio\nfrom pwndbg.color import message\nfrom pwndbg.gdblib import config\n\nwith pwndbg.lib.stdio.stdio:\n try:\n import ipdb as pdb\n except ImportError:\n import pdb\n try:\n from rich.console import Console\n\n _rich_console = Console()\n except ImportError:\n _rich_console = None\n\nverbose = config.add_param(\n \"exception-verbose\",\n False,\n \"whether to print a full stacktrace for exceptions raised in Pwndbg commands\",\n)\ndebug = config.add_param(\n \"exception-debugger\", False, \"whether to debug exceptions raised in Pwndbg commands\"\n)\n\n\[email protected]_until(\"forever\")\ndef inform_report_issue(exception_msg) -> None:\n \"\"\"\n Informs user that he can report an issue.\n The use of caching makes it reporting only once for a given exception message.\n \"\"\"\n print(\n message.notice(\n \"If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\\n\"\n \"(Please don't forget to search if it hasn't been reported before)\\n\"\n \"To generate the report and open a browser, you may run \"\n )\n + message.hint(\"`bugreport --run-browser`\")\n + message.notice(\"\\nPS: Pull requests are welcome\")\n )\n\n\ndef inform_verbose_and_debug() -> None:\n print(\n message.notice(\"For more info invoke `\")\n + message.hint(\"set exception-verbose on\")\n + message.notice(\"` and rerun the command\\nor debug it by yourself with `\")\n + message.hint(\"set exception-debugger on\")\n + message.notice(\"`\")\n )\n\n\ndef handle(name=\"Error\"):\n \"\"\"Displays an exception to the user, optionally displaying a full traceback\n and spawning an interactive post-moretem debugger.\n\n Notes:\n - ``set exception-verbose on`` enables stack traces.\n - ``set exception-debugger on`` enables the post-mortem debugger.\n \"\"\"\n\n # This is for unit tests so they fail on exceptions instead of displaying them.\n if getattr(sys, \"_pwndbg_unittest_run\", False) is True:\n E, V, T = sys.exc_info()\n e = E(V)\n e.__traceback__ = T\n raise e\n\n # Display the error\n if debug or verbose:\n exception_msg = traceback.format_exc()\n if _rich_console:\n _rich_console.print_exception()\n else:\n print(exception_msg)\n inform_report_issue(exception_msg)\n\n else:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n\n print(message.error(f\"Exception occurred: {name}: {exc_value} ({exc_type})\"))\n\n inform_verbose_and_debug()\n\n # Break into the interactive debugger\n if debug:\n with pwndbg.lib.stdio.stdio:\n pdb.post_mortem()\n\n\[email protected](pdb.set_trace)\ndef set_trace() -> None:\n \"\"\"Enable sane debugging in Pwndbg by switching to the \"real\" stdio.\"\"\"\n debugger = pdb.Pdb(\n stdin=sys.__stdin__, stdout=sys.__stdout__, skip=[\"pwndbg.lib.stdio\", \"pwndbg.exception\"]\n )\n debugger.set_trace()\n\n\npdb.set_trace = set_trace\n\n\[email protected](verbose, debug)\ndef update() -> None:\n if verbose or debug:\n command = \"set python print-stack full\"\n else:\n command = \"set python print-stack message\"\n\n gdb.execute(command, from_tty=True, to_string=True)\n", "path": "pwndbg/exception.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport os\nimport sys\nimport traceback\n\nimport gdb\nimport pkg_resources\n\nimport pwndbg.lib.cache\nimport pwndbg.lib.stdio\nfrom pwndbg.color import message\nfrom pwndbg.gdblib import config\n\nwith 
pwndbg.lib.stdio.stdio:\n try:\n import ipdb as pdb\n except ImportError:\n import pdb\n try:\n from rich.console import Console\n\n _rich_console = Console()\n except ImportError:\n _rich_console = None\n\nverbose = config.add_param(\n \"exception-verbose\",\n False,\n \"whether to print a full stacktrace for exceptions raised in Pwndbg commands\",\n)\ndebug = config.add_param(\n \"exception-debugger\", False, \"whether to debug exceptions raised in Pwndbg commands\"\n)\n\n\ndef inform_unmet_dependencies(errors) -> None:\n \"\"\"\n Informs user about unmet dependencies\n \"\"\"\n msg = message.error(\"You appear to have unmet Pwndbg dependencies.\\n\")\n for e in errors:\n if isinstance(e, pkg_resources.DistributionNotFound):\n msg += message.notice(f\"- required {e.args[0]}, but not installed\\n\")\n else:\n msg += message.notice(f\"- required {e.args[1]}, installed: {e.args[0]}\\n\")\n msg += message.notice(\"Consider running: \")\n msg += message.hint(\"`setup.sh` \")\n msg += message.notice(\"from Pwndbg project directory.\\n\")\n print(msg)\n\n\[email protected]_until(\"forever\")\ndef check_dependencies():\n \"\"\"\n Checks if there are any unmet dependencies in requirements.txt\n \"\"\"\n project_path = os.path.dirname(os.path.abspath(__file__))\n requirements_path = os.path.join(project_path, os.pardir, \"requirements.txt\")\n with open(requirements_path, \"r\") as f:\n errors = []\n for line in f.readlines():\n try:\n pkg_resources.require(line)\n except (pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as e:\n errors.append(e)\n\n if errors:\n inform_unmet_dependencies(errors)\n\n\[email protected]_until(\"forever\")\ndef inform_report_issue(exception_msg) -> None:\n \"\"\"\n Informs user that he can report an issue.\n The use of caching makes it reporting only once for a given exception message.\n \"\"\"\n print(\n message.notice(\n \"If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\\n\"\n \"(Please don't forget to search if it hasn't been reported before)\\n\"\n \"To generate the report and open a browser, you may run \"\n )\n + message.hint(\"`bugreport --run-browser`\")\n + message.notice(\"\\nPS: Pull requests are welcome\")\n )\n\n\ndef inform_verbose_and_debug() -> None:\n print(\n message.notice(\"For more info invoke `\")\n + message.hint(\"set exception-verbose on\")\n + message.notice(\"` and rerun the command\\nor debug it by yourself with `\")\n + message.hint(\"set exception-debugger on\")\n + message.notice(\"`\")\n )\n\n\ndef handle(name=\"Error\"):\n \"\"\"Displays an exception to the user, optionally displaying a full traceback\n and spawning an interactive post-moretem debugger.\n\n Notes:\n - ``set exception-verbose on`` enables stack traces.\n - ``set exception-debugger on`` enables the post-mortem debugger.\n \"\"\"\n\n # This is for unit tests so they fail on exceptions instead of displaying them.\n if getattr(sys, \"_pwndbg_unittest_run\", False) is True:\n E, V, T = sys.exc_info()\n e = E(V)\n e.__traceback__ = T\n raise e\n\n # Check dependencies against requirements.txt and warn user\n check_dependencies()\n\n # Display the error\n if debug or verbose:\n exception_msg = traceback.format_exc()\n if _rich_console:\n _rich_console.print_exception()\n else:\n print(exception_msg)\n inform_report_issue(exception_msg)\n\n else:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n\n print(message.error(f\"Exception occurred: {name}: {exc_value} ({exc_type})\"))\n\n inform_verbose_and_debug()\n\n # Break 
into the interactive debugger\n if debug:\n with pwndbg.lib.stdio.stdio:\n pdb.post_mortem()\n\n\[email protected](pdb.set_trace)\ndef set_trace() -> None:\n \"\"\"Enable sane debugging in Pwndbg by switching to the \"real\" stdio.\"\"\"\n debugger = pdb.Pdb(\n stdin=sys.__stdin__, stdout=sys.__stdout__, skip=[\"pwndbg.lib.stdio\", \"pwndbg.exception\"]\n )\n debugger.set_trace()\n\n\npdb.set_trace = set_trace\n\n\[email protected](verbose, debug)\ndef update() -> None:\n if verbose or debug:\n command = \"set python print-stack full\"\n else:\n command = \"set python print-stack message\"\n\n gdb.execute(command, from_tty=True, to_string=True)\n", "path": "pwndbg/exception.py"}]} | 1,541 | 506 |
gh_patches_debug_18840 | rasdani/github-patches | git_diff | pyro-ppl__numpyro-1325 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
compat module: compat.infer.MCMC broken
Calling
```python
compat.infer.MCMC(kernel, num_warmup=100, num_samples=1000)
```
throws an error since no kwargs are handed to `mcmc.MCMC`. Also, shouldnt the correct `compat` call be
```python
compat.infer.MCMC(kernel, warmup_steps=100, num_samples=1000)
```
Thanks :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpyro/compat/infer.py`
Content:
```
1 # Copyright Contributors to the Pyro project.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import math
5
6 from jax import jit
7
8 import numpyro
9 from numpyro.compat.pyro import get_param_store
10 from numpyro.infer import elbo, hmc, mcmc, svi
11
12
13 class HMC(hmc.HMC):
14 def __init__(
15 self,
16 model=None,
17 potential_fn=None,
18 step_size=1,
19 adapt_step_size=True,
20 adapt_mass_matrix=True,
21 full_mass=False,
22 use_multinomial_sampling=True,
23 transforms=None,
24 max_plate_nesting=None,
25 jit_compile=False,
26 jit_options=None,
27 ignore_jit_warnings=False,
28 trajectory_length=2 * math.pi,
29 target_accept_prob=0.8,
30 ):
31 super(HMC, self).__init__(
32 model=model,
33 potential_fn=potential_fn,
34 step_size=step_size,
35 adapt_step_size=adapt_step_size,
36 adapt_mass_matrix=adapt_mass_matrix,
37 dense_mass=full_mass,
38 target_accept_prob=target_accept_prob,
39 trajectory_length=trajectory_length,
40 )
41
42
43 class NUTS(hmc.NUTS):
44 def __init__(
45 self,
46 model=None,
47 potential_fn=None,
48 step_size=1,
49 adapt_step_size=True,
50 adapt_mass_matrix=True,
51 full_mass=False,
52 use_multinomial_sampling=True,
53 transforms=None,
54 max_plate_nesting=None,
55 jit_compile=False,
56 jit_options=None,
57 ignore_jit_warnings=False,
58 trajectory_length=2 * math.pi,
59 target_accept_prob=0.8,
60 max_tree_depth=10,
61 ):
62 if potential_fn is not None:
63 raise ValueError(
64 "Only `model` argument is supported in generic module;"
65 " `potential_fn` is not supported."
66 )
67 super(NUTS, self).__init__(
68 model=model,
69 potential_fn=potential_fn,
70 step_size=step_size,
71 adapt_step_size=adapt_step_size,
72 adapt_mass_matrix=adapt_mass_matrix,
73 dense_mass=full_mass,
74 target_accept_prob=target_accept_prob,
75 trajectory_length=trajectory_length,
76 max_tree_depth=max_tree_depth,
77 )
78
79
80 class MCMC(object):
81 def __init__(
82 self,
83 kernel,
84 num_samples,
85 num_warmup=None,
86 initial_params=None,
87 num_chains=1,
88 hook_fn=None,
89 mp_context=None,
90 disable_progbar=False,
91 disable_validation=True,
92 transforms=None,
93 ):
94 if num_warmup is None:
95 num_warmup = num_samples
96 self._initial_params = initial_params
97 self._mcmc = mcmc.MCMC(
98 kernel,
99 num_warmup,
100 num_samples,
101 num_chains=num_chains,
102 progress_bar=(not disable_progbar),
103 )
104
105 def run(self, *args, rng_key=None, **kwargs):
106 if rng_key is None:
107 rng_key = numpyro.prng_key()
108 self._mcmc.run(rng_key, *args, init_params=self._initial_params, **kwargs)
109
110 def get_samples(self, num_samples=None, group_by_chain=False):
111 if num_samples is not None:
112 raise ValueError("`num_samples` arg unsupported in NumPyro.")
113 return self._mcmc.get_samples(group_by_chain=group_by_chain)
114
115 def summary(self, prob=0.9):
116 self._mcmc.print_summary()
117
118
119 class SVI(svi.SVI):
120 def __init__(
121 self,
122 model,
123 guide,
124 optim,
125 loss,
126 loss_and_grads=None,
127 num_samples=10,
128 num_steps=0,
129 **kwargs
130 ):
131 super(SVI, self).__init__(model=model, guide=guide, optim=optim, loss=loss)
132 self.svi_state = None
133
134 def evaluate_loss(self, *args, **kwargs):
135 return self.evaluate(self.svi_state, *args, **kwargs)
136
137 def step(self, *args, rng_key=None, **kwargs):
138 if self.svi_state is None:
139 if rng_key is None:
140 rng_key = numpyro.prng_key()
141 self.svi_state = self.init(rng_key, *args, **kwargs)
142 try:
143 self.svi_state, loss = jit(self.update)(self.svi_state, *args, **kwargs)
144 except TypeError as e:
145 if "not a valid JAX type" in str(e):
146 raise TypeError(
147 "NumPyro backend requires args, kwargs to be arrays or tuples, "
148 "dicts of arrays."
149 ) from e
150 else:
151 raise e
152 params = jit(super(SVI, self).get_params)(self.svi_state)
153 get_param_store().update(params)
154 return loss
155
156 def get_params(self):
157 return super(SVI, self).get_params(self.svi_state)
158
159
160 class Trace_ELBO(elbo.Trace_ELBO):
161 def __init__(
162 self,
163 num_particles=1,
164 max_plate_nesting=float("inf"),
165 max_iarange_nesting=None, # DEPRECATED
166 vectorize_particles=False,
167 strict_enumeration_warning=True,
168 ignore_jit_warnings=False,
169 jit_options=None,
170 retain_graph=None,
171 tail_adaptive_beta=-1.0,
172 ):
173 super(Trace_ELBO, self).__init__(num_particles=num_particles)
174
175
176 # JIT is enabled by default
177 JitTrace_ELBO = Trace_ELBO
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numpyro/compat/infer.py b/numpyro/compat/infer.py
--- a/numpyro/compat/infer.py
+++ b/numpyro/compat/infer.py
@@ -82,7 +82,7 @@
self,
kernel,
num_samples,
- num_warmup=None,
+ warmup_steps=None,
initial_params=None,
num_chains=1,
hook_fn=None,
@@ -91,13 +91,13 @@
disable_validation=True,
transforms=None,
):
- if num_warmup is None:
- num_warmup = num_samples
+ if warmup_steps is None:
+ warmup_steps = num_samples
self._initial_params = initial_params
self._mcmc = mcmc.MCMC(
kernel,
- num_warmup,
- num_samples,
+ num_warmup=warmup_steps,
+ num_samples=num_samples,
num_chains=num_chains,
progress_bar=(not disable_progbar),
)
| {"golden_diff": "diff --git a/numpyro/compat/infer.py b/numpyro/compat/infer.py\n--- a/numpyro/compat/infer.py\n+++ b/numpyro/compat/infer.py\n@@ -82,7 +82,7 @@\n self,\n kernel,\n num_samples,\n- num_warmup=None,\n+ warmup_steps=None,\n initial_params=None,\n num_chains=1,\n hook_fn=None,\n@@ -91,13 +91,13 @@\n disable_validation=True,\n transforms=None,\n ):\n- if num_warmup is None:\n- num_warmup = num_samples\n+ if warmup_steps is None:\n+ warmup_steps = num_samples\n self._initial_params = initial_params\n self._mcmc = mcmc.MCMC(\n kernel,\n- num_warmup,\n- num_samples,\n+ num_warmup=warmup_steps,\n+ num_samples=num_samples,\n num_chains=num_chains,\n progress_bar=(not disable_progbar),\n )\n", "issue": "compat module: compat.infer.MCMC broken\nCalling\r\n```python\r\ncompat.infer.MCMC(kernel, num_warmup=100, num_samples=1000)\r\n```\r\nthrows an error since no kwargs are handed to `mcmc.MCMC`. Also, shouldnt the correct `compat` call be\r\n```python\r\ncompat.infer.MCMC(kernel, warmup_steps=100, num_samples=1000)\r\n```\r\nThanks :)\n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\n\nfrom jax import jit\n\nimport numpyro\nfrom numpyro.compat.pyro import get_param_store\nfrom numpyro.infer import elbo, hmc, mcmc, svi\n\n\nclass HMC(hmc.HMC):\n def __init__(\n self,\n model=None,\n potential_fn=None,\n step_size=1,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n full_mass=False,\n use_multinomial_sampling=True,\n transforms=None,\n max_plate_nesting=None,\n jit_compile=False,\n jit_options=None,\n ignore_jit_warnings=False,\n trajectory_length=2 * math.pi,\n target_accept_prob=0.8,\n ):\n super(HMC, self).__init__(\n model=model,\n potential_fn=potential_fn,\n step_size=step_size,\n adapt_step_size=adapt_step_size,\n adapt_mass_matrix=adapt_mass_matrix,\n dense_mass=full_mass,\n target_accept_prob=target_accept_prob,\n trajectory_length=trajectory_length,\n )\n\n\nclass NUTS(hmc.NUTS):\n def __init__(\n self,\n model=None,\n potential_fn=None,\n step_size=1,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n full_mass=False,\n use_multinomial_sampling=True,\n transforms=None,\n max_plate_nesting=None,\n jit_compile=False,\n jit_options=None,\n ignore_jit_warnings=False,\n trajectory_length=2 * math.pi,\n target_accept_prob=0.8,\n max_tree_depth=10,\n ):\n if potential_fn is not None:\n raise ValueError(\n \"Only `model` argument is supported in generic module;\"\n \" `potential_fn` is not supported.\"\n )\n super(NUTS, self).__init__(\n model=model,\n potential_fn=potential_fn,\n step_size=step_size,\n adapt_step_size=adapt_step_size,\n adapt_mass_matrix=adapt_mass_matrix,\n dense_mass=full_mass,\n target_accept_prob=target_accept_prob,\n trajectory_length=trajectory_length,\n max_tree_depth=max_tree_depth,\n )\n\n\nclass MCMC(object):\n def __init__(\n self,\n kernel,\n num_samples,\n num_warmup=None,\n initial_params=None,\n num_chains=1,\n hook_fn=None,\n mp_context=None,\n disable_progbar=False,\n disable_validation=True,\n transforms=None,\n ):\n if num_warmup is None:\n num_warmup = num_samples\n self._initial_params = initial_params\n self._mcmc = mcmc.MCMC(\n kernel,\n num_warmup,\n num_samples,\n num_chains=num_chains,\n progress_bar=(not disable_progbar),\n )\n\n def run(self, *args, rng_key=None, **kwargs):\n if rng_key is None:\n rng_key = numpyro.prng_key()\n self._mcmc.run(rng_key, *args, init_params=self._initial_params, **kwargs)\n\n def get_samples(self, num_samples=None, 
group_by_chain=False):\n if num_samples is not None:\n raise ValueError(\"`num_samples` arg unsupported in NumPyro.\")\n return self._mcmc.get_samples(group_by_chain=group_by_chain)\n\n def summary(self, prob=0.9):\n self._mcmc.print_summary()\n\n\nclass SVI(svi.SVI):\n def __init__(\n self,\n model,\n guide,\n optim,\n loss,\n loss_and_grads=None,\n num_samples=10,\n num_steps=0,\n **kwargs\n ):\n super(SVI, self).__init__(model=model, guide=guide, optim=optim, loss=loss)\n self.svi_state = None\n\n def evaluate_loss(self, *args, **kwargs):\n return self.evaluate(self.svi_state, *args, **kwargs)\n\n def step(self, *args, rng_key=None, **kwargs):\n if self.svi_state is None:\n if rng_key is None:\n rng_key = numpyro.prng_key()\n self.svi_state = self.init(rng_key, *args, **kwargs)\n try:\n self.svi_state, loss = jit(self.update)(self.svi_state, *args, **kwargs)\n except TypeError as e:\n if \"not a valid JAX type\" in str(e):\n raise TypeError(\n \"NumPyro backend requires args, kwargs to be arrays or tuples, \"\n \"dicts of arrays.\"\n ) from e\n else:\n raise e\n params = jit(super(SVI, self).get_params)(self.svi_state)\n get_param_store().update(params)\n return loss\n\n def get_params(self):\n return super(SVI, self).get_params(self.svi_state)\n\n\nclass Trace_ELBO(elbo.Trace_ELBO):\n def __init__(\n self,\n num_particles=1,\n max_plate_nesting=float(\"inf\"),\n max_iarange_nesting=None, # DEPRECATED\n vectorize_particles=False,\n strict_enumeration_warning=True,\n ignore_jit_warnings=False,\n jit_options=None,\n retain_graph=None,\n tail_adaptive_beta=-1.0,\n ):\n super(Trace_ELBO, self).__init__(num_particles=num_particles)\n\n\n# JIT is enabled by default\nJitTrace_ELBO = Trace_ELBO\n", "path": "numpyro/compat/infer.py"}], "after_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\n\nfrom jax import jit\n\nimport numpyro\nfrom numpyro.compat.pyro import get_param_store\nfrom numpyro.infer import elbo, hmc, mcmc, svi\n\n\nclass HMC(hmc.HMC):\n def __init__(\n self,\n model=None,\n potential_fn=None,\n step_size=1,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n full_mass=False,\n use_multinomial_sampling=True,\n transforms=None,\n max_plate_nesting=None,\n jit_compile=False,\n jit_options=None,\n ignore_jit_warnings=False,\n trajectory_length=2 * math.pi,\n target_accept_prob=0.8,\n ):\n super(HMC, self).__init__(\n model=model,\n potential_fn=potential_fn,\n step_size=step_size,\n adapt_step_size=adapt_step_size,\n adapt_mass_matrix=adapt_mass_matrix,\n dense_mass=full_mass,\n target_accept_prob=target_accept_prob,\n trajectory_length=trajectory_length,\n )\n\n\nclass NUTS(hmc.NUTS):\n def __init__(\n self,\n model=None,\n potential_fn=None,\n step_size=1,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n full_mass=False,\n use_multinomial_sampling=True,\n transforms=None,\n max_plate_nesting=None,\n jit_compile=False,\n jit_options=None,\n ignore_jit_warnings=False,\n trajectory_length=2 * math.pi,\n target_accept_prob=0.8,\n max_tree_depth=10,\n ):\n if potential_fn is not None:\n raise ValueError(\n \"Only `model` argument is supported in generic module;\"\n \" `potential_fn` is not supported.\"\n )\n super(NUTS, self).__init__(\n model=model,\n potential_fn=potential_fn,\n step_size=step_size,\n adapt_step_size=adapt_step_size,\n adapt_mass_matrix=adapt_mass_matrix,\n dense_mass=full_mass,\n target_accept_prob=target_accept_prob,\n trajectory_length=trajectory_length,\n max_tree_depth=max_tree_depth,\n 
)\n\n\nclass MCMC(object):\n def __init__(\n self,\n kernel,\n num_samples,\n warmup_steps=None,\n initial_params=None,\n num_chains=1,\n hook_fn=None,\n mp_context=None,\n disable_progbar=False,\n disable_validation=True,\n transforms=None,\n ):\n if warmup_steps is None:\n warmup_steps = num_samples\n self._initial_params = initial_params\n self._mcmc = mcmc.MCMC(\n kernel,\n num_warmup=warmup_steps,\n num_samples=num_samples,\n num_chains=num_chains,\n progress_bar=(not disable_progbar),\n )\n\n def run(self, *args, rng_key=None, **kwargs):\n if rng_key is None:\n rng_key = numpyro.prng_key()\n self._mcmc.run(rng_key, *args, init_params=self._initial_params, **kwargs)\n\n def get_samples(self, num_samples=None, group_by_chain=False):\n if num_samples is not None:\n raise ValueError(\"`num_samples` arg unsupported in NumPyro.\")\n return self._mcmc.get_samples(group_by_chain=group_by_chain)\n\n def summary(self, prob=0.9):\n self._mcmc.print_summary()\n\n\nclass SVI(svi.SVI):\n def __init__(\n self,\n model,\n guide,\n optim,\n loss,\n loss_and_grads=None,\n num_samples=10,\n num_steps=0,\n **kwargs\n ):\n super(SVI, self).__init__(model=model, guide=guide, optim=optim, loss=loss)\n self.svi_state = None\n\n def evaluate_loss(self, *args, **kwargs):\n return self.evaluate(self.svi_state, *args, **kwargs)\n\n def step(self, *args, rng_key=None, **kwargs):\n if self.svi_state is None:\n if rng_key is None:\n rng_key = numpyro.prng_key()\n self.svi_state = self.init(rng_key, *args, **kwargs)\n try:\n self.svi_state, loss = jit(self.update)(self.svi_state, *args, **kwargs)\n except TypeError as e:\n if \"not a valid JAX type\" in str(e):\n raise TypeError(\n \"NumPyro backend requires args, kwargs to be arrays or tuples, \"\n \"dicts of arrays.\"\n ) from e\n else:\n raise e\n params = jit(super(SVI, self).get_params)(self.svi_state)\n get_param_store().update(params)\n return loss\n\n def get_params(self):\n return super(SVI, self).get_params(self.svi_state)\n\n\nclass Trace_ELBO(elbo.Trace_ELBO):\n def __init__(\n self,\n num_particles=1,\n max_plate_nesting=float(\"inf\"),\n max_iarange_nesting=None, # DEPRECATED\n vectorize_particles=False,\n strict_enumeration_warning=True,\n ignore_jit_warnings=False,\n jit_options=None,\n retain_graph=None,\n tail_adaptive_beta=-1.0,\n ):\n super(Trace_ELBO, self).__init__(num_particles=num_particles)\n\n\n# JIT is enabled by default\nJitTrace_ELBO = Trace_ELBO\n", "path": "numpyro/compat/infer.py"}]} | 1,971 | 234 |
gh_patches_debug_4953 | rasdani/github-patches | git_diff | getnikola__nikola-2998 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Theme meta file ignored_assets key should ignore whitespace
### Environment (if reporting a bug)
**Python Version:**
3.5.4
**Nikola Version:**
8.0.0dev HEAD
**Operating System:**
gentoo GNU/Linux
### Description:
Current behaviour: The ignored_assets key in theme meta files takes a comma-separated list of filenames that must not contain leading or trailing whitespace, because the whitespace is considered part of the filename.
Expected behaviour: The comma-separated list may contain the usual amount of whitespace: foo, bar, baz, etc.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/task/copy_assets.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2018 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Copy theme assets into output."""
28
29
30 import io
31 import os
32
33 from nikola.plugin_categories import Task
34 from nikola import utils
35
36
37 class CopyAssets(Task):
38 """Copy theme assets into output."""
39
40 name = "copy_assets"
41
42 def gen_tasks(self):
43 """Create tasks to copy the assets of the whole theme chain.
44
45 If a file is present on two themes, use the version
46 from the "youngest" theme.
47 """
48 kw = {
49 "themes": self.site.THEMES,
50 "translations": self.site.translations,
51 "files_folders": self.site.config['FILES_FOLDERS'],
52 "output_folder": self.site.config['OUTPUT_FOLDER'],
53 "filters": self.site.config['FILTERS'],
54 "code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
55 "code.css_selectors": ['pre.code', '.highlight pre'],
56 "code.css_head": '/* code.css file generated by Nikola */\n',
57 "code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n",
58 }
59 tasks = {}
60 code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
61 code_css_input = utils.get_asset_path('assets/css/code.css',
62 themes=kw['themes'],
63 files_folders=kw['files_folders'], output_dir=None)
64 yield self.group_task()
65
66 main_theme = utils.get_theme_path(kw['themes'][0])
67 theme_ini = utils.parse_theme_meta(main_theme)
68 if theme_ini:
69 ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',')
70 else:
71 ignored_assets = []
72
73 for theme_name in kw['themes']:
74 src = os.path.join(utils.get_theme_path(theme_name), 'assets')
75 dst = os.path.join(kw['output_folder'], 'assets')
76 for task in utils.copy_tree(src, dst):
77 asset_name = os.path.relpath(task['name'], dst)
78 if task['name'] in tasks or asset_name in ignored_assets:
79 continue
80 tasks[task['name']] = task
81 task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')]
82 task['basename'] = self.name
83 if code_css_input:
84 if 'file_dep' not in task:
85 task['file_dep'] = []
86 task['file_dep'].append(code_css_input)
87 yield utils.apply_filters(task, kw['filters'])
88
89 # Check whether or not there is a code.css file around.
90 if not code_css_input and kw['code_color_scheme']:
91 def create_code_css():
92 from pygments.formatters import get_formatter_by_name
93 formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
94 utils.makedirs(os.path.dirname(code_css_path))
95 with io.open(code_css_path, 'w+', encoding='utf8') as outf:
96 outf.write(kw["code.css_head"])
97 outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
98 outf.write(kw["code.css_close"])
99
100 if os.path.exists(code_css_path):
101 with io.open(code_css_path, 'r', encoding='utf-8') as fh:
102 testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"]
103 else:
104 testcontents = False
105
106 task = {
107 'basename': self.name,
108 'name': code_css_path,
109 'targets': [code_css_path],
110 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents],
111 'actions': [(create_code_css, [])],
112 'clean': True,
113 }
114 yield utils.apply_filters(task, kw['filters'])
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py
--- a/nikola/plugins/task/copy_assets.py
+++ b/nikola/plugins/task/copy_assets.py
@@ -67,6 +67,7 @@
theme_ini = utils.parse_theme_meta(main_theme)
if theme_ini:
ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',')
+ ignored_assets = [asset_name.strip() for asset_name in ignored_assets]
else:
ignored_assets = []
| {"golden_diff": "diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py\n--- a/nikola/plugins/task/copy_assets.py\n+++ b/nikola/plugins/task/copy_assets.py\n@@ -67,6 +67,7 @@\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n+ ignored_assets = [asset_name.strip() for asset_name in ignored_assets]\n else:\n ignored_assets = []\n", "issue": "Theme meta file ignored_assets key should ignore whitespace\n### Environment (if reporting a bug)\r\n\r\n**Python Version:**\r\n3.5.4\r\n**Nikola Version:**\r\n8.0.0dev HEAD\r\n**Operating System:**\r\ngentoo GNU/Linux\r\n### Description:\r\nCurrent behaviour: The ignored_assets key in theme meta files takes a comma-separated list of filenames, that must not contain leading or trailing whitespace because it's considered to be part of the filename.\r\n\r\nExpected behaviour: The comma-separated list may contain the usual amount of whitespace: foo, bar, baz, etc\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2018 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Copy theme assets into output.\"\"\"\n\n\nimport io\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n kw = {\n \"themes\": self.site.THEMES,\n \"translations\": self.site.translations,\n \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": ['pre.code', '.highlight pre'],\n \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n code_css_input = utils.get_asset_path('assets/css/code.css',\n themes=kw['themes'],\n files_folders=kw['files_folders'], output_dir=None)\n yield self.group_task()\n\n main_theme = utils.get_theme_path(kw['themes'][0])\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n else:\n ignored_assets = []\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n asset_name = os.path.relpath(task['name'], dst)\n if task['name'] in tasks or asset_name in ignored_assets:\n continue\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')]\n task['basename'] = self.name\n if code_css_input:\n if 'file_dep' not in task:\n task['file_dep'] = []\n task['file_dep'].append(code_css_input)\n yield utils.apply_filters(task, kw['filters'])\n\n # Check whether or not there is a code.css file around.\n if not code_css_input and kw['code_color_scheme']:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with io.open(code_css_path, 'w+', encoding='utf8') as outf:\n outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n if os.path.exists(code_css_path):\n with io.open(code_css_path, 'r', encoding='utf-8') as fh:\n testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n else:\n testcontents = False\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2018 Roberto Alsina and 
others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Copy theme assets into output.\"\"\"\n\n\nimport io\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n kw = {\n \"themes\": self.site.THEMES,\n \"translations\": self.site.translations,\n \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": ['pre.code', '.highlight pre'],\n \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n code_css_input = utils.get_asset_path('assets/css/code.css',\n themes=kw['themes'],\n files_folders=kw['files_folders'], output_dir=None)\n yield self.group_task()\n\n main_theme = utils.get_theme_path(kw['themes'][0])\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n ignored_assets = [asset_name.strip() for asset_name in ignored_assets]\n else:\n ignored_assets = []\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n asset_name = os.path.relpath(task['name'], dst)\n if task['name'] in tasks or asset_name in ignored_assets:\n continue\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')]\n task['basename'] = self.name\n if code_css_input:\n if 'file_dep' not in task:\n task['file_dep'] = []\n task['file_dep'].append(code_css_input)\n yield utils.apply_filters(task, kw['filters'])\n\n # Check whether or not there is a code.css file around.\n if not code_css_input and kw['code_color_scheme']:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n 
utils.makedirs(os.path.dirname(code_css_path))\n with io.open(code_css_path, 'w+', encoding='utf8') as outf:\n outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n if os.path.exists(code_css_path):\n with io.open(code_css_path, 'r', encoding='utf-8') as fh:\n testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n else:\n testcontents = False\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}]} | 1,697 | 122 |
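A minimal, self-contained sketch of the behaviour change in the `copy_assets` patch above. The theme-meta value and filenames are made up for illustration; only `str.split` and `str.strip` from the standard library are used, so the snippet does not depend on Nikola itself.

```python
# Hypothetical ignored_assets value, written with the "usual amount of
# whitespace" described in the issue report.
raw_value = "assets/foo.css, assets/bar.js,  assets/baz.png"

# Before the patch: the whitespace stays attached to each entry, so the
# names never match the relative asset paths computed during the copy.
before = raw_value.split(",")
assert before == ["assets/foo.css", " assets/bar.js", "  assets/baz.png"]

# After the patch: every entry is stripped, matching the expected behaviour.
after = [asset_name.strip() for asset_name in raw_value.split(",")]
assert after == ["assets/foo.css", "assets/bar.js", "assets/baz.png"]
```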
gh_patches_debug_18140 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2408 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OTLP Exporter: Add support for Gauge point types
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import logging
15 from os import environ
16 from typing import Optional, Sequence
17 from grpc import ChannelCredentials, Compression
18 from opentelemetry.exporter.otlp.proto.grpc.exporter import (
19 OTLPExporterMixin,
20 get_resource_data,
21 )
22 from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (
23 ExportMetricsServiceRequest,
24 )
25 from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import (
26 MetricsServiceStub,
27 )
28 from opentelemetry.proto.common.v1.common_pb2 import InstrumentationLibrary
29 from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2
30 from opentelemetry.sdk.environment_variables import (
31 OTEL_EXPORTER_OTLP_METRICS_INSECURE,
32 )
33 from opentelemetry.sdk._metrics.point import (
34 Gauge,
35 Histogram,
36 Metric,
37 Sum,
38 )
39
40 from opentelemetry.sdk._metrics.export import (
41 MetricExporter,
42 MetricExportResult,
43 )
44
45 logger = logging.getLogger(__name__)
46
47
48 class OTLPMetricExporter(
49 MetricExporter,
50 OTLPExporterMixin[Metric, ExportMetricsServiceRequest, MetricExportResult],
51 ):
52 _result = MetricExportResult
53 _stub = MetricsServiceStub
54
55 def __init__(
56 self,
57 endpoint: Optional[str] = None,
58 insecure: Optional[bool] = None,
59 credentials: Optional[ChannelCredentials] = None,
60 headers: Optional[Sequence] = None,
61 timeout: Optional[int] = None,
62 compression: Optional[Compression] = None,
63 ):
64
65 if insecure is None:
66 insecure = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE)
67 if insecure is not None:
68 insecure = insecure.lower() == "true"
69
70 super().__init__(
71 **{
72 "endpoint": endpoint,
73 "insecure": insecure,
74 "credentials": credentials,
75 "headers": headers,
76 "timeout": timeout,
77 "compression": compression,
78 }
79 )
80
81 def _translate_data(
82 self, data: Sequence[Metric]
83 ) -> ExportMetricsServiceRequest:
84 sdk_resource_instrumentation_library_metrics = {}
85
86 for metric in data:
87 resource = metric.resource
88 instrumentation_library_map = (
89 sdk_resource_instrumentation_library_metrics.get(resource, {})
90 )
91 if not instrumentation_library_map:
92 sdk_resource_instrumentation_library_metrics[
93 resource
94 ] = instrumentation_library_map
95
96 instrumentation_library_metrics = instrumentation_library_map.get(
97 metric.instrumentation_info
98 )
99
100 if not instrumentation_library_metrics:
101 if metric.instrumentation_info is not None:
102 instrumentation_library_map[
103 metric.instrumentation_info
104 ] = pb2.InstrumentationLibraryMetrics(
105 instrumentation_library=InstrumentationLibrary(
106 name=metric.instrumentation_info.name,
107 version=metric.instrumentation_info.version,
108 )
109 )
110 else:
111 instrumentation_library_map[
112 metric.instrumentation_info
113 ] = pb2.InstrumentationLibraryMetrics()
114
115 instrumentation_library_metrics = instrumentation_library_map.get(
116 metric.instrumentation_info
117 )
118
119 pbmetric = pb2.Metric(
120 name=metric.name,
121 description=metric.description,
122 unit=metric.unit,
123 )
124 if isinstance(metric.point, Gauge):
125 # TODO: implement gauge
126 pbmetric.gauge = pb2.Gauge(
127 data_points=[],
128 )
129 elif isinstance(metric.point, Histogram):
130 # TODO: implement histogram
131 pbmetric.histogram = pb2.Histogram(
132 data_points=[],
133 )
134 elif isinstance(metric.point, Sum):
135 pt = pb2.NumberDataPoint(
136 attributes=self._translate_attributes(metric.attributes),
137 start_time_unix_nano=metric.point.start_time_unix_nano,
138 time_unix_nano=metric.point.time_unix_nano,
139 )
140 if isinstance(metric.point.value, int):
141 pt.as_int = metric.point.value
142 else:
143 pt.as_double = metric.point.value
144 # note that because sum is a message type, the fields must be
145 # set individually rather than instantiating a pb2.Sum and setting
146 # it once
147 pbmetric.sum.aggregation_temporality = (
148 metric.point.aggregation_temporality
149 )
150 pbmetric.sum.is_monotonic = metric.point.is_monotonic
151 pbmetric.sum.data_points.append(pt)
152 else:
153 logger.warn("unsupported datapoint type %s", metric.point)
154 continue
155
156 instrumentation_library_metrics.metrics.append(
157 pbmetric,
158 )
159 return ExportMetricsServiceRequest(
160 resource_metrics=get_resource_data(
161 sdk_resource_instrumentation_library_metrics,
162 pb2.ResourceMetrics,
163 "metrics",
164 )
165 )
166
167 def export(self, metrics: Sequence[Metric]) -> MetricExportResult:
168 return self._export(metrics)
169
170 def shutdown(self):
171 pass
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py
--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py
@@ -122,10 +122,15 @@
unit=metric.unit,
)
if isinstance(metric.point, Gauge):
- # TODO: implement gauge
- pbmetric.gauge = pb2.Gauge(
- data_points=[],
+ pt = pb2.NumberDataPoint(
+ attributes=self._translate_attributes(metric.attributes),
+ time_unix_nano=metric.point.time_unix_nano,
)
+ if isinstance(metric.point.value, int):
+ pt.as_int = metric.point.value
+ else:
+ pt.as_double = metric.point.value
+ pbmetric.gauge.data_points.append(pt)
elif isinstance(metric.point, Histogram):
# TODO: implement histogram
pbmetric.histogram = pb2.Histogram(
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py\n--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py\n+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py\n@@ -122,10 +122,15 @@\n unit=metric.unit,\n )\n if isinstance(metric.point, Gauge):\n- # TODO: implement gauge\n- pbmetric.gauge = pb2.Gauge(\n- data_points=[],\n+ pt = pb2.NumberDataPoint(\n+ attributes=self._translate_attributes(metric.attributes),\n+ time_unix_nano=metric.point.time_unix_nano,\n )\n+ if isinstance(metric.point.value, int):\n+ pt.as_int = metric.point.value\n+ else:\n+ pt.as_double = metric.point.value\n+ pbmetric.gauge.data_points.append(pt)\n elif isinstance(metric.point, Histogram):\n # TODO: implement histogram\n pbmetric.histogram = pb2.Histogram(\n", "issue": "OTLP Exporter: Add support for Gauge point types\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom os import environ\nfrom typing import Optional, Sequence\nfrom grpc import ChannelCredentials, Compression\nfrom opentelemetry.exporter.otlp.proto.grpc.exporter import (\n OTLPExporterMixin,\n get_resource_data,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (\n ExportMetricsServiceRequest,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import (\n MetricsServiceStub,\n)\nfrom opentelemetry.proto.common.v1.common_pb2 import InstrumentationLibrary\nfrom opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_METRICS_INSECURE,\n)\nfrom opentelemetry.sdk._metrics.point import (\n Gauge,\n Histogram,\n Metric,\n Sum,\n)\n\nfrom opentelemetry.sdk._metrics.export import (\n MetricExporter,\n MetricExportResult,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass OTLPMetricExporter(\n MetricExporter,\n OTLPExporterMixin[Metric, ExportMetricsServiceRequest, MetricExportResult],\n):\n _result = MetricExportResult\n _stub = MetricsServiceStub\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n insecure: Optional[bool] = None,\n credentials: Optional[ChannelCredentials] = None,\n headers: Optional[Sequence] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n\n if insecure is None:\n insecure = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE)\n if insecure is not None:\n insecure = insecure.lower() == \"true\"\n\n super().__init__(\n **{\n \"endpoint\": endpoint,\n \"insecure\": insecure,\n \"credentials\": credentials,\n \"headers\": headers,\n \"timeout\": timeout,\n \"compression\": compression,\n }\n )\n\n def 
_translate_data(\n self, data: Sequence[Metric]\n ) -> ExportMetricsServiceRequest:\n sdk_resource_instrumentation_library_metrics = {}\n\n for metric in data:\n resource = metric.resource\n instrumentation_library_map = (\n sdk_resource_instrumentation_library_metrics.get(resource, {})\n )\n if not instrumentation_library_map:\n sdk_resource_instrumentation_library_metrics[\n resource\n ] = instrumentation_library_map\n\n instrumentation_library_metrics = instrumentation_library_map.get(\n metric.instrumentation_info\n )\n\n if not instrumentation_library_metrics:\n if metric.instrumentation_info is not None:\n instrumentation_library_map[\n metric.instrumentation_info\n ] = pb2.InstrumentationLibraryMetrics(\n instrumentation_library=InstrumentationLibrary(\n name=metric.instrumentation_info.name,\n version=metric.instrumentation_info.version,\n )\n )\n else:\n instrumentation_library_map[\n metric.instrumentation_info\n ] = pb2.InstrumentationLibraryMetrics()\n\n instrumentation_library_metrics = instrumentation_library_map.get(\n metric.instrumentation_info\n )\n\n pbmetric = pb2.Metric(\n name=metric.name,\n description=metric.description,\n unit=metric.unit,\n )\n if isinstance(metric.point, Gauge):\n # TODO: implement gauge\n pbmetric.gauge = pb2.Gauge(\n data_points=[],\n )\n elif isinstance(metric.point, Histogram):\n # TODO: implement histogram\n pbmetric.histogram = pb2.Histogram(\n data_points=[],\n )\n elif isinstance(metric.point, Sum):\n pt = pb2.NumberDataPoint(\n attributes=self._translate_attributes(metric.attributes),\n start_time_unix_nano=metric.point.start_time_unix_nano,\n time_unix_nano=metric.point.time_unix_nano,\n )\n if isinstance(metric.point.value, int):\n pt.as_int = metric.point.value\n else:\n pt.as_double = metric.point.value\n # note that because sum is a message type, the fields must be\n # set individually rather than instantiating a pb2.Sum and setting\n # it once\n pbmetric.sum.aggregation_temporality = (\n metric.point.aggregation_temporality\n )\n pbmetric.sum.is_monotonic = metric.point.is_monotonic\n pbmetric.sum.data_points.append(pt)\n else:\n logger.warn(\"unsupported datapoint type %s\", metric.point)\n continue\n\n instrumentation_library_metrics.metrics.append(\n pbmetric,\n )\n return ExportMetricsServiceRequest(\n resource_metrics=get_resource_data(\n sdk_resource_instrumentation_library_metrics,\n pb2.ResourceMetrics,\n \"metrics\",\n )\n )\n\n def export(self, metrics: Sequence[Metric]) -> MetricExportResult:\n return self._export(metrics)\n\n def shutdown(self):\n pass\n", "path": "exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom os import environ\nfrom typing import Optional, Sequence\nfrom grpc import ChannelCredentials, Compression\nfrom opentelemetry.exporter.otlp.proto.grpc.exporter import (\n OTLPExporterMixin,\n 
get_resource_data,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (\n ExportMetricsServiceRequest,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import (\n MetricsServiceStub,\n)\nfrom opentelemetry.proto.common.v1.common_pb2 import InstrumentationLibrary\nfrom opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_METRICS_INSECURE,\n)\nfrom opentelemetry.sdk._metrics.point import (\n Gauge,\n Histogram,\n Metric,\n Sum,\n)\n\nfrom opentelemetry.sdk._metrics.export import (\n MetricExporter,\n MetricExportResult,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass OTLPMetricExporter(\n MetricExporter,\n OTLPExporterMixin[Metric, ExportMetricsServiceRequest, MetricExportResult],\n):\n _result = MetricExportResult\n _stub = MetricsServiceStub\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n insecure: Optional[bool] = None,\n credentials: Optional[ChannelCredentials] = None,\n headers: Optional[Sequence] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n\n if insecure is None:\n insecure = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE)\n if insecure is not None:\n insecure = insecure.lower() == \"true\"\n\n super().__init__(\n **{\n \"endpoint\": endpoint,\n \"insecure\": insecure,\n \"credentials\": credentials,\n \"headers\": headers,\n \"timeout\": timeout,\n \"compression\": compression,\n }\n )\n\n def _translate_data(\n self, data: Sequence[Metric]\n ) -> ExportMetricsServiceRequest:\n sdk_resource_instrumentation_library_metrics = {}\n\n for metric in data:\n resource = metric.resource\n instrumentation_library_map = (\n sdk_resource_instrumentation_library_metrics.get(resource, {})\n )\n if not instrumentation_library_map:\n sdk_resource_instrumentation_library_metrics[\n resource\n ] = instrumentation_library_map\n\n instrumentation_library_metrics = instrumentation_library_map.get(\n metric.instrumentation_info\n )\n\n if not instrumentation_library_metrics:\n if metric.instrumentation_info is not None:\n instrumentation_library_map[\n metric.instrumentation_info\n ] = pb2.InstrumentationLibraryMetrics(\n instrumentation_library=InstrumentationLibrary(\n name=metric.instrumentation_info.name,\n version=metric.instrumentation_info.version,\n )\n )\n else:\n instrumentation_library_map[\n metric.instrumentation_info\n ] = pb2.InstrumentationLibraryMetrics()\n\n instrumentation_library_metrics = instrumentation_library_map.get(\n metric.instrumentation_info\n )\n\n pbmetric = pb2.Metric(\n name=metric.name,\n description=metric.description,\n unit=metric.unit,\n )\n if isinstance(metric.point, Gauge):\n pt = pb2.NumberDataPoint(\n attributes=self._translate_attributes(metric.attributes),\n time_unix_nano=metric.point.time_unix_nano,\n )\n if isinstance(metric.point.value, int):\n pt.as_int = metric.point.value\n else:\n pt.as_double = metric.point.value\n pbmetric.gauge.data_points.append(pt)\n elif isinstance(metric.point, Histogram):\n # TODO: implement histogram\n pbmetric.histogram = pb2.Histogram(\n data_points=[],\n )\n elif isinstance(metric.point, Sum):\n pt = pb2.NumberDataPoint(\n attributes=self._translate_attributes(metric.attributes),\n start_time_unix_nano=metric.point.start_time_unix_nano,\n time_unix_nano=metric.point.time_unix_nano,\n )\n if isinstance(metric.point.value, int):\n pt.as_int = metric.point.value\n else:\n pt.as_double = metric.point.value\n # note that because 
sum is a message type, the fields must be\n # set individually rather than instantiating a pb2.Sum and setting\n # it once\n pbmetric.sum.aggregation_temporality = (\n metric.point.aggregation_temporality\n )\n pbmetric.sum.is_monotonic = metric.point.is_monotonic\n pbmetric.sum.data_points.append(pt)\n else:\n logger.warn(\"unsupported datapoint type %s\", metric.point)\n continue\n\n instrumentation_library_metrics.metrics.append(\n pbmetric,\n )\n return ExportMetricsServiceRequest(\n resource_metrics=get_resource_data(\n sdk_resource_instrumentation_library_metrics,\n pb2.ResourceMetrics,\n \"metrics\",\n )\n )\n\n def export(self, metrics: Sequence[Metric]) -> MetricExportResult:\n return self._export(metrics)\n\n def shutdown(self):\n pass\n", "path": "exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py"}]} | 1,841 | 308 |
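The gauge branch added by the patch above reuses the same int/float dispatch that the existing `Sum` branch performs. The snippet below is a pure-Python mock of that dispatch only; the tiny `Gauge` stand-in and the dict "data point" are assumptions for illustration and are not the real OpenTelemetry SDK or protobuf classes.

```python
from dataclasses import dataclass
from typing import Union


@dataclass
class Gauge:
    # Illustrative stand-in for the SDK's Gauge point type.
    time_unix_nano: int
    value: Union[int, float]


def translate_gauge_point(point: Gauge) -> dict:
    # Mirrors the patched branch: integer values go to as_int,
    # floating-point values go to as_double.
    data_point = {"time_unix_nano": point.time_unix_nano}
    if isinstance(point.value, int):
        data_point["as_int"] = point.value
    else:
        data_point["as_double"] = point.value
    return data_point


assert translate_gauge_point(Gauge(1, 7)) == {"time_unix_nano": 1, "as_int": 7}
assert translate_gauge_point(Gauge(1, 0.5)) == {"time_unix_nano": 1, "as_double": 0.5}
```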
gh_patches_debug_2760 | rasdani/github-patches | git_diff | getredash__redash-464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error running query: datetime.time(13, 52, 27) is not JSON serializable
My table schema:
``` sql
CREATE TABLE F_entrances (
id SERIAL PRIMARY KEY,
timeOfEntrance time,
customerId int REFERENCES D_customers
);
```
(and yes, I committed the horrible sin of camel_case vs underScore. I'll be fixing that soonish)
The query
``` sql
SELECT
timeofentrance
FROM F_entrances
```
Gives me the error `Error running query: datetime.time(13, 52, 27) is not JSON serializable`. I worked around it with `to_char` but this seems to be a problem at the [Python layer](http://stackoverflow.com/a/11875813/1216976).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/utils.py`
Content:
```
1 import cStringIO
2 import csv
3 import codecs
4 import decimal
5 import datetime
6 import json
7 import re
8 import hashlib
9 import sqlparse
10 import pytz
11
12 COMMENTS_REGEX = re.compile("/\*.*?\*/")
13
14
15 class SQLMetaData(object):
16 TABLE_SELECTION_KEYWORDS = ('FROM', 'JOIN', 'LEFT JOIN', 'FULL JOIN', 'RIGHT JOIN', 'CROSS JOIN', 'INNER JOIN',
17 'OUTER JOIN', 'LEFT OUTER JOIN', 'RIGHT OUTER JOIN', 'FULL OUTER JOIN')
18
19 def __init__(self, sql):
20 self.sql = sql
21 self.parsed_sql = sqlparse.parse(self.sql)
22
23 self.has_ddl_statements = self._find_ddl_statements()
24 self.has_non_select_dml_statements = self._find_dml_statements()
25 self.used_tables = self._find_tables()
26
27 def _find_ddl_statements(self):
28 for statement in self.parsed_sql:
29 if len([x for x in statement.flatten() if x.ttype == sqlparse.tokens.DDL]):
30 return True
31
32 return False
33
34 def _find_tables(self):
35 tables = set()
36 for statement in self.parsed_sql:
37 tables.update(self.extract_table_names(statement.tokens))
38
39 return tables
40
41 def extract_table_names(self, tokens):
42 tables = set()
43 tokens = [t for t in tokens if t.ttype not in (sqlparse.tokens.Whitespace, sqlparse.tokens.Newline)]
44
45 for i in range(len(tokens)):
46 if tokens[i].is_group():
47 tables.update(self.extract_table_names(tokens[i].tokens))
48 else:
49 if tokens[i].ttype == sqlparse.tokens.Keyword and tokens[i].normalized in self.TABLE_SELECTION_KEYWORDS:
50 if isinstance(tokens[i + 1], sqlparse.sql.Identifier):
51 tables.add(tokens[i + 1].value)
52
53 if isinstance(tokens[i + 1], sqlparse.sql.IdentifierList):
54 tables.update(set([t.value for t in tokens[i+1].get_identifiers()]))
55 return tables
56
57 def _find_dml_statements(self):
58 for statement in self.parsed_sql:
59 for token in statement.flatten():
60 if token.ttype == sqlparse.tokens.DML and token.normalized != 'SELECT':
61 return True
62
63 return False
64
65
66 def utcnow():
67 """Return datetime.now value with timezone specified.
68
69 Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,
70 which leads to errors in calculations.
71 """
72 return datetime.datetime.now(pytz.utc)
73
74 def slugify(s):
75 return re.sub('[^a-z0-9_\-]+', '-', s.lower())
76
77
78 def gen_query_hash(sql):
79 """Returns hash of the given query after stripping all comments, line breaks and multiple
80 spaces, and lower casing all text.
81
82 TODO: possible issue - the following queries will get the same id:
83 1. SELECT 1 FROM table WHERE column='Value';
84 2. SELECT 1 FROM table where column='value';
85 """
86 sql = COMMENTS_REGEX.sub("", sql)
87 sql = "".join(sql.split()).lower()
88 return hashlib.md5(sql.encode('utf-8')).hexdigest()
89
90
91 class JSONEncoder(json.JSONEncoder):
92 """Custom JSON encoding class, to handle Decimal and datetime.date instances.
93 """
94 def default(self, o):
95 if isinstance(o, decimal.Decimal):
96 return float(o)
97
98 if isinstance(o, datetime.date):
99 return o.isoformat()
100
101 super(JSONEncoder, self).default(o)
102
103
104 def json_dumps(data):
105 return json.dumps(data, cls=JSONEncoder)
106
107
108 class UnicodeWriter:
109 """
110 A CSV writer which will write rows to CSV file "f",
111 which is encoded in the given encoding.
112 """
113 def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
114 # Redirect output to a queue
115 self.queue = cStringIO.StringIO()
116 self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
117 self.stream = f
118 self.encoder = codecs.getincrementalencoder(encoding)()
119
120 def _encode_utf8(self, val):
121 if isinstance(val, (unicode, str)):
122 return val.encode('utf-8')
123
124 return val
125
126 def writerow(self, row):
127 self.writer.writerow([self._encode_utf8(s) for s in row])
128 # Fetch UTF-8 output from the queue ...
129 data = self.queue.getvalue()
130 data = data.decode("utf-8")
131 # ... and reencode it into the target encoding
132 data = self.encoder.encode(data)
133 # write to the target stream
134 self.stream.write(data)
135 # empty queue
136 self.queue.truncate(0)
137
138 def writerows(self, rows):
139 for row in rows:
140 self.writerow(row)
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/utils.py b/redash/utils.py
--- a/redash/utils.py
+++ b/redash/utils.py
@@ -95,7 +95,7 @@
if isinstance(o, decimal.Decimal):
return float(o)
- if isinstance(o, datetime.date):
+ if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):
return o.isoformat()
super(JSONEncoder, self).default(o)
| {"golden_diff": "diff --git a/redash/utils.py b/redash/utils.py\n--- a/redash/utils.py\n+++ b/redash/utils.py\n@@ -95,7 +95,7 @@\n if isinstance(o, decimal.Decimal):\n return float(o)\n \n- if isinstance(o, datetime.date):\n+ if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n return o.isoformat()\n \n super(JSONEncoder, self).default(o)\n", "issue": "Error running query: datetime.time(13, 52, 27) is not JSON serializable\nMy table schema:\n\n``` sql\nCREATE TABLE F_entrances (\n id SERIAL PRIMARY KEY,\n timeOfEntrance time,\n customerId int REFERENCES D_customers\n);\n```\n\n(and yes, I committed the horrible sin of camel_case vs underScore. I'll be fixing that soonish)\n\nThe query\n\n``` sql\nSELECT\ntimeofentrance\nFROM F_entrances\n```\n\nGives me the error `Error running query: datetime.time(13, 52, 27) is not JSON serializable`. I worked around it with `to_char` but this seems to be a problem at the [Python layer](http://stackoverflow.com/a/11875813/1216976).\n\n", "before_files": [{"content": "import cStringIO\nimport csv\nimport codecs\nimport decimal\nimport datetime\nimport json\nimport re\nimport hashlib\nimport sqlparse\nimport pytz\n\nCOMMENTS_REGEX = re.compile(\"/\\*.*?\\*/\")\n\n\nclass SQLMetaData(object):\n TABLE_SELECTION_KEYWORDS = ('FROM', 'JOIN', 'LEFT JOIN', 'FULL JOIN', 'RIGHT JOIN', 'CROSS JOIN', 'INNER JOIN',\n 'OUTER JOIN', 'LEFT OUTER JOIN', 'RIGHT OUTER JOIN', 'FULL OUTER JOIN')\n\n def __init__(self, sql):\n self.sql = sql\n self.parsed_sql = sqlparse.parse(self.sql)\n\n self.has_ddl_statements = self._find_ddl_statements()\n self.has_non_select_dml_statements = self._find_dml_statements()\n self.used_tables = self._find_tables()\n\n def _find_ddl_statements(self):\n for statement in self.parsed_sql:\n if len([x for x in statement.flatten() if x.ttype == sqlparse.tokens.DDL]):\n return True\n\n return False\n\n def _find_tables(self):\n tables = set()\n for statement in self.parsed_sql:\n tables.update(self.extract_table_names(statement.tokens))\n\n return tables\n\n def extract_table_names(self, tokens):\n tables = set()\n tokens = [t for t in tokens if t.ttype not in (sqlparse.tokens.Whitespace, sqlparse.tokens.Newline)]\n\n for i in range(len(tokens)):\n if tokens[i].is_group():\n tables.update(self.extract_table_names(tokens[i].tokens))\n else:\n if tokens[i].ttype == sqlparse.tokens.Keyword and tokens[i].normalized in self.TABLE_SELECTION_KEYWORDS:\n if isinstance(tokens[i + 1], sqlparse.sql.Identifier):\n tables.add(tokens[i + 1].value)\n\n if isinstance(tokens[i + 1], sqlparse.sql.IdentifierList):\n tables.update(set([t.value for t in tokens[i+1].get_identifiers()]))\n return tables\n\n def _find_dml_statements(self):\n for statement in self.parsed_sql:\n for token in statement.flatten():\n if token.ttype == sqlparse.tokens.DML and token.normalized != 'SELECT':\n return True\n\n return False\n\n\ndef utcnow():\n \"\"\"Return datetime.now value with timezone specified.\n\n Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,\n which leads to errors in calculations.\n \"\"\"\n return datetime.datetime.now(pytz.utc)\n\ndef slugify(s):\n return re.sub('[^a-z0-9_\\-]+', '-', s.lower())\n\n\ndef gen_query_hash(sql):\n \"\"\"Returns hash of the given query after stripping all comments, line breaks and multiple\n spaces, and lower casing all text.\n\n TODO: possible issue - the following queries will get the same id:\n 1. SELECT 1 FROM table WHERE column='Value';\n 2. 
SELECT 1 FROM table where column='value';\n \"\"\"\n sql = COMMENTS_REGEX.sub(\"\", sql)\n sql = \"\".join(sql.split()).lower()\n return hashlib.md5(sql.encode('utf-8')).hexdigest()\n\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"Custom JSON encoding class, to handle Decimal and datetime.date instances.\n \"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n\n if isinstance(o, datetime.date):\n return o.isoformat()\n \n super(JSONEncoder, self).default(o)\n\n\ndef json_dumps(data):\n return json.dumps(data, cls=JSONEncoder)\n\n\nclass UnicodeWriter:\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def _encode_utf8(self, val):\n if isinstance(val, (unicode, str)):\n return val.encode('utf-8')\n\n return val\n\n def writerow(self, row):\n self.writer.writerow([self._encode_utf8(s) for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n", "path": "redash/utils.py"}], "after_files": [{"content": "import cStringIO\nimport csv\nimport codecs\nimport decimal\nimport datetime\nimport json\nimport re\nimport hashlib\nimport sqlparse\nimport pytz\n\nCOMMENTS_REGEX = re.compile(\"/\\*.*?\\*/\")\n\n\nclass SQLMetaData(object):\n TABLE_SELECTION_KEYWORDS = ('FROM', 'JOIN', 'LEFT JOIN', 'FULL JOIN', 'RIGHT JOIN', 'CROSS JOIN', 'INNER JOIN',\n 'OUTER JOIN', 'LEFT OUTER JOIN', 'RIGHT OUTER JOIN', 'FULL OUTER JOIN')\n\n def __init__(self, sql):\n self.sql = sql\n self.parsed_sql = sqlparse.parse(self.sql)\n\n self.has_ddl_statements = self._find_ddl_statements()\n self.has_non_select_dml_statements = self._find_dml_statements()\n self.used_tables = self._find_tables()\n\n def _find_ddl_statements(self):\n for statement in self.parsed_sql:\n if len([x for x in statement.flatten() if x.ttype == sqlparse.tokens.DDL]):\n return True\n\n return False\n\n def _find_tables(self):\n tables = set()\n for statement in self.parsed_sql:\n tables.update(self.extract_table_names(statement.tokens))\n\n return tables\n\n def extract_table_names(self, tokens):\n tables = set()\n tokens = [t for t in tokens if t.ttype not in (sqlparse.tokens.Whitespace, sqlparse.tokens.Newline)]\n\n for i in range(len(tokens)):\n if tokens[i].is_group():\n tables.update(self.extract_table_names(tokens[i].tokens))\n else:\n if tokens[i].ttype == sqlparse.tokens.Keyword and tokens[i].normalized in self.TABLE_SELECTION_KEYWORDS:\n if isinstance(tokens[i + 1], sqlparse.sql.Identifier):\n tables.add(tokens[i + 1].value)\n\n if isinstance(tokens[i + 1], sqlparse.sql.IdentifierList):\n tables.update(set([t.value for t in tokens[i+1].get_identifiers()]))\n return tables\n\n def _find_dml_statements(self):\n for statement in self.parsed_sql:\n for token in statement.flatten():\n if token.ttype == sqlparse.tokens.DML and token.normalized != 'SELECT':\n return True\n\n return False\n\n\ndef utcnow():\n \"\"\"Return datetime.now value with timezone 
specified.\n\n Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,\n which leads to errors in calculations.\n \"\"\"\n return datetime.datetime.now(pytz.utc)\n\ndef slugify(s):\n return re.sub('[^a-z0-9_\\-]+', '-', s.lower())\n\n\ndef gen_query_hash(sql):\n \"\"\"Returns hash of the given query after stripping all comments, line breaks and multiple\n spaces, and lower casing all text.\n\n TODO: possible issue - the following queries will get the same id:\n 1. SELECT 1 FROM table WHERE column='Value';\n 2. SELECT 1 FROM table where column='value';\n \"\"\"\n sql = COMMENTS_REGEX.sub(\"\", sql)\n sql = \"\".join(sql.split()).lower()\n return hashlib.md5(sql.encode('utf-8')).hexdigest()\n\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"Custom JSON encoding class, to handle Decimal and datetime.date instances.\n \"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n\n if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n return o.isoformat()\n \n super(JSONEncoder, self).default(o)\n\n\ndef json_dumps(data):\n return json.dumps(data, cls=JSONEncoder)\n\n\nclass UnicodeWriter:\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def _encode_utf8(self, val):\n if isinstance(val, (unicode, str)):\n return val.encode('utf-8')\n\n return val\n\n def writerow(self, row):\n self.writer.writerow([self._encode_utf8(s) for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n", "path": "redash/utils.py"}]} | 1,799 | 96 |
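The encoder change above can be reproduced with nothing but the standard library. One caveat worth noting: plain `datetime.timedelta` objects have no `isoformat()` method, so this sketch serializes them with `str()` instead of routing them through the same branch. The class and field names here are illustrative, not redash code.

```python
import datetime
import json


class DemoJSONEncoder(json.JSONEncoder):
    # Same idea as the patched redash encoder: turn date/time values into
    # ISO strings instead of letting json.dumps raise TypeError.
    def default(self, o):
        if isinstance(o, (datetime.date, datetime.time)):
            return o.isoformat()
        if isinstance(o, datetime.timedelta):
            return str(o)
        return super().default(o)


row = {"timeofentrance": datetime.time(13, 52, 27)}
print(json.dumps(row, cls=DemoJSONEncoder))  # {"timeofentrance": "13:52:27"}
```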
gh_patches_debug_7666 | rasdani/github-patches | git_diff | tensorflow__addons-2204 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop Python3.5 Support
Per SIG Build, python3.5 will not be supported in TF2.4 since it has reached end of life. We should align.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """TensorFlow Addons.
16
17 TensorFlow Addons is a repository of contributions that conform to well-
18 established API patterns, but implement new functionality not available
19 in core TensorFlow. TensorFlow natively supports a large number of
20 operators, layers, metrics, losses, and optimizers. However, in a fast
21 moving field like ML, there are many interesting new developments that
22 cannot be integrated into core TensorFlow (because their broad
23 applicability is not yet clear, or it is mostly used by a smaller subset
24 of the community).
25 """
26
27 import os
28 from pathlib import Path
29 import sys
30
31 from datetime import datetime
32 from setuptools import find_packages
33 from setuptools import setup
34 from setuptools.dist import Distribution
35 from setuptools import Extension
36
37 DOCLINES = __doc__.split("\n")
38
39
40 def get_last_commit_time() -> str:
41 string_time = os.getenv("NIGHTLY_TIME").replace('"', "")
42 return datetime.strptime(string_time, "%Y-%m-%dT%H:%M:%SZ").strftime("%Y%m%d%H%M%S")
43
44
45 def get_project_name_version():
46 # Version
47 version = {}
48 base_dir = os.path.dirname(os.path.abspath(__file__))
49 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp:
50 exec(fp.read(), version)
51
52 project_name = "tensorflow-addons"
53 if "--nightly" in sys.argv:
54 project_name = "tfa-nightly"
55 version["__version__"] += get_last_commit_time()
56 sys.argv.remove("--nightly")
57
58 return project_name, version
59
60
61 def get_ext_modules():
62 ext_modules = []
63 if "--platlib-patch" in sys.argv:
64 if sys.platform.startswith("linux"):
65 # Manylinux2010 requires a patch for platlib
66 ext_modules = [Extension("_foo", ["stub.cc"])]
67 sys.argv.remove("--platlib-patch")
68 return ext_modules
69
70
71 class BinaryDistribution(Distribution):
72 """This class is needed in order to create OS specific wheels."""
73
74 def has_ext_modules(self):
75 return True
76
77
78 project_name, version = get_project_name_version()
79 min_tf_version = version["MIN_TF_VERSION"]
80 max_tf_version = version["MAX_TF_VERSION"]
81 setup(
82 name=project_name,
83 version=version["__version__"],
84 description=DOCLINES[0],
85 long_description="\n".join(DOCLINES[2:]),
86 author="Google Inc.",
87 author_email="[email protected]",
88 packages=find_packages(),
89 ext_modules=get_ext_modules(),
90 install_requires=Path("requirements.txt").read_text().splitlines(),
91 extras_require={
92 "tensorflow": ["tensorflow>={},<{}".format(min_tf_version, max_tf_version)],
93 "tensorflow-gpu": [
94 "tensorflow-gpu>={},<{}".format(min_tf_version, max_tf_version)
95 ],
96 "tensorflow-cpu": [
97 "tensorflow-cpu>={},<{}".format(min_tf_version, max_tf_version)
98 ],
99 },
100 include_package_data=True,
101 zip_safe=False,
102 distclass=BinaryDistribution,
103 classifiers=[
104 "Development Status :: 4 - Beta",
105 "Intended Audience :: Developers",
106 "Intended Audience :: Education",
107 "Intended Audience :: Science/Research",
108 "License :: OSI Approved :: Apache Software License",
109 "Programming Language :: Python :: 3",
110 "Programming Language :: Python :: 3.5",
111 "Programming Language :: Python :: 3.6",
112 "Programming Language :: Python :: 3.7",
113 "Programming Language :: Python :: 3.8",
114 "Topic :: Scientific/Engineering :: Mathematics",
115 "Topic :: Software Development :: Libraries :: Python Modules",
116 "Topic :: Software Development :: Libraries",
117 ],
118 license="Apache 2.0",
119 keywords="tensorflow addons machine learning",
120 )
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -107,7 +107,6 @@
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -107,7 +107,6 @@\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n", "issue": "Drop Python3.5 Support\nPer SIG Build, python3.5 will not be supported in TF2.4 since it has reached end of life. We should align.\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split(\"\\n\")\n\n\ndef get_last_commit_time() -> str:\n string_time = os.getenv(\"NIGHTLY_TIME\").replace('\"', \"\")\n return datetime.strptime(string_time, \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%Y%m%d%H%M%S\")\n\n\ndef get_project_name_version():\n # Version\n version = {}\n base_dir = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n exec(fp.read(), version)\n\n project_name = \"tensorflow-addons\"\n if \"--nightly\" in sys.argv:\n project_name = \"tfa-nightly\"\n version[\"__version__\"] += get_last_commit_time()\n sys.argv.remove(\"--nightly\")\n\n return project_name, version\n\n\ndef get_ext_modules():\n ext_modules = []\n if \"--platlib-patch\" in sys.argv:\n if sys.platform.startswith(\"linux\"):\n # Manylinux2010 requires a patch for platlib\n ext_modules = [Extension(\"_foo\", [\"stub.cc\"])]\n sys.argv.remove(\"--platlib-patch\")\n return ext_modules\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nproject_name, version = get_project_name_version()\nmin_tf_version = version[\"MIN_TF_VERSION\"]\nmax_tf_version = version[\"MAX_TF_VERSION\"]\nsetup(\n name=project_name,\n version=version[\"__version__\"],\n description=DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author=\"Google Inc.\",\n 
author_email=\"[email protected]\",\n packages=find_packages(),\n ext_modules=get_ext_modules(),\n install_requires=Path(\"requirements.txt\").read_text().splitlines(),\n extras_require={\n \"tensorflow\": [\"tensorflow>={},<{}\".format(min_tf_version, max_tf_version)],\n \"tensorflow-gpu\": [\n \"tensorflow-gpu>={},<{}\".format(min_tf_version, max_tf_version)\n ],\n \"tensorflow-cpu\": [\n \"tensorflow-cpu>={},<{}\".format(min_tf_version, max_tf_version)\n ],\n },\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Libraries\",\n ],\n license=\"Apache 2.0\",\n keywords=\"tensorflow addons machine learning\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. 
However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split(\"\\n\")\n\n\ndef get_last_commit_time() -> str:\n string_time = os.getenv(\"NIGHTLY_TIME\").replace('\"', \"\")\n return datetime.strptime(string_time, \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%Y%m%d%H%M%S\")\n\n\ndef get_project_name_version():\n # Version\n version = {}\n base_dir = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n exec(fp.read(), version)\n\n project_name = \"tensorflow-addons\"\n if \"--nightly\" in sys.argv:\n project_name = \"tfa-nightly\"\n version[\"__version__\"] += get_last_commit_time()\n sys.argv.remove(\"--nightly\")\n\n return project_name, version\n\n\ndef get_ext_modules():\n ext_modules = []\n if \"--platlib-patch\" in sys.argv:\n if sys.platform.startswith(\"linux\"):\n # Manylinux2010 requires a patch for platlib\n ext_modules = [Extension(\"_foo\", [\"stub.cc\"])]\n sys.argv.remove(\"--platlib-patch\")\n return ext_modules\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nproject_name, version = get_project_name_version()\nmin_tf_version = version[\"MIN_TF_VERSION\"]\nmax_tf_version = version[\"MAX_TF_VERSION\"]\nsetup(\n name=project_name,\n version=version[\"__version__\"],\n description=DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author=\"Google Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n ext_modules=get_ext_modules(),\n install_requires=Path(\"requirements.txt\").read_text().splitlines(),\n extras_require={\n \"tensorflow\": [\"tensorflow>={},<{}\".format(min_tf_version, max_tf_version)],\n \"tensorflow-gpu\": [\n \"tensorflow-gpu>={},<{}\".format(min_tf_version, max_tf_version)\n ],\n \"tensorflow-cpu\": [\n \"tensorflow-cpu>={},<{}\".format(min_tf_version, max_tf_version)\n ],\n },\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Libraries\",\n ],\n license=\"Apache 2.0\",\n keywords=\"tensorflow addons machine learning\",\n)\n", "path": "setup.py"}]} | 1,498 | 114 |
gh_patches_debug_3093 | rasdani/github-patches | git_diff | googleapis__python-bigquery-135 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A new release of rsa dependency breaks Python 2.7 tests
Recent `rsa` releases are no longer compatible with Python 2.7; the last compatible version is 4.0. We need to bound its version in order to preserve Python 2.7 compatibility.
> Major changes in 4.1
Version 4.0 was the last version to support Python 2 and 3.4. Version 4.1 is compatible with Python 3.5+ only.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25 version = "1.25.0"
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 'enum34; python_version < "3.4"',
33 "google-auth >= 1.9.0, < 2.0dev",
34 "google-api-core >= 1.15.0, < 2.0dev",
35 "google-cloud-core >= 1.1.0, < 2.0dev",
36 "google-resumable-media >= 0.5.0, < 0.6dev",
37 "protobuf >= 3.6.0",
38 "six >=1.13.0,< 2.0.0dev",
39 ]
40 extras = {
41 "bqstorage": [
42 "google-cloud-bigquery-storage >= 1.0.0, <2.0.0dev",
43 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
44 # installed, even though `google-cloud-bigquery-storage` specifies it
45 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
46 # See: https://github.com/googleapis/python-bigquery/issues/83
47 "grpcio >= 1.8.2, < 2.0dev",
48 "pyarrow>=0.16.0, < 2.0dev",
49 ],
50 "pandas": ["pandas>=0.17.1"],
51 # Exclude PyArrow dependency from Windows Python 2.7.
52 'pyarrow: platform_system != "Windows" or python_version >= "3.4"': [
53 # Bad Linux release for 0.14.0.
54 # https://issues.apache.org/jira/browse/ARROW-5868
55 "pyarrow>=0.4.1, != 0.14.0"
56 ],
57 "tqdm": ["tqdm >= 4.0.0, <5.0.0dev"],
58 "fastparquet": [
59 "fastparquet",
60 "python-snappy",
61 # llvmlite >= 0.32.0 cannot be installed on Python 3.5 and below
62 # (building the wheel fails), thus needs to be restricted.
63 # See: https://github.com/googleapis/python-bigquery/issues/78
64 "llvmlite <= 0.31.0",
65 ],
66 }
67
68 all_extras = []
69
70 for extra in extras:
71 if extra == "fastparquet":
72 # Skip fastparquet from "all" because it is redundant with pyarrow and
73 # creates a dependency on pre-release versions of numpy. See:
74 # https://github.com/googleapis/google-cloud-python/issues/8549
75 continue
76 all_extras.extend(extras[extra])
77
78 extras["all"] = all_extras
79
80 # Setup boilerplate below this line.
81
82 package_root = os.path.abspath(os.path.dirname(__file__))
83
84 readme_filename = os.path.join(package_root, "README.rst")
85 with io.open(readme_filename, encoding="utf-8") as readme_file:
86 readme = readme_file.read()
87
88 # Only include packages under the 'google' namespace. Do not include tests,
89 # benchmarks, etc.
90 packages = [
91 package for package in setuptools.find_packages() if package.startswith("google")
92 ]
93
94 # Determine which namespaces are needed.
95 namespaces = ["google"]
96 if "google.cloud" in packages:
97 namespaces.append("google.cloud")
98
99
100 setuptools.setup(
101 name=name,
102 version=version,
103 description=description,
104 long_description=readme,
105 author="Google LLC",
106 author_email="[email protected]",
107 license="Apache 2.0",
108 url="https://github.com/googleapis/python-bigquery",
109 classifiers=[
110 release_status,
111 "Intended Audience :: Developers",
112 "License :: OSI Approved :: Apache Software License",
113 "Programming Language :: Python",
114 "Programming Language :: Python :: 2",
115 "Programming Language :: Python :: 2.7",
116 "Programming Language :: Python :: 3",
117 "Programming Language :: Python :: 3.5",
118 "Programming Language :: Python :: 3.6",
119 "Programming Language :: Python :: 3.7",
120 "Programming Language :: Python :: 3.8",
121 "Operating System :: OS Independent",
122 "Topic :: Internet",
123 ],
124 platforms="Posix; MacOS X; Windows",
125 packages=packages,
126 namespace_packages=namespaces,
127 install_requires=dependencies,
128 extras_require=extras,
129 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
130 include_package_data=True,
131 zip_safe=False,
132 )
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -36,6 +36,10 @@
"google-resumable-media >= 0.5.0, < 0.6dev",
"protobuf >= 3.6.0",
"six >=1.13.0,< 2.0.0dev",
+ # rsa >= 4.1 is not compatible with Python 2
+ # https://github.com/sybrenstuvel/python-rsa/issues/152
+ 'rsa <4.1; python_version < "3"',
+ 'rsa >=3.1.4, <5; python_version >= "3"',
]
extras = {
"bqstorage": [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,6 +36,10 @@\n \"google-resumable-media >= 0.5.0, < 0.6dev\",\n \"protobuf >= 3.6.0\",\n \"six >=1.13.0,< 2.0.0dev\",\n+ # rsa >= 4.1 is not compatible with Python 2\n+ # https://github.com/sybrenstuvel/python-rsa/issues/152\n+ 'rsa <4.1; python_version < \"3\"',\n+ 'rsa >=3.1.4, <5; python_version >= \"3\"',\n ]\n extras = {\n \"bqstorage\": [\n", "issue": "A new release of rsa dependency breaks Python 2.7 tests\nRecent `rsa` releases are not compatible with Python 2.7 anymore, the last compatible version is 4.0. We need to bound its version in order to preserve Python 2.7 compatibility.\r\n\r\n> Major changes in 4.1\r\nVersion 4.0 was the last version to support Python 2 and 3.4. Version 4.1 is compatible with Python 3.5+ only.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\nversion = \"1.25.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n 'enum34; python_version < \"3.4\"',\n \"google-auth >= 1.9.0, < 2.0dev\",\n \"google-api-core >= 1.15.0, < 2.0dev\",\n \"google-cloud-core >= 1.1.0, < 2.0dev\",\n \"google-resumable-media >= 0.5.0, < 0.6dev\",\n \"protobuf >= 3.6.0\",\n \"six >=1.13.0,< 2.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 1.0.0, <2.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83\n \"grpcio >= 1.8.2, < 2.0dev\",\n \"pyarrow>=0.16.0, < 2.0dev\",\n ],\n \"pandas\": [\"pandas>=0.17.1\"],\n # Exclude PyArrow dependency from Windows Python 2.7.\n 'pyarrow: platform_system != \"Windows\" or python_version >= \"3.4\"': [\n # Bad Linux release for 0.14.0.\n # https://issues.apache.org/jira/browse/ARROW-5868\n \"pyarrow>=0.4.1, != 0.14.0\"\n ],\n \"tqdm\": [\"tqdm >= 4.0.0, <5.0.0dev\"],\n \"fastparquet\": [\n \"fastparquet\",\n \"python-snappy\",\n # llvmlite >= 0.32.0 cannot be installed on Python 3.5 and below\n # (building the wheel fails), thus needs to be restricted.\n # See: https://github.com/googleapis/python-bigquery/issues/78\n \"llvmlite <= 0.31.0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n if extra == \"fastparquet\":\n # Skip fastparquet from \"all\" because it is redundant with pyarrow and\n # creates a dependency on pre-release versions of numpy. 
See:\n # https://github.com/googleapis/google-cloud-python/issues/8549\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\nversion = \"1.25.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n 'enum34; python_version < \"3.4\"',\n \"google-auth >= 1.9.0, < 2.0dev\",\n \"google-api-core >= 1.15.0, < 2.0dev\",\n \"google-cloud-core >= 1.1.0, < 2.0dev\",\n \"google-resumable-media >= 0.5.0, < 0.6dev\",\n \"protobuf >= 3.6.0\",\n \"six >=1.13.0,< 2.0.0dev\",\n # rsa >= 4.1 is not compatible with Python 2\n # https://github.com/sybrenstuvel/python-rsa/issues/152\n 'rsa <4.1; python_version < \"3\"',\n 'rsa >=3.1.4, <5; python_version >= \"3\"',\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 1.0.0, <2.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though 
`google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83\n \"grpcio >= 1.8.2, < 2.0dev\",\n \"pyarrow>=0.16.0, < 2.0dev\",\n ],\n \"pandas\": [\"pandas>=0.17.1\"],\n # Exclude PyArrow dependency from Windows Python 2.7.\n 'pyarrow: platform_system != \"Windows\" or python_version >= \"3.4\"': [\n # Bad Linux release for 0.14.0.\n # https://issues.apache.org/jira/browse/ARROW-5868\n \"pyarrow>=0.4.1, != 0.14.0\"\n ],\n \"tqdm\": [\"tqdm >= 4.0.0, <5.0.0dev\"],\n \"fastparquet\": [\n \"fastparquet\",\n \"python-snappy\",\n # llvmlite >= 0.32.0 cannot be installed on Python 3.5 and below\n # (building the wheel fails), thus needs to be restricted.\n # See: https://github.com/googleapis/python-bigquery/issues/78\n \"llvmlite <= 0.31.0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n if extra == \"fastparquet\":\n # Skip fastparquet from \"all\" because it is redundant with pyarrow and\n # creates a dependency on pre-release versions of numpy. See:\n # https://github.com/googleapis/google-cloud-python/issues/8549\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,877 | 169 |
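Editor's note on the record above: the reference patch bounds `rsa` with PEP 508 environment markers (`python_version < "3"` / `python_version >= "3"`) so that Python 2 installs stay on a compatible release while Python 3 can take newer ones. The sketch below is purely illustrative (it is not part of google-cloud-bigquery) and assumes the `packaging` library is available for evaluating markers:

```python
# Illustrative sketch only (not repository code): check which rsa pin from the
# patch applies on Python 2.7 versus Python 3.x by evaluating its PEP 508 marker.
from packaging.requirements import Requirement

pins = [
    'rsa <4.1; python_version < "3"',
    'rsa >=3.1.4, <5; python_version >= "3"',
]

for pin in pins:
    req = Requirement(pin)
    for py in ("2.7", "3.8"):
        # Marker.evaluate() accepts an environment dict that overrides defaults.
        applies = req.marker.evaluate({"python_version": py})
        print(f"{pin!r} -> applies on Python {py}: {applies}")
```

pip performs the same marker evaluation at install time, so each interpreter only ever sees the bound written for it.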
gh_patches_debug_54533 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-7566 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CT-2552] Pin protobuf to greater than 4.0
Some users were seeing an error:
> ImportError: cannot import name 'builder' from 'google.protobuf.internal'.
The generated types_pb2.py file was built with protobuf 4 and requires protobuf 4 in order to import "builder".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 7, 2):
6 print("Error: dbt does not support this version of Python.")
7 print("Please upgrade to Python 3.7.2 or higher.")
8 sys.exit(1)
9
10
11 from setuptools import setup
12
13 try:
14 from setuptools import find_namespace_packages
15 except ImportError:
16 # the user has a downlevel version of setuptools.
17 print("Error: dbt requires setuptools v40.1.0 or higher.")
18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again")
19 sys.exit(1)
20
21
22 this_directory = os.path.abspath(os.path.dirname(__file__))
23 with open(os.path.join(this_directory, "README.md")) as f:
24 long_description = f.read()
25
26
27 package_name = "dbt-core"
28 package_version = "1.6.0a1"
29 description = """With dbt, data analysts and engineers can build analytics \
30 the way engineers build applications."""
31
32
33 setup(
34 name=package_name,
35 version=package_version,
36 description=description,
37 long_description=long_description,
38 long_description_content_type="text/markdown",
39 author="dbt Labs",
40 author_email="[email protected]",
41 url="https://github.com/dbt-labs/dbt-core",
42 packages=find_namespace_packages(include=["dbt", "dbt.*"]),
43 include_package_data=True,
44 test_suite="test",
45 entry_points={
46 "console_scripts": ["dbt = dbt.cli.main:cli"],
47 },
48 install_requires=[
49 "Jinja2==3.1.2",
50 "agate>=1.6,<1.7.1",
51 "click>=7.0,<9",
52 "colorama>=0.3.9,<0.4.7",
53 "hologram>=0.0.14,<=0.0.16",
54 "isodate>=0.6,<0.7",
55 "logbook>=1.5,<1.6",
56 "mashumaro[msgpack]==3.6",
57 "minimal-snowplow-tracker==0.0.2",
58 "networkx>=2.3,<2.8.1;python_version<'3.8'",
59 "networkx>=2.3,<3;python_version>='3.8'",
60 "packaging>20.9",
61 "sqlparse>=0.2.3,<0.4.4",
62 "dbt-extractor~=0.4.1",
63 "typing-extensions>=3.7.4",
64 "werkzeug>=1,<3",
65 "pathspec>=0.9,<0.12",
66 "protobuf>=3.18.3",
67 "pytz>=2015.7",
68 # the following are all to match snowflake-connector-python
69 "requests<3.0.0",
70 "idna>=2.5,<4",
71 "cffi>=1.9,<2.0.0",
72 "pyyaml>=6.0",
73 ],
74 zip_safe=False,
75 classifiers=[
76 "Development Status :: 5 - Production/Stable",
77 "License :: OSI Approved :: Apache Software License",
78 "Operating System :: Microsoft :: Windows",
79 "Operating System :: MacOS :: MacOS X",
80 "Operating System :: POSIX :: Linux",
81 "Programming Language :: Python :: 3.7",
82 "Programming Language :: Python :: 3.8",
83 "Programming Language :: Python :: 3.9",
84 "Programming Language :: Python :: 3.10",
85 "Programming Language :: Python :: 3.11",
86 ],
87 python_requires=">=3.7.2",
88 )
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -63,7 +63,7 @@
"typing-extensions>=3.7.4",
"werkzeug>=1,<3",
"pathspec>=0.9,<0.12",
- "protobuf>=3.18.3",
+ "protobuf>=4.0.0",
"pytz>=2015.7",
# the following are all to match snowflake-connector-python
"requests<3.0.0",
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -63,7 +63,7 @@\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec>=0.9,<0.12\",\n- \"protobuf>=3.18.3\",\n+ \"protobuf>=4.0.0\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n", "issue": "[CT-2552] Pin protobuf to greater than 4.0\nSome users were seeing an error:\r\n\r\n> ImportError: cannot import name 'builder' from 'google.protobuf.internal'.\r\nThe generated types_pb2.py file was built with protobuf 4 and requires protobuf 4 in order to import \"builder\".\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.6.0a1\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.cli.main:cli\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.7.1\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.7\",\n \"hologram>=0.0.14,<=0.0.16\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.6\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>20.9\",\n \"sqlparse>=0.2.3,<0.4.4\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec>=0.9,<0.12\",\n \"protobuf>=3.18.3\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please 
upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.6.0a1\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.cli.main:cli\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.7.1\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.7\",\n \"hologram>=0.0.14,<=0.0.16\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.6\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>20.9\",\n \"sqlparse>=0.2.3,<0.4.4\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec>=0.9,<0.12\",\n \"protobuf>=4.0.0\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}]} | 1,302 | 130 |
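Editor's note on the record above: the failing import in the issue comes from `*_pb2.py` modules generated by a newer `protoc`, which do `from google.protobuf.internal import builder as _builder`; older protobuf installs simply do not ship that module, hence the raised floor in the reference patch. The guard below is an illustrative, standalone sketch (not dbt-core code), and its error message wording is my own:

```python
# Illustrative sketch only (not dbt-core code): reproduce the failing import that
# motivated the stricter protobuf pin, and fail fast with a clearer message.
import importlib


def check_protobuf_generated_code_support() -> None:
    """Raise if the installed protobuf cannot satisfy generated *_pb2.py modules."""
    try:
        # protobuf-4-era generated code does:
        #   from google.protobuf.internal import builder as _builder
        importlib.import_module("google.protobuf.internal.builder")
    except ImportError as exc:
        raise RuntimeError(
            "protobuf >= 4.0.0 is required: generated types_pb2.py imports "
            "google.protobuf.internal.builder"
        ) from exc


if __name__ == "__main__":
    check_protobuf_generated_code_support()
    print("protobuf install supports protobuf-4 generated modules")
```

The actual fix is only the version bound shown in the patch; this guard just demonstrates why installs with an older protobuf fail at import time.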
gh_patches_debug_4049 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1811 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Decimal scalars aren't serialized as strings when passed as numbers
When using the `Decimal` scalar, if the value passed in is a floating-point number, the value isn't serialized as a string but as if it were still the floating-point number, which causes precision issues, as can be seen in the working example below.
[Working example](https://la4de.github.io/strawberry-playground/#UEsDBAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAcmVxdWlyZW1lbnRzLnR4dHN0cmF3YmVycnktZ3JhcGhxbFBLAwQKAAAAAABWgIxUj3TlatIAAADSAAAACQAAAHNjaGVtYS5weWltcG9ydCBzdHJhd2JlcnJ5CmZyb20gZGVjaW1hbCBpbXBvcnQgRGVjaW1hbAoKQHN0cmF3YmVycnkudHlwZQpjbGFzcyBRdWVyeToKICAgIEBzdHJhd2JlcnJ5LmZpZWxkCiAgICBkZWYgZGVjaW1hbChzZWxmLCB2YWx1ZTogRGVjaW1hbCkgLT4gRGVjaW1hbDoKICAgICAgICByZXR1cm4gdmFsdWUKCnNjaGVtYSA9IHN0cmF3YmVycnkuU2NoZW1hKHF1ZXJ5PVF1ZXJ5KVBLAwQKAAAAAABWgIxUERrh0UMAAABDAAAACQAAAHF1ZXJ5LmdxbHsKICBkZWNpbWFsKHZhbHVlOiAzLjE0KQogIGFub3RoZXJEZWNpbWFsOiBkZWNpbWFsKHZhbHVlOiAiMy4xNCIpCn1QSwMECgAAAAAAVoCMVKi7vnMDAAAAAwAAAA4AAAB2YXJpYWJsZXMuanNvbnsKfVBLAQIUAAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAAAAAAAAAAAAAAAAAAAByZXF1aXJlbWVudHMudHh0UEsBAhQACgAAAAAAVoCMVI905WrSAAAA0gAAAAkAAAAAAAAAAAAAAAAAQAAAAHNjaGVtYS5weVBLAQIUAAoAAAAAAFaAjFQRGuHRQwAAAEMAAAAJAAAAAAAAAAAAAAAAADkBAABxdWVyeS5ncWxQSwECFAAKAAAAAABWgIxUqLu+cwMAAAADAAAADgAAAAAAAAAAAAAAAACjAQAAdmFyaWFibGVzLmpzb25QSwUGAAAAAAQABADoAAAA0gEAAAAA)
Actual output - `"3.140000000000000124344978758017532527446746826171875"`
Expected output - `"3.14"`
As text:
```
@strawberry.type
class DecimalResponse:
value: Decimal
@strawberry.field
def decimals(
dec_value: Decimal,
) -> DecimalResponse:
return DecimalResponse(value=dec_value)
```
```
mutation decimals($dec_value: Decimal!) {
decimals(
decValue: $dec_value
) {
... on DecimalResponse {
value
}
}
}
{
"dec_value": 3.14
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/schema/types/base_scalars.py`
Content:
```
1 import datetime
2 import decimal
3 import uuid
4 from operator import methodcaller
5 from typing import Callable
6
7 import dateutil.parser
8
9 from graphql import GraphQLError
10
11 from strawberry.custom_scalar import scalar
12
13
14 def wrap_parser(parser: Callable, type_: str) -> Callable:
15 def inner(value: str):
16 try:
17 return parser(value)
18 except ValueError as e:
19 raise GraphQLError(f'Value cannot represent a {type_}: "{value}". {e}')
20
21 return inner
22
23
24 def parse_decimal(value: str) -> decimal.Decimal:
25 try:
26 return decimal.Decimal(value)
27 except decimal.DecimalException:
28 raise GraphQLError(f'Value cannot represent a Decimal: "{value}".')
29
30
31 isoformat = methodcaller("isoformat")
32
33
34 Date = scalar(
35 datetime.date,
36 name="Date",
37 description="Date (isoformat)",
38 serialize=isoformat,
39 parse_value=wrap_parser(datetime.date.fromisoformat, "Date"),
40 )
41 DateTime = scalar(
42 datetime.datetime,
43 name="DateTime",
44 description="Date with time (isoformat)",
45 serialize=isoformat,
46 parse_value=wrap_parser(dateutil.parser.isoparse, "DateTime"),
47 )
48 Time = scalar(
49 datetime.time,
50 name="Time",
51 description="Time (isoformat)",
52 serialize=isoformat,
53 parse_value=wrap_parser(datetime.time.fromisoformat, "Time"),
54 )
55
56 Decimal = scalar(
57 decimal.Decimal,
58 name="Decimal",
59 description="Decimal (fixed-point)",
60 serialize=str,
61 parse_value=parse_decimal,
62 )
63
64 UUID = scalar(
65 uuid.UUID,
66 name="UUID",
67 serialize=str,
68 parse_value=wrap_parser(uuid.UUID, "UUID"),
69 )
70
71
72 def _verify_void(x) -> None:
73 if x is not None:
74 raise ValueError(f"Expected 'None', got '{x}'")
75
76
77 Void = scalar(
78 type(None),
79 name="Void",
80 serialize=_verify_void,
81 parse_value=_verify_void,
82 description="Represents NULL values",
83 )
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/schema/types/base_scalars.py b/strawberry/schema/types/base_scalars.py
--- a/strawberry/schema/types/base_scalars.py
+++ b/strawberry/schema/types/base_scalars.py
@@ -21,9 +21,9 @@
return inner
-def parse_decimal(value: str) -> decimal.Decimal:
+def parse_decimal(value: object) -> decimal.Decimal:
try:
- return decimal.Decimal(value)
+ return decimal.Decimal(str(value))
except decimal.DecimalException:
raise GraphQLError(f'Value cannot represent a Decimal: "{value}".')
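Editor's note: the one-line change above (`Decimal(value)` → `Decimal(str(value))`) is the whole fix — constructing a `Decimal` directly from a `float` captures the float's exact binary value, while routing through `str()` keeps the short human-readable form. A minimal standalone illustration (not part of Strawberry) using the number from the issue:

```python
# Illustrative sketch only: why the patch converts through str() first.
from decimal import Decimal

raw = 3.14  # a GraphQL variable sent as a JSON number arrives as a Python float

print(Decimal(raw))       # 3.140000000000000124344978758017532527446746826171875
print(Decimal(str(raw)))  # 3.14
```

On Python 3, `str()` yields the shortest decimal string that round-trips to the same float, which is what clients expect to see echoed back.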
| {"golden_diff": "diff --git a/strawberry/schema/types/base_scalars.py b/strawberry/schema/types/base_scalars.py\n--- a/strawberry/schema/types/base_scalars.py\n+++ b/strawberry/schema/types/base_scalars.py\n@@ -21,9 +21,9 @@\n return inner\n \n \n-def parse_decimal(value: str) -> decimal.Decimal:\n+def parse_decimal(value: object) -> decimal.Decimal:\n try:\n- return decimal.Decimal(value)\n+ return decimal.Decimal(str(value))\n except decimal.DecimalException:\n raise GraphQLError(f'Value cannot represent a Decimal: \"{value}\".')\n", "issue": "Decimal scalars aren't serialized as strings when passed as numbers\nWhen using the `Decimal` scalar, if the value passed in is a floating point number, the value isn't serialized as a string, but as if it were still the floating point number which causes precision issues as can be seen in the working example below.\r\n\r\n\r\n[Working example](https://la4de.github.io/strawberry-playground/#UEsDBAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAcmVxdWlyZW1lbnRzLnR4dHN0cmF3YmVycnktZ3JhcGhxbFBLAwQKAAAAAABWgIxUj3TlatIAAADSAAAACQAAAHNjaGVtYS5weWltcG9ydCBzdHJhd2JlcnJ5CmZyb20gZGVjaW1hbCBpbXBvcnQgRGVjaW1hbAoKQHN0cmF3YmVycnkudHlwZQpjbGFzcyBRdWVyeToKICAgIEBzdHJhd2JlcnJ5LmZpZWxkCiAgICBkZWYgZGVjaW1hbChzZWxmLCB2YWx1ZTogRGVjaW1hbCkgLT4gRGVjaW1hbDoKICAgICAgICByZXR1cm4gdmFsdWUKCnNjaGVtYSA9IHN0cmF3YmVycnkuU2NoZW1hKHF1ZXJ5PVF1ZXJ5KVBLAwQKAAAAAABWgIxUERrh0UMAAABDAAAACQAAAHF1ZXJ5LmdxbHsKICBkZWNpbWFsKHZhbHVlOiAzLjE0KQogIGFub3RoZXJEZWNpbWFsOiBkZWNpbWFsKHZhbHVlOiAiMy4xNCIpCn1QSwMECgAAAAAAVoCMVKi7vnMDAAAAAwAAAA4AAAB2YXJpYWJsZXMuanNvbnsKfVBLAQIUAAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAAAAAAAAAAAAAAAAAAAByZXF1aXJlbWVudHMudHh0UEsBAhQACgAAAAAAVoCMVI905WrSAAAA0gAAAAkAAAAAAAAAAAAAAAAAQAAAAHNjaGVtYS5weVBLAQIUAAoAAAAAAFaAjFQRGuHRQwAAAEMAAAAJAAAAAAAAAAAAAAAAADkBAABxdWVyeS5ncWxQSwECFAAKAAAAAABWgIxUqLu+cwMAAAADAAAADgAAAAAAAAAAAAAAAACjAQAAdmFyaWFibGVzLmpzb25QSwUGAAAAAAQABADoAAAA0gEAAAAA)\r\n\r\nActual output - `\"3.140000000000000124344978758017532527446746826171875\"`\r\nExpected output - `\"3.14\"`\r\n\r\nAs text:\r\n\r\n```\r\[email protected]\r\nclass DecimalResponse:\r\n value: Decimal\r\n\r\n\r\[email protected]\r\ndef decimals(\r\n dec_value: Decimal,\r\n) -> DecimalResponse:\r\n return DecimalResponse(value=dec_value)\r\n ```\r\n```\r\nmutation decimals($dec_value: Decimal!) {\r\n decimals(\r\n decValue: $dec_value\r\n ) {\r\n ... on DecimalResponse {\r\n value\r\n }\r\n }\r\n}\r\n{\r\n \"dec_value\": 3.14\r\n}\r\n```\r\n\n", "before_files": [{"content": "import datetime\nimport decimal\nimport uuid\nfrom operator import methodcaller\nfrom typing import Callable\n\nimport dateutil.parser\n\nfrom graphql import GraphQLError\n\nfrom strawberry.custom_scalar import scalar\n\n\ndef wrap_parser(parser: Callable, type_: str) -> Callable:\n def inner(value: str):\n try:\n return parser(value)\n except ValueError as e:\n raise GraphQLError(f'Value cannot represent a {type_}: \"{value}\". 
{e}')\n\n return inner\n\n\ndef parse_decimal(value: str) -> decimal.Decimal:\n try:\n return decimal.Decimal(value)\n except decimal.DecimalException:\n raise GraphQLError(f'Value cannot represent a Decimal: \"{value}\".')\n\n\nisoformat = methodcaller(\"isoformat\")\n\n\nDate = scalar(\n datetime.date,\n name=\"Date\",\n description=\"Date (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(datetime.date.fromisoformat, \"Date\"),\n)\nDateTime = scalar(\n datetime.datetime,\n name=\"DateTime\",\n description=\"Date with time (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(dateutil.parser.isoparse, \"DateTime\"),\n)\nTime = scalar(\n datetime.time,\n name=\"Time\",\n description=\"Time (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(datetime.time.fromisoformat, \"Time\"),\n)\n\nDecimal = scalar(\n decimal.Decimal,\n name=\"Decimal\",\n description=\"Decimal (fixed-point)\",\n serialize=str,\n parse_value=parse_decimal,\n)\n\nUUID = scalar(\n uuid.UUID,\n name=\"UUID\",\n serialize=str,\n parse_value=wrap_parser(uuid.UUID, \"UUID\"),\n)\n\n\ndef _verify_void(x) -> None:\n if x is not None:\n raise ValueError(f\"Expected 'None', got '{x}'\")\n\n\nVoid = scalar(\n type(None),\n name=\"Void\",\n serialize=_verify_void,\n parse_value=_verify_void,\n description=\"Represents NULL values\",\n)\n", "path": "strawberry/schema/types/base_scalars.py"}], "after_files": [{"content": "import datetime\nimport decimal\nimport uuid\nfrom operator import methodcaller\nfrom typing import Callable\n\nimport dateutil.parser\n\nfrom graphql import GraphQLError\n\nfrom strawberry.custom_scalar import scalar\n\n\ndef wrap_parser(parser: Callable, type_: str) -> Callable:\n def inner(value: str):\n try:\n return parser(value)\n except ValueError as e:\n raise GraphQLError(f'Value cannot represent a {type_}: \"{value}\". {e}')\n\n return inner\n\n\ndef parse_decimal(value: object) -> decimal.Decimal:\n try:\n return decimal.Decimal(str(value))\n except decimal.DecimalException:\n raise GraphQLError(f'Value cannot represent a Decimal: \"{value}\".')\n\n\nisoformat = methodcaller(\"isoformat\")\n\n\nDate = scalar(\n datetime.date,\n name=\"Date\",\n description=\"Date (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(datetime.date.fromisoformat, \"Date\"),\n)\nDateTime = scalar(\n datetime.datetime,\n name=\"DateTime\",\n description=\"Date with time (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(dateutil.parser.isoparse, \"DateTime\"),\n)\nTime = scalar(\n datetime.time,\n name=\"Time\",\n description=\"Time (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(datetime.time.fromisoformat, \"Time\"),\n)\n\nDecimal = scalar(\n decimal.Decimal,\n name=\"Decimal\",\n description=\"Decimal (fixed-point)\",\n serialize=str,\n parse_value=parse_decimal,\n)\n\nUUID = scalar(\n uuid.UUID,\n name=\"UUID\",\n serialize=str,\n parse_value=wrap_parser(uuid.UUID, \"UUID\"),\n)\n\n\ndef _verify_void(x) -> None:\n if x is not None:\n raise ValueError(f\"Expected 'None', got '{x}'\")\n\n\nVoid = scalar(\n type(None),\n name=\"Void\",\n serialize=_verify_void,\n parse_value=_verify_void,\n description=\"Represents NULL values\",\n)\n", "path": "strawberry/schema/types/base_scalars.py"}]} | 1,686 | 133 |
gh_patches_debug_6817 | rasdani/github-patches | git_diff | SeldonIO__MLServer-866 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add more debugging information
I am running a custom Python model in MLServer, and it fails with an error. The error itself is not interesting; what's important is that MLServer gives very little helpful information to debug it.
After sending a request with grpcurl here is the entire response:
````
ERROR:
Code: Unknown
Message: Unexpected <class 'TypeError'>: list indices must be integers or slices, not str
````
No stack trace, no line numbers or class names. The only thing that appears in the logs is
> scv2-mlserver-1 | 2022-10-19 09:10:44,218 [mlserver.grpc] INFO - /inference.GRPCInferenceService/ModelInfer
It would be great if MLServer propagated more debugging info to the response message and to the log.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/grpc/utils.py`
Content:
```
1 import grpc
2
3 from typing import Callable, Dict, Tuple
4 from fastapi import status
5
6 from grpc import ServicerContext
7
8 from .logging import logger
9 from ..errors import MLServerError
10
11
12 STATUS_CODE_MAPPING = {
13 status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT,
14 status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND,
15 status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION,
16 status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL,
17 }
18
19
20 def to_headers(context: ServicerContext) -> Dict[str, str]:
21 metadata = context.invocation_metadata()
22 if hasattr(context, "trailing_metadata"):
23 # NOTE: Older versions of `grpcio` (e.g. `grpcio==1.34.0`) don't expose
24 # access to the trailing metadata on the service side
25 metadata += context.trailing_metadata()
26 headers = {}
27 for metadatum in metadata:
28 headers[metadatum.key] = metadatum.value
29
30 return headers
31
32
33 def to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]:
34 return tuple((key.lower(), value) for key, value in headers.items())
35
36
37 def _grpc_status_code(err: MLServerError):
38 return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN)
39
40
41 def handle_mlserver_error(f: Callable):
42 async def _inner(self, request, context):
43 try:
44 return await f(self, request, context)
45 except MLServerError as err:
46 logger.error(err)
47 await context.abort(code=_grpc_status_code(err), details=str(err))
48
49 return _inner
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/grpc/utils.py b/mlserver/grpc/utils.py
--- a/mlserver/grpc/utils.py
+++ b/mlserver/grpc/utils.py
@@ -43,7 +43,10 @@
try:
return await f(self, request, context)
except MLServerError as err:
- logger.error(err)
+ logger.exception(err)
await context.abort(code=_grpc_status_code(err), details=str(err))
+ except Exception as err:
+ logger.exception(err)
+ await context.abort(code=grpc.StatusCode.INTERNAL, details=str(err))
return _inner
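Editor's note: the patch above tackles the report in two ways — `logger.exception` records the full traceback, and the new catch-all branch maps unexpected errors to gRPC `INTERNAL` with the exception text instead of the opaque `UNKNOWN` shown in the issue. A condensed standalone sketch of that pattern (the decorator and logger names here are illustrative, not MLServer's):

```python
# Illustrative sketch only: log the traceback and map unexpected errors to INTERNAL.
import logging

import grpc

logger = logging.getLogger("example.grpc")


def handle_errors(f):
    async def _inner(self, request, context):
        try:
            return await f(self, request, context)
        except Exception as err:  # deliberately broad at the service boundary
            logger.exception(err)  # emits the stack trace, unlike logger.error(err)
            await context.abort(code=grpc.StatusCode.INTERNAL, details=str(err))

    return _inner
```

Keeping the narrower `MLServerError` branch first, as the real patch does, still lets known errors map to their specific status codes before this broad fallback runs.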
| {"golden_diff": "diff --git a/mlserver/grpc/utils.py b/mlserver/grpc/utils.py\n--- a/mlserver/grpc/utils.py\n+++ b/mlserver/grpc/utils.py\n@@ -43,7 +43,10 @@\n try:\n return await f(self, request, context)\n except MLServerError as err:\n- logger.error(err)\n+ logger.exception(err)\n await context.abort(code=_grpc_status_code(err), details=str(err))\n+ except Exception as err:\n+ logger.exception(err)\n+ await context.abort(code=grpc.StatusCode.INTERNAL, details=str(err))\n \n return _inner\n", "issue": "Add more debugging information\nI am running a custom Python model in MLServer, and it fails with an error. The error itself is not interesting, what's important is that MLServer gives very little helpful information to debug it.\r\n\r\nAfter sending a request with grpcurl here is the entire response:\r\n````\r\nERROR:\r\n Code: Unknown\r\n Message: Unexpected <class 'TypeError'>: list indices must be integers or slices, not str\r\n````\r\nNo stack trace, no line numbers or class names. The only thing that appears in the logs is\r\n> scv2-mlserver-1 | 2022-10-19 09:10:44,218 [mlserver.grpc] INFO - /inference.GRPCInferenceService/ModelInfer\r\n\r\nWould be great if MLServer propagated more debugging info to the response message and to the log.\n", "before_files": [{"content": "import grpc\n\nfrom typing import Callable, Dict, Tuple\nfrom fastapi import status\n\nfrom grpc import ServicerContext\n\nfrom .logging import logger\nfrom ..errors import MLServerError\n\n\nSTATUS_CODE_MAPPING = {\n status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT,\n status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND,\n status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION,\n status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL,\n}\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n # NOTE: Older versions of `grpcio` (e.g. `grpcio==1.34.0`) don't expose\n # access to the trailing metadata on the service side\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]:\n return tuple((key.lower(), value) for key, value in headers.items())\n\n\ndef _grpc_status_code(err: MLServerError):\n return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN)\n\n\ndef handle_mlserver_error(f: Callable):\n async def _inner(self, request, context):\n try:\n return await f(self, request, context)\n except MLServerError as err:\n logger.error(err)\n await context.abort(code=_grpc_status_code(err), details=str(err))\n\n return _inner\n", "path": "mlserver/grpc/utils.py"}], "after_files": [{"content": "import grpc\n\nfrom typing import Callable, Dict, Tuple\nfrom fastapi import status\n\nfrom grpc import ServicerContext\n\nfrom .logging import logger\nfrom ..errors import MLServerError\n\n\nSTATUS_CODE_MAPPING = {\n status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT,\n status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND,\n status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION,\n status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL,\n}\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n # NOTE: Older versions of `grpcio` (e.g. 
`grpcio==1.34.0`) don't expose\n # access to the trailing metadata on the service side\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]:\n return tuple((key.lower(), value) for key, value in headers.items())\n\n\ndef _grpc_status_code(err: MLServerError):\n return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN)\n\n\ndef handle_mlserver_error(f: Callable):\n async def _inner(self, request, context):\n try:\n return await f(self, request, context)\n except MLServerError as err:\n logger.exception(err)\n await context.abort(code=_grpc_status_code(err), details=str(err))\n except Exception as err:\n logger.exception(err)\n await context.abort(code=grpc.StatusCode.INTERNAL, details=str(err))\n\n return _inner\n", "path": "mlserver/grpc/utils.py"}]} | 900 | 126 |
gh_patches_debug_39788 | rasdani/github-patches | git_diff | cupy__cupy-2145 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use `ReductionKernel` in k-means example
The k-means example uses `ElementwiseKernel` as an example of a custom kernel, but its algorithm is essentially reduction rather than mapping. It would be better to use `ReductionKernel` in the k-means example and to provide another example that uses `ElementwiseKernel`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/kmeans/kmeans.py`
Content:
```
1 import argparse
2 import contextlib
3 import time
4
5 import matplotlib.pyplot as plt
6 import numpy as np
7 import six
8
9 import cupy
10
11
12 @contextlib.contextmanager
13 def timer(message):
14 cupy.cuda.Stream.null.synchronize()
15 start = time.time()
16 yield
17 cupy.cuda.Stream.null.synchronize()
18 end = time.time()
19 print('%s: %f sec' % (message, end - start))
20
21
22 _fit_calc_distances = cupy.ElementwiseKernel(
23 'S data, raw S centers, int32 n_clusters, int32 dim', 'raw S dist',
24 '''
25 for (int j = 0; j < n_clusters; j++){
26 int cent_ind[] = {j, i % dim};
27 int dist_ind[] = {i / dim, j};
28 double diff = centers[cent_ind] - data;
29 atomicAdd(&dist[dist_ind], diff * diff);
30 }
31 ''',
32 'calc_distances'
33 )
34 _fit_calc_center = cupy.ElementwiseKernel(
35 'S data, T label, int32 dim', 'raw S centers, raw S group',
36 '''
37 int cent_ind[] = {label, i % dim};
38 atomicAdd(¢ers[cent_ind], data);
39 atomicAdd(&group[label], 1);
40 ''',
41 'calc_center'
42 )
43
44
45 def fit(X, n_clusters, max_iter, use_custom_kernel):
46 assert X.ndim == 2
47 xp = cupy.get_array_module(X)
48 pred = xp.zeros(len(X), dtype=np.int32)
49 initial_indexes = np.random.choice(len(X), n_clusters,
50 replace=False).astype(np.int32)
51 centers = X[initial_indexes]
52 data_num = X.shape[0]
53 data_dim = X.shape[1]
54
55 for _ in six.moves.range(max_iter):
56 # calculate distances and label
57 if not use_custom_kernel or xp == np:
58 distances = xp.linalg.norm(X[:, None, :] - centers[None, :, :],
59 axis=2)
60 else:
61 distances = xp.zeros((data_num, n_clusters), dtype=np.float32)
62 _fit_calc_distances(X, centers, n_clusters, data_dim, distances)
63
64 new_pred = xp.argmin(distances, axis=1).astype(np.int32)
65 if xp.all(new_pred == pred):
66 break
67 pred = new_pred
68
69 # calculate centers
70 if not use_custom_kernel or xp == np:
71 centers = xp.stack([X[pred == i].mean(axis=0)
72 for i in six.moves.range(n_clusters)])
73 else:
74 centers = xp.zeros((n_clusters, data_dim),
75 dtype=np.float32)
76 group = xp.zeros(n_clusters, dtype=np.float32)
77 label = pred[:, None]
78 _fit_calc_center(X, label, data_dim, centers, group)
79 group /= data_dim
80 centers /= group[:, None]
81
82 return centers, pred
83
84
85 def draw(X, n_clusters, centers, pred, output):
86 xp = cupy.get_array_module(X)
87 for i in six.moves.range(n_clusters):
88 labels = X[pred == i]
89 if xp == cupy:
90 labels = labels.get()
91 plt.scatter(labels[:, 0], labels[:, 1], c=np.random.rand(3))
92 if xp == cupy:
93 centers = centers.get()
94 plt.scatter(centers[:, 0], centers[:, 1], s=120, marker='s',
95 facecolors='y', edgecolors='k')
96 plt.savefig(output)
97
98
99 def run(gpuid, n_clusters, num, max_iter, use_custom_kernel, output):
100 samples = np.random.randn(num, 2).astype(np.float32)
101 X_train = np.r_[samples + 1, samples - 1]
102 repeat = 1
103
104 with timer(' CPU '):
105 for i in range(repeat):
106 centers, pred = fit(X_train, n_clusters, max_iter,
107 use_custom_kernel)
108
109 with cupy.cuda.Device(gpuid):
110 X_train = cupy.asarray(X_train)
111 with timer(' GPU '):
112 for i in range(repeat):
113 centers, pred = fit(X_train, n_clusters, max_iter,
114 use_custom_kernel)
115 if output is not None:
116 index = np.random.choice(10000000, 300, replace=False)
117 draw(X_train[index], n_clusters, centers, pred[index], output)
118
119
120 if __name__ == '__main__':
121 parser = argparse.ArgumentParser()
122 parser.add_argument('--gpu-id', '-g', default=0, type=int,
123 help='ID of GPU.')
124 parser.add_argument('--n-clusters', '-n', default=2, type=int,
125 help='number of clusters')
126 parser.add_argument('--num', default=5000000, type=int,
127 help='number of samples')
128 parser.add_argument('--max-iter', '-m', default=10, type=int,
129 help='number of iterations')
130 parser.add_argument('--use-custom-kernel', action='store_true',
131 default=False, help='use Elementwise kernel')
132 parser.add_argument('--output-image', '-o', default=None, type=str,
133 help='output image file name')
134 args = parser.parse_args()
135 run(args.gpu_id, args.n_clusters, args.num, args.max_iter,
136 args.use_custom_kernel, args.output_image)
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/kmeans/kmeans.py b/examples/kmeans/kmeans.py
--- a/examples/kmeans/kmeans.py
+++ b/examples/kmeans/kmeans.py
@@ -19,26 +19,22 @@
print('%s: %f sec' % (message, end - start))
-_fit_calc_distances = cupy.ElementwiseKernel(
- 'S data, raw S centers, int32 n_clusters, int32 dim', 'raw S dist',
- '''
- for (int j = 0; j < n_clusters; j++){
- int cent_ind[] = {j, i % dim};
- int dist_ind[] = {i / dim, j};
- double diff = centers[cent_ind] - data;
- atomicAdd(&dist[dist_ind], diff * diff);
- }
- ''',
- 'calc_distances'
+var_kernel = cupy.ElementwiseKernel(
+ 'T x0, T x1, T c0, T c1', 'T out',
+ 'out = (x0 - c0) * (x0 - c0) + (x1 - c1) * (x1 - c1)',
+ 'var_kernel'
)
-_fit_calc_center = cupy.ElementwiseKernel(
- 'S data, T label, int32 dim', 'raw S centers, raw S group',
- '''
- int cent_ind[] = {label, i % dim};
-    atomicAdd(&centers[cent_ind], data);
- atomicAdd(&group[label], 1);
- ''',
- 'calc_center'
+sum_kernel = cupy.ReductionKernel(
+ 'T x, S mask', 'T out',
+ 'mask ? x : 0',
+ 'a + b', 'out = a', '0',
+ 'sum_kernel'
+)
+count_kernel = cupy.ReductionKernel(
+ 'T mask', 'float32 out',
+ 'mask ? 1.0 : 0.0',
+ 'a + b', 'out = a', '0.0',
+ 'count_kernel'
)
@@ -49,8 +45,6 @@
initial_indexes = np.random.choice(len(X), n_clusters,
replace=False).astype(np.int32)
centers = X[initial_indexes]
- data_num = X.shape[0]
- data_dim = X.shape[1]
for _ in six.moves.range(max_iter):
# calculate distances and label
@@ -58,8 +52,8 @@
distances = xp.linalg.norm(X[:, None, :] - centers[None, :, :],
axis=2)
else:
- distances = xp.zeros((data_num, n_clusters), dtype=np.float32)
- _fit_calc_distances(X, centers, n_clusters, data_dim, distances)
+ distances = var_kernel(X[:, None, 0], X[:, None, 1],
+ centers[None, :, 1], centers[None, :, 0])
new_pred = xp.argmin(distances, axis=1).astype(np.int32)
if xp.all(new_pred == pred):
@@ -67,17 +61,16 @@
pred = new_pred
# calculate centers
+ i = xp.arange(n_clusters)
+ mask = pred == i[:, None]
if not use_custom_kernel or xp == np:
- centers = xp.stack([X[pred == i].mean(axis=0)
- for i in six.moves.range(n_clusters)])
+ sums = xp.where(mask[:, :, None], X, 0).sum(axis=1)
+ counts = xp.count_nonzero(mask, axis=1)
+ centers = sums / counts
else:
- centers = xp.zeros((n_clusters, data_dim),
- dtype=np.float32)
- group = xp.zeros(n_clusters, dtype=np.float32)
- label = pred[:, None]
- _fit_calc_center(X, label, data_dim, centers, group)
- group /= data_dim
- centers /= group[:, None]
+ sums = sum_kernel(X, mask[:, :, None], axis=1)
+ counts = count_kernel(mask, axis=1)
+ centers = sums / counts
return centers, pred
| {"golden_diff": "diff --git a/examples/kmeans/kmeans.py b/examples/kmeans/kmeans.py\n--- a/examples/kmeans/kmeans.py\n+++ b/examples/kmeans/kmeans.py\n@@ -19,26 +19,22 @@\n print('%s: %f sec' % (message, end - start))\n \n \n-_fit_calc_distances = cupy.ElementwiseKernel(\n- 'S data, raw S centers, int32 n_clusters, int32 dim', 'raw S dist',\n- '''\n- for (int j = 0; j < n_clusters; j++){\n- int cent_ind[] = {j, i % dim};\n- int dist_ind[] = {i / dim, j};\n- double diff = centers[cent_ind] - data;\n- atomicAdd(&dist[dist_ind], diff * diff);\n- }\n- ''',\n- 'calc_distances'\n+var_kernel = cupy.ElementwiseKernel(\n+ 'T x0, T x1, T c0, T c1', 'T out',\n+ 'out = (x0 - c0) * (x0 - c0) + (x1 - c1) * (x1 - c1)',\n+ 'var_kernel'\n )\n-_fit_calc_center = cupy.ElementwiseKernel(\n- 'S data, T label, int32 dim', 'raw S centers, raw S group',\n- '''\n- int cent_ind[] = {label, i % dim};\n- atomicAdd(¢ers[cent_ind], data);\n- atomicAdd(&group[label], 1);\n- ''',\n- 'calc_center'\n+sum_kernel = cupy.ReductionKernel(\n+ 'T x, S mask', 'T out',\n+ 'mask ? x : 0',\n+ 'a + b', 'out = a', '0',\n+ 'sum_kernel'\n+)\n+count_kernel = cupy.ReductionKernel(\n+ 'T mask', 'float32 out',\n+ 'mask ? 1.0 : 0.0',\n+ 'a + b', 'out = a', '0.0',\n+ 'count_kernel'\n )\n \n \n@@ -49,8 +45,6 @@\n initial_indexes = np.random.choice(len(X), n_clusters,\n replace=False).astype(np.int32)\n centers = X[initial_indexes]\n- data_num = X.shape[0]\n- data_dim = X.shape[1]\n \n for _ in six.moves.range(max_iter):\n # calculate distances and label\n@@ -58,8 +52,8 @@\n distances = xp.linalg.norm(X[:, None, :] - centers[None, :, :],\n axis=2)\n else:\n- distances = xp.zeros((data_num, n_clusters), dtype=np.float32)\n- _fit_calc_distances(X, centers, n_clusters, data_dim, distances)\n+ distances = var_kernel(X[:, None, 0], X[:, None, 1],\n+ centers[None, :, 1], centers[None, :, 0])\n \n new_pred = xp.argmin(distances, axis=1).astype(np.int32)\n if xp.all(new_pred == pred):\n@@ -67,17 +61,16 @@\n pred = new_pred\n \n # calculate centers\n+ i = xp.arange(n_clusters)\n+ mask = pred == i[:, None]\n if not use_custom_kernel or xp == np:\n- centers = xp.stack([X[pred == i].mean(axis=0)\n- for i in six.moves.range(n_clusters)])\n+ sums = xp.where(mask[:, :, None], X, 0).sum(axis=1)\n+ counts = xp.count_nonzero(mask, axis=1)\n+ centers = sums / counts\n else:\n- centers = xp.zeros((n_clusters, data_dim),\n- dtype=np.float32)\n- group = xp.zeros(n_clusters, dtype=np.float32)\n- label = pred[:, None]\n- _fit_calc_center(X, label, data_dim, centers, group)\n- group /= data_dim\n- centers /= group[:, None]\n+ sums = sum_kernel(X, mask[:, :, None], axis=1)\n+ counts = count_kernel(mask, axis=1)\n+ centers = sums / counts\n \n return centers, pred\n", "issue": "Use `ReductionKernel` in k-means example\nThe k-mean example uses `ElementwiseKernel` as an example of a custom kernel, but its algorithm is essentially reduction rather than mapping. 
It would be better to use `ReductionKernel` in the k-means example and to provide another example that uses `ElementwiseKernel`.\n", "before_files": [{"content": "import argparse\nimport contextlib\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport six\n\nimport cupy\n\n\[email protected]\ndef timer(message):\n cupy.cuda.Stream.null.synchronize()\n start = time.time()\n yield\n cupy.cuda.Stream.null.synchronize()\n end = time.time()\n print('%s: %f sec' % (message, end - start))\n\n\n_fit_calc_distances = cupy.ElementwiseKernel(\n 'S data, raw S centers, int32 n_clusters, int32 dim', 'raw S dist',\n '''\n for (int j = 0; j < n_clusters; j++){\n int cent_ind[] = {j, i % dim};\n int dist_ind[] = {i / dim, j};\n double diff = centers[cent_ind] - data;\n atomicAdd(&dist[dist_ind], diff * diff);\n }\n ''',\n 'calc_distances'\n)\n_fit_calc_center = cupy.ElementwiseKernel(\n 'S data, T label, int32 dim', 'raw S centers, raw S group',\n '''\n int cent_ind[] = {label, i % dim};\n atomicAdd(¢ers[cent_ind], data);\n atomicAdd(&group[label], 1);\n ''',\n 'calc_center'\n)\n\n\ndef fit(X, n_clusters, max_iter, use_custom_kernel):\n assert X.ndim == 2\n xp = cupy.get_array_module(X)\n pred = xp.zeros(len(X), dtype=np.int32)\n initial_indexes = np.random.choice(len(X), n_clusters,\n replace=False).astype(np.int32)\n centers = X[initial_indexes]\n data_num = X.shape[0]\n data_dim = X.shape[1]\n\n for _ in six.moves.range(max_iter):\n # calculate distances and label\n if not use_custom_kernel or xp == np:\n distances = xp.linalg.norm(X[:, None, :] - centers[None, :, :],\n axis=2)\n else:\n distances = xp.zeros((data_num, n_clusters), dtype=np.float32)\n _fit_calc_distances(X, centers, n_clusters, data_dim, distances)\n\n new_pred = xp.argmin(distances, axis=1).astype(np.int32)\n if xp.all(new_pred == pred):\n break\n pred = new_pred\n\n # calculate centers\n if not use_custom_kernel or xp == np:\n centers = xp.stack([X[pred == i].mean(axis=0)\n for i in six.moves.range(n_clusters)])\n else:\n centers = xp.zeros((n_clusters, data_dim),\n dtype=np.float32)\n group = xp.zeros(n_clusters, dtype=np.float32)\n label = pred[:, None]\n _fit_calc_center(X, label, data_dim, centers, group)\n group /= data_dim\n centers /= group[:, None]\n\n return centers, pred\n\n\ndef draw(X, n_clusters, centers, pred, output):\n xp = cupy.get_array_module(X)\n for i in six.moves.range(n_clusters):\n labels = X[pred == i]\n if xp == cupy:\n labels = labels.get()\n plt.scatter(labels[:, 0], labels[:, 1], c=np.random.rand(3))\n if xp == cupy:\n centers = centers.get()\n plt.scatter(centers[:, 0], centers[:, 1], s=120, marker='s',\n facecolors='y', edgecolors='k')\n plt.savefig(output)\n\n\ndef run(gpuid, n_clusters, num, max_iter, use_custom_kernel, output):\n samples = np.random.randn(num, 2).astype(np.float32)\n X_train = np.r_[samples + 1, samples - 1]\n repeat = 1\n\n with timer(' CPU '):\n for i in range(repeat):\n centers, pred = fit(X_train, n_clusters, max_iter,\n use_custom_kernel)\n\n with cupy.cuda.Device(gpuid):\n X_train = cupy.asarray(X_train)\n with timer(' GPU '):\n for i in range(repeat):\n centers, pred = fit(X_train, n_clusters, max_iter,\n use_custom_kernel)\n if output is not None:\n index = np.random.choice(10000000, 300, replace=False)\n draw(X_train[index], n_clusters, centers, pred[index], output)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu-id', '-g', default=0, type=int,\n help='ID of GPU.')\n parser.add_argument('--n-clusters', 
'-n', default=2, type=int,\n help='number of clusters')\n parser.add_argument('--num', default=5000000, type=int,\n help='number of samples')\n parser.add_argument('--max-iter', '-m', default=10, type=int,\n help='number of iterations')\n parser.add_argument('--use-custom-kernel', action='store_true',\n default=False, help='use Elementwise kernel')\n parser.add_argument('--output-image', '-o', default=None, type=str,\n help='output image file name')\n args = parser.parse_args()\n run(args.gpu_id, args.n_clusters, args.num, args.max_iter,\n args.use_custom_kernel, args.output_image)\n", "path": "examples/kmeans/kmeans.py"}], "after_files": [{"content": "import argparse\nimport contextlib\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport six\n\nimport cupy\n\n\[email protected]\ndef timer(message):\n cupy.cuda.Stream.null.synchronize()\n start = time.time()\n yield\n cupy.cuda.Stream.null.synchronize()\n end = time.time()\n print('%s: %f sec' % (message, end - start))\n\n\nvar_kernel = cupy.ElementwiseKernel(\n 'T x0, T x1, T c0, T c1', 'T out',\n 'out = (x0 - c0) * (x0 - c0) + (x1 - c1) * (x1 - c1)',\n 'var_kernel'\n)\nsum_kernel = cupy.ReductionKernel(\n 'T x, S mask', 'T out',\n 'mask ? x : 0',\n 'a + b', 'out = a', '0',\n 'sum_kernel'\n)\ncount_kernel = cupy.ReductionKernel(\n 'T mask', 'float32 out',\n 'mask ? 1.0 : 0.0',\n 'a + b', 'out = a', '0.0',\n 'count_kernel'\n)\n\n\ndef fit(X, n_clusters, max_iter, use_custom_kernel):\n assert X.ndim == 2\n xp = cupy.get_array_module(X)\n pred = xp.zeros(len(X), dtype=np.int32)\n initial_indexes = np.random.choice(len(X), n_clusters,\n replace=False).astype(np.int32)\n centers = X[initial_indexes]\n\n for _ in six.moves.range(max_iter):\n # calculate distances and label\n if not use_custom_kernel or xp == np:\n distances = xp.linalg.norm(X[:, None, :] - centers[None, :, :],\n axis=2)\n else:\n distances = var_kernel(X[:, None, 0], X[:, None, 1],\n centers[None, :, 1], centers[None, :, 0])\n\n new_pred = xp.argmin(distances, axis=1).astype(np.int32)\n if xp.all(new_pred == pred):\n break\n pred = new_pred\n\n # calculate centers\n i = xp.arange(n_clusters)\n mask = pred == i[:, None]\n if not use_custom_kernel or xp == np:\n sums = xp.where(mask[:, :, None], X, 0).sum(axis=1)\n counts = xp.count_nonzero(mask, axis=1)\n centers = sums / counts\n else:\n sums = sum_kernel(X, mask[:, :, None], axis=1)\n counts = count_kernel(mask, axis=1)\n centers = sums / counts\n\n return centers, pred\n\n\ndef draw(X, n_clusters, centers, pred, output):\n xp = cupy.get_array_module(X)\n for i in six.moves.range(n_clusters):\n labels = X[pred == i]\n if xp == cupy:\n labels = labels.get()\n plt.scatter(labels[:, 0], labels[:, 1], c=np.random.rand(3))\n if xp == cupy:\n centers = centers.get()\n plt.scatter(centers[:, 0], centers[:, 1], s=120, marker='s',\n facecolors='y', edgecolors='k')\n plt.savefig(output)\n\n\ndef run(gpuid, n_clusters, num, max_iter, use_custom_kernel, output):\n samples = np.random.randn(num, 2).astype(np.float32)\n X_train = np.r_[samples + 1, samples - 1]\n repeat = 1\n\n with timer(' CPU '):\n for i in range(repeat):\n centers, pred = fit(X_train, n_clusters, max_iter,\n use_custom_kernel)\n\n with cupy.cuda.Device(gpuid):\n X_train = cupy.asarray(X_train)\n with timer(' GPU '):\n for i in range(repeat):\n centers, pred = fit(X_train, n_clusters, max_iter,\n use_custom_kernel)\n if output is not None:\n index = np.random.choice(10000000, 300, replace=False)\n draw(X_train[index], n_clusters, centers, 
pred[index], output)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu-id', '-g', default=0, type=int,\n help='ID of GPU.')\n parser.add_argument('--n-clusters', '-n', default=2, type=int,\n help='number of clusters')\n parser.add_argument('--num', default=5000000, type=int,\n help='number of samples')\n parser.add_argument('--max-iter', '-m', default=10, type=int,\n help='number of iterations')\n parser.add_argument('--use-custom-kernel', action='store_true',\n default=False, help='use Elementwise kernel')\n parser.add_argument('--output-image', '-o', default=None, type=str,\n help='output image file name')\n args = parser.parse_args()\n run(args.gpu_id, args.n_clusters, args.num, args.max_iter,\n args.use_custom_kernel, args.output_image)\n", "path": "examples/kmeans/kmeans.py"}]} | 1,806 | 946 |
gh_patches_debug_38523 | rasdani/github-patches | git_diff | ietf-tools__datatracker-5167 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/sync/discrepancies is missing `.distinct()`
At the moment in the second table:

Likely this will not show the same when viewed shortly after now, as I expect the state to be reconciled quickly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/sync/discrepancies.py`
Content:
```
1 from django.db import models
2 from ietf.doc.models import Document, State
3
4 def find_discrepancies():
5 res = []
6
7 title = "Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state"
8
9 docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("ann", "rfcqueue")))).exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor")))
10
11 res.append((title, docs))
12
13 title = "Internet-Drafts that have the IANA Action state \"In Progress\" but do not have a \"IANA\" RFC-Editor state/tag"
14
15 docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("inprog",)))).exclude(tags="iana").exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana")))
16
17 res.append((title, docs))
18
19 title = "Internet-Drafts that have the IANA Action state \"Waiting on RFC Editor\" or \"RFC-Ed-Ack\" but are in the RFC Editor state \"IANA\"/tagged with \"IANA\""
20
21 docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("waitrfc", "rfcedack")))).filter(models.Q(tags="iana") | models.Q(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana"))))
22
23 res.append((title, docs))
24
25 title = "Internet-Drafts that have a state other than \"RFC Ed Queue\", \"RFC Published\" or \"Sent to the RFC Editor\" and have an RFC Editor or IANA Action state"
26
27 docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("rfcqueue", "pub"))) + list(State.objects.filter(used=True, type__in=("draft-stream-iab", "draft-stream-ise", "draft-stream-irtf"), slug="rfc-edit"))).filter(states__in=list(State.objects.filter(used=True, type__in=("draft-iana-action", "draft-rfceditor"))))
28
29 res.append((title, docs))
30
31 for _, docs in res:
32 for d in docs:
33 d.iesg_state = d.get_state("draft-iesg")
34 d.rfc_state = d.get_state("draft-rfceditor")
35 d.iana_action_state = d.get_state("draft-iana-action")
36
37 return res
38
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ietf/sync/discrepancies.py b/ietf/sync/discrepancies.py
--- a/ietf/sync/discrepancies.py
+++ b/ietf/sync/discrepancies.py
@@ -6,25 +6,25 @@
title = "Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state"
- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("ann", "rfcqueue")))).exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor")))
+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("ann", "rfcqueue")))).exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor"))).distinct()
res.append((title, docs))
title = "Internet-Drafts that have the IANA Action state \"In Progress\" but do not have a \"IANA\" RFC-Editor state/tag"
- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("inprog",)))).exclude(tags="iana").exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana")))
+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("inprog",)))).exclude(tags="iana").exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana"))).distinct()
res.append((title, docs))
title = "Internet-Drafts that have the IANA Action state \"Waiting on RFC Editor\" or \"RFC-Ed-Ack\" but are in the RFC Editor state \"IANA\"/tagged with \"IANA\""
- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("waitrfc", "rfcedack")))).filter(models.Q(tags="iana") | models.Q(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana"))))
+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("waitrfc", "rfcedack")))).filter(models.Q(tags="iana") | models.Q(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana")))).distinct()
res.append((title, docs))
title = "Internet-Drafts that have a state other than \"RFC Ed Queue\", \"RFC Published\" or \"Sent to the RFC Editor\" and have an RFC Editor or IANA Action state"
- docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("rfcqueue", "pub"))) + list(State.objects.filter(used=True, type__in=("draft-stream-iab", "draft-stream-ise", "draft-stream-irtf"), slug="rfc-edit"))).filter(states__in=list(State.objects.filter(used=True, type__in=("draft-iana-action", "draft-rfceditor"))))
+ docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("rfcqueue", "pub"))) + list(State.objects.filter(used=True, type__in=("draft-stream-iab", "draft-stream-ise", "draft-stream-irtf"), slug="rfc-edit"))).filter(states__in=list(State.objects.filter(used=True, type__in=("draft-iana-action", "draft-rfceditor")))).distinct()
res.append((title, docs))
| {"golden_diff": "diff --git a/ietf/sync/discrepancies.py b/ietf/sync/discrepancies.py\n--- a/ietf/sync/discrepancies.py\n+++ b/ietf/sync/discrepancies.py\n@@ -6,25 +6,25 @@\n \n title = \"Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state\"\n \n- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"ann\", \"rfcqueue\")))).exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\")))\n+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"ann\", \"rfcqueue\")))).exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\"))).distinct()\n \n res.append((title, docs))\n \n title = \"Internet-Drafts that have the IANA Action state \\\"In Progress\\\" but do not have a \\\"IANA\\\" RFC-Editor state/tag\"\n \n- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"inprog\",)))).exclude(tags=\"iana\").exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\")))\n+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"inprog\",)))).exclude(tags=\"iana\").exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\"))).distinct()\n \n res.append((title, docs))\n \n title = \"Internet-Drafts that have the IANA Action state \\\"Waiting on RFC Editor\\\" or \\\"RFC-Ed-Ack\\\" but are in the RFC Editor state \\\"IANA\\\"/tagged with \\\"IANA\\\"\"\n \n- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"waitrfc\", \"rfcedack\")))).filter(models.Q(tags=\"iana\") | models.Q(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\"))))\n+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"waitrfc\", \"rfcedack\")))).filter(models.Q(tags=\"iana\") | models.Q(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\")))).distinct()\n \n res.append((title, docs))\n \n title = \"Internet-Drafts that have a state other than \\\"RFC Ed Queue\\\", \\\"RFC Published\\\" or \\\"Sent to the RFC Editor\\\" and have an RFC Editor or IANA Action state\"\n \n- docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"rfcqueue\", \"pub\"))) + list(State.objects.filter(used=True, type__in=(\"draft-stream-iab\", \"draft-stream-ise\", \"draft-stream-irtf\"), slug=\"rfc-edit\"))).filter(states__in=list(State.objects.filter(used=True, type__in=(\"draft-iana-action\", \"draft-rfceditor\"))))\n+ docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"rfcqueue\", \"pub\"))) + list(State.objects.filter(used=True, type__in=(\"draft-stream-iab\", \"draft-stream-ise\", \"draft-stream-irtf\"), slug=\"rfc-edit\"))).filter(states__in=list(State.objects.filter(used=True, type__in=(\"draft-iana-action\", \"draft-rfceditor\")))).distinct()\n \n res.append((title, docs))\n", "issue": "/sync/discrepancies is missing `.distinct()`\nAt the moment in the second table:\r\n\r\nLikely this will not show the same when viewed shortly after now, as I expect the state to be reconciled quickly.\n", "before_files": [{"content": "from django.db import 
models\nfrom ietf.doc.models import Document, State\n\ndef find_discrepancies():\n res = []\n\n title = \"Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"ann\", \"rfcqueue\")))).exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\")))\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have the IANA Action state \\\"In Progress\\\" but do not have a \\\"IANA\\\" RFC-Editor state/tag\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"inprog\",)))).exclude(tags=\"iana\").exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\")))\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have the IANA Action state \\\"Waiting on RFC Editor\\\" or \\\"RFC-Ed-Ack\\\" but are in the RFC Editor state \\\"IANA\\\"/tagged with \\\"IANA\\\"\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"waitrfc\", \"rfcedack\")))).filter(models.Q(tags=\"iana\") | models.Q(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\"))))\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have a state other than \\\"RFC Ed Queue\\\", \\\"RFC Published\\\" or \\\"Sent to the RFC Editor\\\" and have an RFC Editor or IANA Action state\"\n\n docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"rfcqueue\", \"pub\"))) + list(State.objects.filter(used=True, type__in=(\"draft-stream-iab\", \"draft-stream-ise\", \"draft-stream-irtf\"), slug=\"rfc-edit\"))).filter(states__in=list(State.objects.filter(used=True, type__in=(\"draft-iana-action\", \"draft-rfceditor\"))))\n\n res.append((title, docs))\n\n for _, docs in res:\n for d in docs:\n d.iesg_state = d.get_state(\"draft-iesg\")\n d.rfc_state = d.get_state(\"draft-rfceditor\")\n d.iana_action_state = d.get_state(\"draft-iana-action\")\n\n return res\n\n", "path": "ietf/sync/discrepancies.py"}], "after_files": [{"content": "from django.db import models\nfrom ietf.doc.models import Document, State\n\ndef find_discrepancies():\n res = []\n\n title = \"Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"ann\", \"rfcqueue\")))).exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\"))).distinct()\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have the IANA Action state \\\"In Progress\\\" but do not have a \\\"IANA\\\" RFC-Editor state/tag\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"inprog\",)))).exclude(tags=\"iana\").exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\"))).distinct()\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have the IANA Action state \\\"Waiting on RFC Editor\\\" or \\\"RFC-Ed-Ack\\\" but are in the RFC Editor state \\\"IANA\\\"/tagged with \\\"IANA\\\"\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"waitrfc\", \"rfcedack\")))).filter(models.Q(tags=\"iana\") 
| models.Q(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\")))).distinct()\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have a state other than \\\"RFC Ed Queue\\\", \\\"RFC Published\\\" or \\\"Sent to the RFC Editor\\\" and have an RFC Editor or IANA Action state\"\n\n docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"rfcqueue\", \"pub\"))) + list(State.objects.filter(used=True, type__in=(\"draft-stream-iab\", \"draft-stream-ise\", \"draft-stream-irtf\"), slug=\"rfc-edit\"))).filter(states__in=list(State.objects.filter(used=True, type__in=(\"draft-iana-action\", \"draft-rfceditor\")))).distinct()\n\n res.append((title, docs))\n\n for _, docs in res:\n for d in docs:\n d.iesg_state = d.get_state(\"draft-iesg\")\n d.rfc_state = d.get_state(\"draft-rfceditor\")\n d.iana_action_state = d.get_state(\"draft-iana-action\")\n\n return res\n\n", "path": "ietf/sync/discrepancies.py"}]} | 1,017 | 854 |
gh_patches_debug_9564 | rasdani/github-patches | git_diff | watchdogpolska__small_eod-494 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expose the id in the API for tags and documentType
The id for tags and documentType is necessary for building a list of these elements in React.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend-project/small_eod/letters/serializers.py`
Content:
```
1 from uuid import uuid4
2 from django.conf import settings
3 from rest_framework import serializers
4 from .models import Letter, DocumentType
5 from ..generic.serializers import UserLogModelSerializer
6 from ..cases.models import Case
7 from ..institutions.models import Institution
8 from ..channels.models import Channel
9 from ..files.apps import minio_app
10 from ..files.serializers import FileSerializer
11
12
13 class DocumentTypeSerializer(serializers.ModelSerializer):
14 class Meta:
15 model = DocumentType
16 fields = ["name"]
17
18
19 class LetterSerializer(UserLogModelSerializer):
20 document_type = serializers.PrimaryKeyRelatedField(
21 many=False, default=None, queryset=DocumentType.objects.all()
22 )
23 case = serializers.PrimaryKeyRelatedField(
24 many=False, default=None, queryset=Case.objects.all()
25 )
26 institution = serializers.PrimaryKeyRelatedField(
27 many=False, default=None, queryset=Institution.objects.all()
28 )
29 channel = serializers.PrimaryKeyRelatedField(
30 many=False, default=None, queryset=Channel.objects.all()
31 )
32 attachments = FileSerializer(many=True, read_only=True)
33
34 class Meta:
35 model = Letter
36 fields = [
37 "id",
38 "direction",
39 "channel",
40 "final",
41 "date",
42 "reference_number",
43 "institution",
44 "case",
45 "attachments",
46 "ordering",
47 "comment",
48 "excerpt",
49 "document_type",
50 "created_on",
51 "created_by",
52 "modified_on",
53 "modified_by",
54 ]
55
56 def create(self, validated_data):
57 channel = validated_data.pop("channel")
58 document_type = validated_data.pop("document_type")
59 institution = validated_data.pop("institution")
60 case = validated_data.pop("case")
61
62 letter = super().create(validated_data)
63 letter.channel = channel
64 letter.document_type = document_type
65 letter.institution = institution
66 letter.case = case
67 letter.save()
68 return letter
69
70 def update(self, instance, validated_data):
71 """
72 nested - variable storing representations of the nested objects
73 of LetterSerializer (Channel, Address and DocumentType).
74 Iterating over those 3 and updating fields of the related objects,
75 using key-value pairs from PATCH request.
76 """
77 nested = []
78 for nested_object in nested:
79 for attr, value in nested_object["data"].items():
80 setattr(nested_object["instance"], attr, value)
81 nested_object["instance"].save()
82 return super().update(instance, validated_data)
83
84
85 class SignRequestSerializer(serializers.Serializer):
86 name = serializers.CharField(max_length=200)
87 method = serializers.CharField(read_only=True)
88 url = serializers.CharField(read_only=True)
89 formData = serializers.DictField(read_only=True, child=serializers.CharField())
90 path = serializers.CharField(read_only=True)
91
92 def create(self, validated_data):
93 path = f'{uuid4()}/{validated_data["name"]}'
94 url, form_data = minio_app.presigned_post_form_data(settings.MINIO_BUCKET, path)
95 return {
96 "name": validated_data["name"],
97 "method": "POST",
98 "url": url,
99 "formData": form_data,
100 "path": path,
101 }
102
```
Path: `backend-project/small_eod/tags/serializers.py`
Content:
```
1 from rest_framework import serializers
2 from .models import Tag
3
4
5 class TagSerializer(serializers.ModelSerializer):
6 class Meta:
7 model = Tag
8 fields = [
9 "name",
10 ]
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend-project/small_eod/letters/serializers.py b/backend-project/small_eod/letters/serializers.py
--- a/backend-project/small_eod/letters/serializers.py
+++ b/backend-project/small_eod/letters/serializers.py
@@ -13,7 +13,7 @@
class DocumentTypeSerializer(serializers.ModelSerializer):
class Meta:
model = DocumentType
- fields = ["name"]
+ fields = ["id", "name"]
class LetterSerializer(UserLogModelSerializer):
diff --git a/backend-project/small_eod/tags/serializers.py b/backend-project/small_eod/tags/serializers.py
--- a/backend-project/small_eod/tags/serializers.py
+++ b/backend-project/small_eod/tags/serializers.py
@@ -6,5 +6,6 @@
class Meta:
model = Tag
fields = [
+ "id",
"name",
]
| {"golden_diff": "diff --git a/backend-project/small_eod/letters/serializers.py b/backend-project/small_eod/letters/serializers.py\n--- a/backend-project/small_eod/letters/serializers.py\n+++ b/backend-project/small_eod/letters/serializers.py\n@@ -13,7 +13,7 @@\n class DocumentTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = DocumentType\n- fields = [\"name\"]\n+ fields = [\"id\", \"name\"]\n \n \n class LetterSerializer(UserLogModelSerializer):\ndiff --git a/backend-project/small_eod/tags/serializers.py b/backend-project/small_eod/tags/serializers.py\n--- a/backend-project/small_eod/tags/serializers.py\n+++ b/backend-project/small_eod/tags/serializers.py\n@@ -6,5 +6,6 @@\n class Meta:\n model = Tag\n fields = [\n+ \"id\",\n \"name\",\n ]\n", "issue": "udost\u0119pnienie id w API dla tags i documentType \nId dla tags i documetType, jest niezb\u0119dne dla zbudowania listy tych element\u00f3w w reakcie. \n", "before_files": [{"content": "from uuid import uuid4\nfrom django.conf import settings\nfrom rest_framework import serializers\nfrom .models import Letter, DocumentType\nfrom ..generic.serializers import UserLogModelSerializer\nfrom ..cases.models import Case\nfrom ..institutions.models import Institution\nfrom ..channels.models import Channel\nfrom ..files.apps import minio_app\nfrom ..files.serializers import FileSerializer\n\n\nclass DocumentTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = DocumentType\n fields = [\"name\"]\n\n\nclass LetterSerializer(UserLogModelSerializer):\n document_type = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=DocumentType.objects.all()\n )\n case = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Case.objects.all()\n )\n institution = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Institution.objects.all()\n )\n channel = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Channel.objects.all()\n )\n attachments = FileSerializer(many=True, read_only=True)\n\n class Meta:\n model = Letter\n fields = [\n \"id\",\n \"direction\",\n \"channel\",\n \"final\",\n \"date\",\n \"reference_number\",\n \"institution\",\n \"case\",\n \"attachments\",\n \"ordering\",\n \"comment\",\n \"excerpt\",\n \"document_type\",\n \"created_on\",\n \"created_by\",\n \"modified_on\",\n \"modified_by\",\n ]\n\n def create(self, validated_data):\n channel = validated_data.pop(\"channel\")\n document_type = validated_data.pop(\"document_type\")\n institution = validated_data.pop(\"institution\")\n case = validated_data.pop(\"case\")\n\n letter = super().create(validated_data)\n letter.channel = channel\n letter.document_type = document_type\n letter.institution = institution\n letter.case = case\n letter.save()\n return letter\n\n def update(self, instance, validated_data):\n \"\"\"\n nested - variable storing representations of the nested objects\n of LetterSerializer (Channel, Address and DocumentType).\n Iterating over those 3 and updating fields of the related objects,\n using key-value pairs from PATCH request.\n \"\"\"\n nested = []\n for nested_object in nested:\n for attr, value in nested_object[\"data\"].items():\n setattr(nested_object[\"instance\"], attr, value)\n nested_object[\"instance\"].save()\n return super().update(instance, validated_data)\n\n\nclass SignRequestSerializer(serializers.Serializer):\n name = serializers.CharField(max_length=200)\n method = serializers.CharField(read_only=True)\n url = 
serializers.CharField(read_only=True)\n formData = serializers.DictField(read_only=True, child=serializers.CharField())\n path = serializers.CharField(read_only=True)\n\n def create(self, validated_data):\n path = f'{uuid4()}/{validated_data[\"name\"]}'\n url, form_data = minio_app.presigned_post_form_data(settings.MINIO_BUCKET, path)\n return {\n \"name\": validated_data[\"name\"],\n \"method\": \"POST\",\n \"url\": url,\n \"formData\": form_data,\n \"path\": path,\n }\n", "path": "backend-project/small_eod/letters/serializers.py"}, {"content": "from rest_framework import serializers\nfrom .models import Tag\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = [\n \"name\",\n ]\n", "path": "backend-project/small_eod/tags/serializers.py"}], "after_files": [{"content": "from uuid import uuid4\nfrom django.conf import settings\nfrom rest_framework import serializers\nfrom .models import Letter, DocumentType\nfrom ..generic.serializers import UserLogModelSerializer\nfrom ..cases.models import Case\nfrom ..institutions.models import Institution\nfrom ..channels.models import Channel\nfrom ..files.apps import minio_app\nfrom ..files.serializers import FileSerializer\n\n\nclass DocumentTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = DocumentType\n fields = [\"id\", \"name\"]\n\n\nclass LetterSerializer(UserLogModelSerializer):\n document_type = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=DocumentType.objects.all()\n )\n case = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Case.objects.all()\n )\n institution = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Institution.objects.all()\n )\n channel = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Channel.objects.all()\n )\n attachments = FileSerializer(many=True, read_only=True)\n\n class Meta:\n model = Letter\n fields = [\n \"id\",\n \"direction\",\n \"channel\",\n \"final\",\n \"date\",\n \"reference_number\",\n \"institution\",\n \"case\",\n \"attachments\",\n \"ordering\",\n \"comment\",\n \"excerpt\",\n \"document_type\",\n \"created_on\",\n \"created_by\",\n \"modified_on\",\n \"modified_by\",\n ]\n\n def create(self, validated_data):\n channel = validated_data.pop(\"channel\")\n document_type = validated_data.pop(\"document_type\")\n institution = validated_data.pop(\"institution\")\n case = validated_data.pop(\"case\")\n\n letter = super().create(validated_data)\n letter.channel = channel\n letter.document_type = document_type\n letter.institution = institution\n letter.case = case\n letter.save()\n return letter\n\n def update(self, instance, validated_data):\n \"\"\"\n nested - variable storing representations of the nested objects\n of LetterSerializer (Channel, Address and DocumentType).\n Iterating over those 3 and updating fields of the related objects,\n using key-value pairs from PATCH request.\n \"\"\"\n nested = []\n for nested_object in nested:\n for attr, value in nested_object[\"data\"].items():\n setattr(nested_object[\"instance\"], attr, value)\n nested_object[\"instance\"].save()\n return super().update(instance, validated_data)\n\n\nclass SignRequestSerializer(serializers.Serializer):\n name = serializers.CharField(max_length=200)\n method = serializers.CharField(read_only=True)\n url = serializers.CharField(read_only=True)\n formData = serializers.DictField(read_only=True, child=serializers.CharField())\n path = serializers.CharField(read_only=True)\n\n 
def create(self, validated_data):\n path = f'{uuid4()}/{validated_data[\"name\"]}'\n url, form_data = minio_app.presigned_post_form_data(settings.MINIO_BUCKET, path)\n return {\n \"name\": validated_data[\"name\"],\n \"method\": \"POST\",\n \"url\": url,\n \"formData\": form_data,\n \"path\": path,\n }\n", "path": "backend-project/small_eod/letters/serializers.py"}, {"content": "from rest_framework import serializers\nfrom .models import Tag\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = [\n \"id\",\n \"name\",\n ]\n", "path": "backend-project/small_eod/tags/serializers.py"}]} | 1,248 | 197 |
gh_patches_debug_19197 | rasdani/github-patches | git_diff | enthought__chaco-717 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove "PopupablePlot"
`PopupablePlot` was introduced in the commit https://github.com/enthought/chaco/commit/4af154cec6f678862ba7e695ae62d681312b50e8#diff-c93657842da48caff25acdb9de9f919f9c04b5301d0fafc28598c9fdeee829f8 but it is not clear why it exists or who uses it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chaco/ui/popupable_plot.py`
Content:
```
1 # Enthought library imports
2 from traits.api import List
3 from chaco.plot import Plot
4 from chaco.plot_containers import VPlotContainer
5 from chaco.tools.pan_tool import PanTool
6 from chaco.tools.zoom_tool import ZoomTool
7 from chaco.ui.plot_window import PlotWindow
8
9 from traitsui.wx.constants import WindowColor
10
11
12 class PopupablePlot(Plot):
13 """A Plot class that pops up in a new window on double click"""
14
15 # FIXME: It would be nice to queue up other types of commands and settings
16 command_queue = List()
17
18 def normal_left_dclick(self, event):
19 plot = Plot(self.data)
20 for data, kw in self.command_queue:
21 plot.plot(data, **kw)
22 plot.title = self.title
23
24 plot.title = self.title
25 container = VPlotContainer(bgcolor=WindowColor)
26 container.add(plot)
27 plot.tools.append(PanTool(plot))
28 plot.overlays.append(ZoomTool(plot))
29 window = PlotWindow(plot=container)
30 window.edit_traits(kind="live", parent=event.window.control)
31
32 def plot(self, data, **kw):
33 """Queue up the plot commands"""
34 self.command_queue.append((data, kw))
35 super().plot(data, **kw)
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chaco/ui/popupable_plot.py b/chaco/ui/popupable_plot.py
deleted file mode 100644
--- a/chaco/ui/popupable_plot.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Enthought library imports
-from traits.api import List
-from chaco.plot import Plot
-from chaco.plot_containers import VPlotContainer
-from chaco.tools.pan_tool import PanTool
-from chaco.tools.zoom_tool import ZoomTool
-from chaco.ui.plot_window import PlotWindow
-
-from traitsui.wx.constants import WindowColor
-
-
-class PopupablePlot(Plot):
- """A Plot class that pops up in a new window on double click"""
-
- # FIXME: It would be nice to queue up other types of commands and settings
- command_queue = List()
-
- def normal_left_dclick(self, event):
- plot = Plot(self.data)
- for data, kw in self.command_queue:
- plot.plot(data, **kw)
- plot.title = self.title
-
- plot.title = self.title
- container = VPlotContainer(bgcolor=WindowColor)
- container.add(plot)
- plot.tools.append(PanTool(plot))
- plot.overlays.append(ZoomTool(plot))
- window = PlotWindow(plot=container)
- window.edit_traits(kind="live", parent=event.window.control)
-
- def plot(self, data, **kw):
- """Queue up the plot commands"""
- self.command_queue.append((data, kw))
- super().plot(data, **kw)
| {"golden_diff": "diff --git a/chaco/ui/popupable_plot.py b/chaco/ui/popupable_plot.py\ndeleted file mode 100644\n--- a/chaco/ui/popupable_plot.py\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-# Enthought library imports\n-from traits.api import List\n-from chaco.plot import Plot\n-from chaco.plot_containers import VPlotContainer\n-from chaco.tools.pan_tool import PanTool\n-from chaco.tools.zoom_tool import ZoomTool\n-from chaco.ui.plot_window import PlotWindow\n-\n-from traitsui.wx.constants import WindowColor\n-\n-\n-class PopupablePlot(Plot):\n- \"\"\"A Plot class that pops up in a new window on double click\"\"\"\n-\n- # FIXME: It would be nice to queue up other types of commands and settings\n- command_queue = List()\n-\n- def normal_left_dclick(self, event):\n- plot = Plot(self.data)\n- for data, kw in self.command_queue:\n- plot.plot(data, **kw)\n- plot.title = self.title\n-\n- plot.title = self.title\n- container = VPlotContainer(bgcolor=WindowColor)\n- container.add(plot)\n- plot.tools.append(PanTool(plot))\n- plot.overlays.append(ZoomTool(plot))\n- window = PlotWindow(plot=container)\n- window.edit_traits(kind=\"live\", parent=event.window.control)\n-\n- def plot(self, data, **kw):\n- \"\"\"Queue up the plot commands\"\"\"\n- self.command_queue.append((data, kw))\n- super().plot(data, **kw)\n", "issue": "Remove \"PopupablePlot\"\n`PopupablePlot` was introduced in the commit https://github.com/enthought/chaco/commit/4af154cec6f678862ba7e695ae62d681312b50e8#diff-c93657842da48caff25acdb9de9f919f9c04b5301d0fafc28598c9fdeee829f8 but it is not clear why it exists or who uses it.\n", "before_files": [{"content": "# Enthought library imports\nfrom traits.api import List\nfrom chaco.plot import Plot\nfrom chaco.plot_containers import VPlotContainer\nfrom chaco.tools.pan_tool import PanTool\nfrom chaco.tools.zoom_tool import ZoomTool\nfrom chaco.ui.plot_window import PlotWindow\n\nfrom traitsui.wx.constants import WindowColor\n\n\nclass PopupablePlot(Plot):\n \"\"\"A Plot class that pops up in a new window on double click\"\"\"\n\n # FIXME: It would be nice to queue up other types of commands and settings\n command_queue = List()\n\n def normal_left_dclick(self, event):\n plot = Plot(self.data)\n for data, kw in self.command_queue:\n plot.plot(data, **kw)\n plot.title = self.title\n\n plot.title = self.title\n container = VPlotContainer(bgcolor=WindowColor)\n container.add(plot)\n plot.tools.append(PanTool(plot))\n plot.overlays.append(ZoomTool(plot))\n window = PlotWindow(plot=container)\n window.edit_traits(kind=\"live\", parent=event.window.control)\n\n def plot(self, data, **kw):\n \"\"\"Queue up the plot commands\"\"\"\n self.command_queue.append((data, kw))\n super().plot(data, **kw)\n", "path": "chaco/ui/popupable_plot.py"}], "after_files": [{"content": null, "path": "chaco/ui/popupable_plot.py"}]} | 723 | 347 |
gh_patches_debug_12556 | rasdani/github-patches | git_diff | DDMAL__CantusDB-726 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ensure that dummy users (i.e. old indexers) can't log in.
Currently, on NewCantus we have created dummy users for people who had an Indexer object but no User account on OldCantus. These dummy users have randomly generated email addresses and no passwords, the rationale being that a user with no password will not be able to log in.
A comment on [this stackoverflow topic](https://stackoverflow.com/questions/35690022/django-create-new-user-without-password) suggests that this is not strictly the case. It would probably be safer to set these dummy users to have [has_useable_password = False](https://docs.djangoproject.com/en/4.2/ref/contrib/auth/#django.contrib.auth.models.User.has_usable_password) if it is not already set up this way.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/management/commands/sync_indexers.py`
Content:
```
1 from django.core.management.base import BaseCommand
2 from django.contrib.auth import get_user_model
3 import requests, json
4 from faker import Faker
5
6 INDEXER_ID_FILE = "indexer_list.txt"
7
8
9 def get_id_list(file_path):
10 indexer_list = []
11 file = open(file_path, "r")
12 for line in file:
13 line = line.strip("\n")
14 indexer_list.append(line)
15 file.close()
16 return indexer_list
17
18
19 def get_new_indexer(indexer_id):
20 # use json-export to get indexer information
21 url = f"http://cantus.uwaterloo.ca/json-node/{indexer_id}"
22 response = requests.get(url)
23 json_response = json.loads(response.content)
24 if json_response["field_first_name"]:
25 first_name = json_response["field_first_name"]["und"][0]["value"]
26 else:
27 first_name = None
28 if json_response["field_family_name"]:
29 family_name = json_response["field_family_name"]["und"][0]["value"]
30 else:
31 family_name = None
32 if json_response["field_indexer_institution"]:
33 institution = json_response["field_indexer_institution"]["und"][0]["value"]
34 else:
35 institution = None
36 if json_response["field_indexer_city"]:
37 city = json_response["field_indexer_city"]["und"][0]["value"]
38 else:
39 city = None
40 if json_response["field_indexer_country"]:
41 country = json_response["field_indexer_country"]["und"][0]["value"]
42 else:
43 country = None
44
45 # check whether the current indexer has a user entry of the same name
46 indexer_full_name = f"{first_name} {family_name}"
47 print(f"{indexer_id} {indexer_full_name}")
48 homonymous_users = get_user_model().objects.filter(
49 full_name__iexact=indexer_full_name
50 )
51 # if the indexer also exists as a user
52 if homonymous_users:
53 assert homonymous_users.count() == 1
54 homonymous_user = homonymous_users.get()
55 print(f"homonymous: {homonymous_user.full_name}")
56 # keep the user as it is (merge the indexer into existing user)
57 # and store the ID of its indexer object
58 homonymous_user.old_indexer_id = indexer_id
59 homonymous_user.is_indexer = True
60 homonymous_user.save()
61 # if the indexer doesn't exist as a user
62 else:
63 faker = Faker()
64 # create a new user with the indexer information
65 get_user_model().objects.create(
66 institution=institution,
67 city=city,
68 country=country,
69 full_name=indexer_full_name,
70 # assign random email to dummy users
71 email=f"{faker.lexify('????????')}@fakeemail.com",
72 # leave the password empty for dummy users
73 # the password can't be empty in login form, so they can't log in
74 password="",
75 old_indexer_id=indexer_id,
76 is_indexer=True,
77 )
78
79
80 class Command(BaseCommand):
81 def add_arguments(self, parser):
82 pass
83
84 def handle(self, *args, **options):
85 indexer_list = get_id_list(INDEXER_ID_FILE)
86 for id in indexer_list:
87 get_new_indexer(id)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/management/commands/sync_indexers.py b/django/cantusdb_project/main_app/management/commands/sync_indexers.py
--- a/django/cantusdb_project/main_app/management/commands/sync_indexers.py
+++ b/django/cantusdb_project/main_app/management/commands/sync_indexers.py
@@ -69,12 +69,9 @@
full_name=indexer_full_name,
# assign random email to dummy users
email=f"{faker.lexify('????????')}@fakeemail.com",
- # leave the password empty for dummy users
- # the password can't be empty in login form, so they can't log in
- password="",
old_indexer_id=indexer_id,
is_indexer=True,
- )
+ ).set_unusable_password() # Set unusable password so the user can't log in or access reset password page
class Command(BaseCommand):
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/management/commands/sync_indexers.py b/django/cantusdb_project/main_app/management/commands/sync_indexers.py\n--- a/django/cantusdb_project/main_app/management/commands/sync_indexers.py\n+++ b/django/cantusdb_project/main_app/management/commands/sync_indexers.py\n@@ -69,12 +69,9 @@\n full_name=indexer_full_name,\n # assign random email to dummy users\n email=f\"{faker.lexify('????????')}@fakeemail.com\",\n- # leave the password empty for dummy users\n- # the password can't be empty in login form, so they can't log in\n- password=\"\",\n old_indexer_id=indexer_id,\n is_indexer=True,\n- )\n+ ).set_unusable_password() # Set unusable password so the user can't log in or access reset password page\n \n \n class Command(BaseCommand):\n", "issue": "Ensure that dummy users (i.e. old indexers) can't log in.\nCurrently, on NewCantus we have created dummy users for people who had an Indexer object but no User account on OldCantus. These dummy users have randomly generated email addresses and no passwords, the rationale being that a user with no password will not be able to log in.\r\n\r\nA comment on [this stackoverflow topic](https://stackoverflow.com/questions/35690022/django-create-new-user-without-password) suggests that this is not strictly the case. It would probably be safer to set these dummy users to have [has_useable_password = False](https://docs.djangoproject.com/en/4.2/ref/contrib/auth/#django.contrib.auth.models.User.has_usable_password) if it is not already set up this way.\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom django.contrib.auth import get_user_model\nimport requests, json\nfrom faker import Faker\n\nINDEXER_ID_FILE = \"indexer_list.txt\"\n\n\ndef get_id_list(file_path):\n indexer_list = []\n file = open(file_path, \"r\")\n for line in file:\n line = line.strip(\"\\n\")\n indexer_list.append(line)\n file.close()\n return indexer_list\n\n\ndef get_new_indexer(indexer_id):\n # use json-export to get indexer information\n url = f\"http://cantus.uwaterloo.ca/json-node/{indexer_id}\"\n response = requests.get(url)\n json_response = json.loads(response.content)\n if json_response[\"field_first_name\"]:\n first_name = json_response[\"field_first_name\"][\"und\"][0][\"value\"]\n else:\n first_name = None\n if json_response[\"field_family_name\"]:\n family_name = json_response[\"field_family_name\"][\"und\"][0][\"value\"]\n else:\n family_name = None\n if json_response[\"field_indexer_institution\"]:\n institution = json_response[\"field_indexer_institution\"][\"und\"][0][\"value\"]\n else:\n institution = None\n if json_response[\"field_indexer_city\"]:\n city = json_response[\"field_indexer_city\"][\"und\"][0][\"value\"]\n else:\n city = None\n if json_response[\"field_indexer_country\"]:\n country = json_response[\"field_indexer_country\"][\"und\"][0][\"value\"]\n else:\n country = None\n\n # check whether the current indexer has a user entry of the same name\n indexer_full_name = f\"{first_name} {family_name}\"\n print(f\"{indexer_id} {indexer_full_name}\")\n homonymous_users = get_user_model().objects.filter(\n full_name__iexact=indexer_full_name\n )\n # if the indexer also exists as a user\n if homonymous_users:\n assert homonymous_users.count() == 1\n homonymous_user = homonymous_users.get()\n print(f\"homonymous: {homonymous_user.full_name}\")\n # keep the user as it is (merge the indexer into existing user)\n # and store the ID of its indexer object\n 
homonymous_user.old_indexer_id = indexer_id\n homonymous_user.is_indexer = True\n homonymous_user.save()\n # if the indexer doesn't exist as a user\n else:\n faker = Faker()\n # create a new user with the indexer information\n get_user_model().objects.create(\n institution=institution,\n city=city,\n country=country,\n full_name=indexer_full_name,\n # assign random email to dummy users\n email=f\"{faker.lexify('????????')}@fakeemail.com\",\n # leave the password empty for dummy users\n # the password can't be empty in login form, so they can't log in\n password=\"\",\n old_indexer_id=indexer_id,\n is_indexer=True,\n )\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n indexer_list = get_id_list(INDEXER_ID_FILE)\n for id in indexer_list:\n get_new_indexer(id)\n", "path": "django/cantusdb_project/main_app/management/commands/sync_indexers.py"}], "after_files": [{"content": "from django.core.management.base import BaseCommand\nfrom django.contrib.auth import get_user_model\nimport requests, json\nfrom faker import Faker\n\nINDEXER_ID_FILE = \"indexer_list.txt\"\n\n\ndef get_id_list(file_path):\n indexer_list = []\n file = open(file_path, \"r\")\n for line in file:\n line = line.strip(\"\\n\")\n indexer_list.append(line)\n file.close()\n return indexer_list\n\n\ndef get_new_indexer(indexer_id):\n # use json-export to get indexer information\n url = f\"http://cantus.uwaterloo.ca/json-node/{indexer_id}\"\n response = requests.get(url)\n json_response = json.loads(response.content)\n if json_response[\"field_first_name\"]:\n first_name = json_response[\"field_first_name\"][\"und\"][0][\"value\"]\n else:\n first_name = None\n if json_response[\"field_family_name\"]:\n family_name = json_response[\"field_family_name\"][\"und\"][0][\"value\"]\n else:\n family_name = None\n if json_response[\"field_indexer_institution\"]:\n institution = json_response[\"field_indexer_institution\"][\"und\"][0][\"value\"]\n else:\n institution = None\n if json_response[\"field_indexer_city\"]:\n city = json_response[\"field_indexer_city\"][\"und\"][0][\"value\"]\n else:\n city = None\n if json_response[\"field_indexer_country\"]:\n country = json_response[\"field_indexer_country\"][\"und\"][0][\"value\"]\n else:\n country = None\n\n # check whether the current indexer has a user entry of the same name\n indexer_full_name = f\"{first_name} {family_name}\"\n print(f\"{indexer_id} {indexer_full_name}\")\n homonymous_users = get_user_model().objects.filter(\n full_name__iexact=indexer_full_name\n )\n # if the indexer also exists as a user\n if homonymous_users:\n assert homonymous_users.count() == 1\n homonymous_user = homonymous_users.get()\n print(f\"homonymous: {homonymous_user.full_name}\")\n # keep the user as it is (merge the indexer into existing user)\n # and store the ID of its indexer object\n homonymous_user.old_indexer_id = indexer_id\n homonymous_user.is_indexer = True\n homonymous_user.save()\n # if the indexer doesn't exist as a user\n else:\n faker = Faker()\n # create a new user with the indexer information\n get_user_model().objects.create(\n institution=institution,\n city=city,\n country=country,\n full_name=indexer_full_name,\n # assign random email to dummy users\n email=f\"{faker.lexify('????????')}@fakeemail.com\",\n old_indexer_id=indexer_id,\n is_indexer=True,\n ).set_unusable_password() # Set unusable password so the user can't log in or access reset password page\n\n\nclass Command(BaseCommand):\n def add_arguments(self, 
parser):\n pass\n\n def handle(self, *args, **options):\n indexer_list = get_id_list(INDEXER_ID_FILE)\n for id in indexer_list:\n get_new_indexer(id)\n", "path": "django/cantusdb_project/main_app/management/commands/sync_indexers.py"}]} | 1,307 | 217 |
gh_patches_debug_5627 | rasdani/github-patches | git_diff | nf-core__tools-2075 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lint linter config for Python < 3.6 and issue warning if Actions are using Python 3.6 and older
### Description of the bug
```
Run nf-core -l lint_log.txt lint --dir ${GITHUB_WORKSPACE} --markdown lint_results.md
```
```python
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/3.6.15/x64/bin/nf-core", line 5, in <module>
from nf_core.__main__ import run_nf_core
File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/__main__.py", line 19, in <module>
import nf_core.lint
File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/lint/__init__.py", line 23, in <module>
import nf_core.modules.lint
File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/__init__.py", line 1, in <module>
from .bump_versions import ModuleVersionBumper
File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/bump_versions.py", line 24, in <module>
from .modules_command import ModuleCommand
File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/modules_command.py", line [11](https://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519#step:6:12), in <module>
from .modules_json import ModulesJson
File "/opt/hostedtoolcache/Python/3.6.[15](https://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519#step:6:16)/x64/lib/python3.6/site-packages/nf_core/modules/modules_json.py", line 15, in <module>
import nf_core.modules.modules_repo
File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/modules_repo.py", line 5, in <module>
from importlib.resources import path
ModuleNotFoundError: No module named 'importlib.resources'
```
```
Error: Process completed with exit code 1.
```
### Command used and terminal output
```console
https://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519
```
### System information
GHA
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import find_packages, setup
4
5 version = "2.7dev"
6
7 with open("README.md") as f:
8 readme = f.read()
9
10 with open("requirements.txt") as f:
11 required = f.read().splitlines()
12
13 setup(
14 name="nf-core",
15 version=version,
16 description="Helper tools for use with nf-core Nextflow pipelines.",
17 long_description=readme,
18 long_description_content_type="text/markdown",
19 keywords=[
20 "nf-core",
21 "nextflow",
22 "bioinformatics",
23 "workflow",
24 "pipeline",
25 "biology",
26 "sequencing",
27 "NGS",
28 "next generation sequencing",
29 ],
30 author="Phil Ewels",
31 author_email="[email protected]",
32 url="https://github.com/nf-core/tools",
33 license="MIT",
34 entry_points={
35 "console_scripts": ["nf-core=nf_core.__main__:run_nf_core"],
36 "refgenie.hooks.post_update": ["nf-core-refgenie=nf_core.refgenie:update_config"],
37 },
38 install_requires=required,
39 packages=find_packages(exclude=("docs")),
40 include_package_data=True,
41 zip_safe=False,
42 )
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -35,6 +35,7 @@
"console_scripts": ["nf-core=nf_core.__main__:run_nf_core"],
"refgenie.hooks.post_update": ["nf-core-refgenie=nf_core.refgenie:update_config"],
},
+ python_requires=">=3.7, <4",
install_requires=required,
packages=find_packages(exclude=("docs")),
include_package_data=True,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,6 +35,7 @@\n \"console_scripts\": [\"nf-core=nf_core.__main__:run_nf_core\"],\n \"refgenie.hooks.post_update\": [\"nf-core-refgenie=nf_core.refgenie:update_config\"],\n },\n+ python_requires=\">=3.7, <4\",\n install_requires=required,\n packages=find_packages(exclude=(\"docs\")),\n include_package_data=True,\n", "issue": "Lint linter config for Python < 3.6 and issue warning if Actions are using Python 3.6 and older\n### Description of the bug\r\n\r\n```\r\nRun nf-core -l lint_log.txt lint --dir ${GITHUB_WORKSPACE} --markdown lint_results.md\r\n```\r\n```python\r\nTraceback (most recent call last):\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/bin/nf-core\", line 5, in <module>\r\n from nf_core.__main__ import run_nf_core\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/__main__.py\", line 19, in <module>\r\n import nf_core.lint\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/lint/__init__.py\", line 23, in <module>\r\n import nf_core.modules.lint\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/__init__.py\", line 1, in <module>\r\n from .bump_versions import ModuleVersionBumper\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/bump_versions.py\", line 24, in <module>\r\n from .modules_command import ModuleCommand\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/modules_command.py\", line [11](https://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519#step:6:12), in <module>\r\n from .modules_json import ModulesJson\r\n File \"/opt/hostedtoolcache/Python/3.6.[15](https://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519#step:6:16)/x64/lib/python3.6/site-packages/nf_core/modules/modules_json.py\", line 15, in <module>\r\n import nf_core.modules.modules_repo\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/modules_repo.py\", line 5, in <module>\r\n from importlib.resources import path\r\nModuleNotFoundError: No module named 'importlib.resources'\r\n```\r\n```\r\nError: Process completed with exit code 1.\r\n```\r\n\r\n### Command used and terminal output\r\n\r\n```console\r\nhttps://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519\r\n```\r\n\r\n\r\n### System information\r\n\r\nGHA\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import find_packages, setup\n\nversion = \"2.7dev\"\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\nwith open(\"requirements.txt\") as f:\n required = f.read().splitlines()\n\nsetup(\n name=\"nf-core\",\n version=version,\n description=\"Helper tools for use with nf-core Nextflow pipelines.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"nf-core\",\n \"nextflow\",\n \"bioinformatics\",\n \"workflow\",\n \"pipeline\",\n \"biology\",\n \"sequencing\",\n \"NGS\",\n \"next generation sequencing\",\n ],\n author=\"Phil Ewels\",\n author_email=\"[email protected]\",\n url=\"https://github.com/nf-core/tools\",\n license=\"MIT\",\n entry_points={\n \"console_scripts\": [\"nf-core=nf_core.__main__:run_nf_core\"],\n \"refgenie.hooks.post_update\": [\"nf-core-refgenie=nf_core.refgenie:update_config\"],\n },\n install_requires=required,\n packages=find_packages(exclude=(\"docs\")),\n 
include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import find_packages, setup\n\nversion = \"2.7dev\"\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\nwith open(\"requirements.txt\") as f:\n required = f.read().splitlines()\n\nsetup(\n name=\"nf-core\",\n version=version,\n description=\"Helper tools for use with nf-core Nextflow pipelines.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"nf-core\",\n \"nextflow\",\n \"bioinformatics\",\n \"workflow\",\n \"pipeline\",\n \"biology\",\n \"sequencing\",\n \"NGS\",\n \"next generation sequencing\",\n ],\n author=\"Phil Ewels\",\n author_email=\"[email protected]\",\n url=\"https://github.com/nf-core/tools\",\n license=\"MIT\",\n entry_points={\n \"console_scripts\": [\"nf-core=nf_core.__main__:run_nf_core\"],\n \"refgenie.hooks.post_update\": [\"nf-core-refgenie=nf_core.refgenie:update_config\"],\n },\n python_requires=\">=3.7, <4\",\n install_requires=required,\n packages=find_packages(exclude=(\"docs\")),\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,269 | 111 |
gh_patches_debug_9252 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-2889 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve handling of self-referential strategies
As mentioned in #2783, the following strategy is not explicitly forbidden but fails with an `AssertionError`:
```python
from hypothesis import strategies as st
SELF_REF = st.recursive(
st.deferred(lambda: SELF_REF | st.booleans()),
lambda s: st.lists(s, min_size=1)
)
```
There is an alternative strategy that produces the same data, but doesn't fail the same way:
```python
SELF_REF = st.recursive(
st.booleans(),
lambda s: st.lists(s, min_size=1)
)
```
I am not sure if all self-referential strategies can be rewritten like this, but probably we can either explicitly forbid such strategies or revisit our drawing approach for them.
Here is my reasoning from #2783
> As far as I see, the cap is needed to prevent the drawing from this strategy & generating a certain maximum amount of leaves. However, assuming a single thread (more on the multi-threaded behavior in the next section) and such a self-referential strategy, I am not sure if capping is needed as it is - we can just apply it once on the first capped usage and make all subsequent calls no-op (e.g., just yield without modifying marked). Then we still have the marker set only once on the very first RecursiveStrategy.do_draw call, and it will be monotonically decreasing. Therefore, we'll have the max size properly maintained, and there will be no oversized subtrees because, at some point, LimitReached will occur.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hypothesis-python/src/hypothesis/strategies/_internal/recursive.py`
Content:
```
1 # This file is part of Hypothesis, which may be found at
2 # https://github.com/HypothesisWorks/hypothesis/
3 #
4 # Most of this work is copyright (C) 2013-2021 David R. MacIver
5 # ([email protected]), but it contains contributions by others. See
6 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
7 # consult the git log if you need to determine who owns an individual
8 # contribution.
9 #
10 # This Source Code Form is subject to the terms of the Mozilla Public License,
11 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
12 # obtain one at https://mozilla.org/MPL/2.0/.
13 #
14 # END HEADER
15
16 import threading
17 from contextlib import contextmanager
18
19 from hypothesis.internal.lazyformat import lazyformat
20 from hypothesis.internal.reflection import get_pretty_function_description
21 from hypothesis.strategies._internal.strategies import (
22 OneOfStrategy,
23 SearchStrategy,
24 check_strategy,
25 )
26
27
28 class LimitReached(BaseException):
29 pass
30
31
32 class LimitedStrategy(SearchStrategy):
33 def __init__(self, strategy):
34 super().__init__()
35 self.base_strategy = strategy
36 self._threadlocal = threading.local()
37
38 @property
39 def marker(self):
40 return getattr(self._threadlocal, "marker", 0)
41
42 @marker.setter
43 def marker(self, value):
44 self._threadlocal.marker = value
45
46 @property
47 def currently_capped(self):
48 return getattr(self._threadlocal, "currently_capped", False)
49
50 @currently_capped.setter
51 def currently_capped(self, value):
52 self._threadlocal.currently_capped = value
53
54 def __repr__(self):
55 return f"LimitedStrategy({self.base_strategy!r})"
56
57 def do_validate(self):
58 self.base_strategy.validate()
59
60 def do_draw(self, data):
61 assert self.currently_capped
62 if self.marker <= 0:
63 raise LimitReached()
64 self.marker -= 1
65 return data.draw(self.base_strategy)
66
67 @contextmanager
68 def capped(self, max_templates):
69 assert not self.currently_capped
70 try:
71 self.currently_capped = True
72 self.marker = max_templates
73 yield
74 finally:
75 self.currently_capped = False
76
77
78 class RecursiveStrategy(SearchStrategy):
79 def __init__(self, base, extend, max_leaves):
80 self.max_leaves = max_leaves
81 self.base = base
82 self.limited_base = LimitedStrategy(base)
83 self.extend = extend
84
85 strategies = [self.limited_base, self.extend(self.limited_base)]
86 while 2 ** (len(strategies) - 1) <= max_leaves:
87 strategies.append(extend(OneOfStrategy(tuple(strategies))))
88 self.strategy = OneOfStrategy(strategies)
89
90 def __repr__(self):
91 if not hasattr(self, "_cached_repr"):
92 self._cached_repr = "recursive(%r, %s, max_leaves=%d)" % (
93 self.base,
94 get_pretty_function_description(self.extend),
95 self.max_leaves,
96 )
97 return self._cached_repr
98
99 def do_validate(self):
100 check_strategy(self.base, "base")
101 extended = self.extend(self.limited_base)
102 check_strategy(extended, f"extend({self.limited_base!r})")
103 self.limited_base.validate()
104 extended.validate()
105
106 def do_draw(self, data):
107 count = 0
108 while True:
109 try:
110 with self.limited_base.capped(self.max_leaves):
111 return data.draw(self.strategy)
112 except LimitReached:
113 # Workaround for possible coverage bug - this branch is definitely
114 # covered but for some reason is showing up as not covered.
115 if count == 0: # pragma: no branch
116 data.note_event(
117 lazyformat(
118 "Draw for %r exceeded max_leaves and had to be retried",
119 self,
120 )
121 )
122 count += 1
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py b/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py
--- a/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py
+++ b/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py
@@ -66,13 +66,13 @@
@contextmanager
def capped(self, max_templates):
- assert not self.currently_capped
try:
+ was_capped = self.currently_capped
self.currently_capped = True
self.marker = max_templates
yield
finally:
- self.currently_capped = False
+ self.currently_capped = was_capped
class RecursiveStrategy(SearchStrategy):
| {"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py b/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py\n--- a/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py\n+++ b/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py\n@@ -66,13 +66,13 @@\n \n @contextmanager\n def capped(self, max_templates):\n- assert not self.currently_capped\n try:\n+ was_capped = self.currently_capped\n self.currently_capped = True\n self.marker = max_templates\n yield\n finally:\n- self.currently_capped = False\n+ self.currently_capped = was_capped\n \n \n class RecursiveStrategy(SearchStrategy):\n", "issue": "Improve handling of self-referential strategies\nAs mentioned in #2783, the following strategy is not explicitly forbidden but fails with an `AssertionError`:\r\n\r\n```python\r\nfrom hypothesis import strategies as st\r\n\r\nSELF_REF = st.recursive(\r\n st.deferred(lambda: SELF_REF | st.booleans()),\r\n lambda s: st.lists(s, min_size=1)\r\n)\r\n```\r\n\r\nThere is an alternative strategy that produces the same data, but doesn't fail the same way:\r\n\r\n```python\r\nSELF_REF = st.recursive(\r\n st.booleans(),\r\n lambda s: st.lists(s, min_size=1)\r\n)\r\n```\r\n\r\nI am not sure if all self-referential strategies can be rewritten like this, but probably we can either explicitly forbid such strategies or revisit our drawing approach for them.\r\n\r\nHere is my reasoning from #2783 \r\n\r\n> As far as I see, the cap is needed to prevent the drawing from this strategy & generating a certain maximum amount of leaves. However, assuming a single thread (more on the multi-threaded behavior in the next section) and such a self-referential strategy, I am not sure if capping is needed as it is - we can just apply it once on the first capped usage and make all subsequent calls no-op (e.g., just yield without modifying marked). Then we still have the marker set only once on the very first RecursiveStrategy.do_draw call, and it will be monotonically decreasing. Therefore, we'll have the max size properly maintained, and there will be no oversized subtrees because, at some point, LimitReached will occur.\r\n\r\n\n", "before_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2021 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nimport threading\nfrom contextlib import contextmanager\n\nfrom hypothesis.internal.lazyformat import lazyformat\nfrom hypothesis.internal.reflection import get_pretty_function_description\nfrom hypothesis.strategies._internal.strategies import (\n OneOfStrategy,\n SearchStrategy,\n check_strategy,\n)\n\n\nclass LimitReached(BaseException):\n pass\n\n\nclass LimitedStrategy(SearchStrategy):\n def __init__(self, strategy):\n super().__init__()\n self.base_strategy = strategy\n self._threadlocal = threading.local()\n\n @property\n def marker(self):\n return getattr(self._threadlocal, \"marker\", 0)\n\n @marker.setter\n def marker(self, value):\n self._threadlocal.marker = value\n\n @property\n def currently_capped(self):\n return getattr(self._threadlocal, \"currently_capped\", False)\n\n @currently_capped.setter\n def currently_capped(self, value):\n self._threadlocal.currently_capped = value\n\n def __repr__(self):\n return f\"LimitedStrategy({self.base_strategy!r})\"\n\n def do_validate(self):\n self.base_strategy.validate()\n\n def do_draw(self, data):\n assert self.currently_capped\n if self.marker <= 0:\n raise LimitReached()\n self.marker -= 1\n return data.draw(self.base_strategy)\n\n @contextmanager\n def capped(self, max_templates):\n assert not self.currently_capped\n try:\n self.currently_capped = True\n self.marker = max_templates\n yield\n finally:\n self.currently_capped = False\n\n\nclass RecursiveStrategy(SearchStrategy):\n def __init__(self, base, extend, max_leaves):\n self.max_leaves = max_leaves\n self.base = base\n self.limited_base = LimitedStrategy(base)\n self.extend = extend\n\n strategies = [self.limited_base, self.extend(self.limited_base)]\n while 2 ** (len(strategies) - 1) <= max_leaves:\n strategies.append(extend(OneOfStrategy(tuple(strategies))))\n self.strategy = OneOfStrategy(strategies)\n\n def __repr__(self):\n if not hasattr(self, \"_cached_repr\"):\n self._cached_repr = \"recursive(%r, %s, max_leaves=%d)\" % (\n self.base,\n get_pretty_function_description(self.extend),\n self.max_leaves,\n )\n return self._cached_repr\n\n def do_validate(self):\n check_strategy(self.base, \"base\")\n extended = self.extend(self.limited_base)\n check_strategy(extended, f\"extend({self.limited_base!r})\")\n self.limited_base.validate()\n extended.validate()\n\n def do_draw(self, data):\n count = 0\n while True:\n try:\n with self.limited_base.capped(self.max_leaves):\n return data.draw(self.strategy)\n except LimitReached:\n # Workaround for possible coverage bug - this branch is definitely\n # covered but for some reason is showing up as not covered.\n if count == 0: # pragma: no branch\n data.note_event(\n lazyformat(\n \"Draw for %r exceeded max_leaves and had to be retried\",\n self,\n )\n )\n count += 1\n", "path": "hypothesis-python/src/hypothesis/strategies/_internal/recursive.py"}], "after_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2021 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nimport threading\nfrom contextlib import contextmanager\n\nfrom hypothesis.internal.lazyformat import lazyformat\nfrom hypothesis.internal.reflection import get_pretty_function_description\nfrom hypothesis.strategies._internal.strategies import (\n OneOfStrategy,\n SearchStrategy,\n check_strategy,\n)\n\n\nclass LimitReached(BaseException):\n pass\n\n\nclass LimitedStrategy(SearchStrategy):\n def __init__(self, strategy):\n super().__init__()\n self.base_strategy = strategy\n self._threadlocal = threading.local()\n\n @property\n def marker(self):\n return getattr(self._threadlocal, \"marker\", 0)\n\n @marker.setter\n def marker(self, value):\n self._threadlocal.marker = value\n\n @property\n def currently_capped(self):\n return getattr(self._threadlocal, \"currently_capped\", False)\n\n @currently_capped.setter\n def currently_capped(self, value):\n self._threadlocal.currently_capped = value\n\n def __repr__(self):\n return f\"LimitedStrategy({self.base_strategy!r})\"\n\n def do_validate(self):\n self.base_strategy.validate()\n\n def do_draw(self, data):\n assert self.currently_capped\n if self.marker <= 0:\n raise LimitReached()\n self.marker -= 1\n return data.draw(self.base_strategy)\n\n @contextmanager\n def capped(self, max_templates):\n try:\n was_capped = self.currently_capped\n self.currently_capped = True\n self.marker = max_templates\n yield\n finally:\n self.currently_capped = was_capped\n\n\nclass RecursiveStrategy(SearchStrategy):\n def __init__(self, base, extend, max_leaves):\n self.max_leaves = max_leaves\n self.base = base\n self.limited_base = LimitedStrategy(base)\n self.extend = extend\n\n strategies = [self.limited_base, self.extend(self.limited_base)]\n while 2 ** (len(strategies) - 1) <= max_leaves:\n strategies.append(extend(OneOfStrategy(tuple(strategies))))\n self.strategy = OneOfStrategy(strategies)\n\n def __repr__(self):\n if not hasattr(self, \"_cached_repr\"):\n self._cached_repr = \"recursive(%r, %s, max_leaves=%d)\" % (\n self.base,\n get_pretty_function_description(self.extend),\n self.max_leaves,\n )\n return self._cached_repr\n\n def do_validate(self):\n check_strategy(self.base, \"base\")\n extended = self.extend(self.limited_base)\n check_strategy(extended, f\"extend({self.limited_base!r})\")\n self.limited_base.validate()\n extended.validate()\n\n def do_draw(self, data):\n count = 0\n while True:\n try:\n with self.limited_base.capped(self.max_leaves):\n return data.draw(self.strategy)\n except LimitReached:\n # Workaround for possible coverage bug - this branch is definitely\n # covered but for some reason is showing up as not covered.\n if count == 0: # pragma: no branch\n data.note_event(\n lazyformat(\n \"Draw for %r exceeded max_leaves and had to be retried\",\n self,\n )\n )\n count += 1\n", "path": "hypothesis-python/src/hypothesis/strategies/_internal/recursive.py"}]} | 1,745 | 180 |
gh_patches_debug_65637 | rasdani/github-patches | git_diff | saulpw__visidata-2036 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: cannot read .vds with expression column
**Small description**
Visidata cannot read back sheet(s) it saved as `.vds` if they contain an
expression column.
"KeyError: 'ExprColumn'" shows as error, resulting in a partial read.
**Expected result**
It should be able to read those files.
**Actual result with ~~screenshot~~ stacktrace**
```
Traceback (most recent call last):
File "/nix/store/z4xjb4j8i73894r2wqjvlnps9j60rjr0-visidata-2.11/lib/python3.10/site-packages/visidata/threads.py", line 198, in _toplevelTryFunc
t.status = func(*args, **kwargs)
File "/nix/store/z4xjb4j8i73894r2wqjvlnps9j60rjr0-visidata-2.11/lib/python3.10/site-packages/visidata/pyobj.py", line 26, in reload
for r in self.iterload():
File "/nix/store/z4xjb4j8i73894r2wqjvlnps9j60rjr0-visidata-2.11/lib/python3.10/site-packages/visidata/loaders/vds.py", line 76, in iterload
c = globals()[classname](d.pop('name'), sheet=self)
KeyError: 'ExprColumn'
```
**Steps to reproduce with sample data and a .vd**
Create and save some test sheet with an expr column with this `cmdlog.vdj`:
```
#!vd -p
{"col": "", "row": "", "longname": "open-new", "input": "", "keystrokes": "Shift+A", "comment": "Open new empty sheet"}
{"sheet": "unnamed", "col": "A", "row": "", "longname": "type-int", "input": "", "keystrokes": "#", "comment": "set type of current column to int"}
{"sheet": "unnamed", "col": "", "row": "", "longname": "add-row", "input": "", "keystrokes": "a", "comment": "append a blank row"}
{"sheet": "unnamed", "col": "A", "row": "0", "longname": "edit-cell", "input": "2", "keystrokes": "e", "comment": "edit contents of current cell"}
{"sheet": "unnamed", "col": "A", "row": "", "longname": "addcol-expr", "input": "A*2", "keystrokes": "=", "comment": "create new column from Python expression, with column names as variables"}
{"sheet": "unnamed", "col": "", "row": "", "longname": "save-sheet", "input": "sheet.vds", "keystrokes": "Ctrl+S", "comment": "save current sheet to filename in format determined by extension (default .tsv)"}
```
This produces `sheet.vds` as follows, which seems valid:
```
#{"name": "unnamed"}
#{"name": "A", "width": 4, "height": 1, "expr": null, "keycol": 0, "formatter": "", "fmtstr": "", "voffset": 0, "hoffset": 0, "aggstr": "", "type": "int", "col": "Column"}
#{"name": "A*2", "width": 5, "height": 1, "expr": "A*2", "keycol": 0, "formatter": "", "fmtstr": "", "voffset": 0, "hoffset": 0, "aggstr": "", "type": "", "col": "ExprColumn"}
{"A": 2, "A*2": 4}
```
Quit visidata and open that file again with `vd sheet.vds`,
and observe the loading error.
**Additional context**
- visidata v2.11
- python 3.10.12
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/loaders/vds.py`
Content:
```
1 'Custom VisiData save format'
2
3 import json
4
5 from visidata import VisiData, JsonSheet, Progress, IndexSheet, SettableColumn, ItemColumn
6
7
8 NL='\n'
9
10 @VisiData.api
11 def open_vds(vd, p):
12 return VdsIndexSheet(p.name, source=p)
13
14
15 @VisiData.api
16 def save_vds(vd, p, *sheets):
17 'Save in custom VisiData format, preserving columns and their attributes.'
18
19 with p.open(mode='w', encoding='utf-8') as fp:
20 for vs in sheets:
21 # class and attrs for vs
22 d = { 'name': vs.name, }
23 fp.write('#'+json.dumps(d)+NL)
24
25 # class and attrs for each column in vs
26 for col in vs.visibleCols:
27 d = col.__getstate__()
28 if isinstance(col, SettableColumn):
29 d['col'] = 'Column'
30 else:
31 d['col'] = type(col).__name__
32 fp.write('#'+json.dumps(d)+NL)
33
34 with Progress(gerund='saving'):
35 for row in vs.iterdispvals(*vs.visibleCols, format=False):
36 d = {col.name:val for col, val in row.items()}
37 fp.write(json.dumps(d, default=str)+NL)
38
39
40 class VdsIndexSheet(IndexSheet):
41 def iterload(self):
42 vs = None
43 with self.source.open(encoding='utf-8') as fp:
44 line = fp.readline()
45 while line:
46 if line.startswith('#{'):
47 d = json.loads(line[1:])
48 if 'col' not in d:
49 vs = VdsSheet(d.pop('name'), columns=[], source=self.source, source_fpos=fp.tell())
50 yield vs
51 line = fp.readline()
52
53
54 class VdsSheet(JsonSheet):
55 def newRow(self):
56 return {} # rowdef: dict
57
58 def iterload(self):
59 self.colnames = {}
60 self.columns = []
61
62 with self.source.open(encoding='utf-8') as fp:
63 fp.seek(self.source_fpos)
64
65 # consume all metadata, create columns
66 line = fp.readline()
67 while line and line.startswith('#{'):
68 d = json.loads(line[1:])
69 if 'col' not in d:
70 raise Exception(d)
71 classname = d.pop('col')
72 if classname == 'Column':
73 classname = 'ItemColumn'
74 d['expr'] = d['name']
75
76 c = globals()[classname](d.pop('name'), sheet=self)
77 self.addColumn(c)
78 self.colnames[c.name] = c
79 for k, v in d.items():
80 setattr(c, k, v)
81
82 line = fp.readline()
83
84 while line and not line.startswith('#{'):
85 d = json.loads(line)
86 yield d
87 line = fp.readline()
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/loaders/vds.py b/visidata/loaders/vds.py
--- a/visidata/loaders/vds.py
+++ b/visidata/loaders/vds.py
@@ -2,7 +2,7 @@
import json
-from visidata import VisiData, JsonSheet, Progress, IndexSheet, SettableColumn, ItemColumn
+from visidata import VisiData, JsonSheet, Progress, IndexSheet, SettableColumn, ItemColumn, ExprColumn
NL='\n'
| {"golden_diff": "diff --git a/visidata/loaders/vds.py b/visidata/loaders/vds.py\n--- a/visidata/loaders/vds.py\n+++ b/visidata/loaders/vds.py\n@@ -2,7 +2,7 @@\n \n import json\n \n-from visidata import VisiData, JsonSheet, Progress, IndexSheet, SettableColumn, ItemColumn\n+from visidata import VisiData, JsonSheet, Progress, IndexSheet, SettableColumn, ItemColumn, ExprColumn\n \n \n NL='\\n'\n", "issue": "Bug: cannot read .vds with expression column\n**Small description**\n\nVisidata cannot read back sheet(s) it saved as `.vds` if they contain an\nexpression column.\n\n\"KeyError: 'ExprColumn'\" shows as error, resulting in a partial read.\n\n\n**Expected result**\n\nIt should be able to read those files.\n\n\n**Actual result with ~~screenshot~~ stacktrace**\n\n```\nTraceback (most recent call last):\n File \"/nix/store/z4xjb4j8i73894r2wqjvlnps9j60rjr0-visidata-2.11/lib/python3.10/site-packages/visidata/threads.py\", line 198, in _toplevelTryFunc\n t.status = func(*args, **kwargs)\n File \"/nix/store/z4xjb4j8i73894r2wqjvlnps9j60rjr0-visidata-2.11/lib/python3.10/site-packages/visidata/pyobj.py\", line 26, in reload\n for r in self.iterload():\n File \"/nix/store/z4xjb4j8i73894r2wqjvlnps9j60rjr0-visidata-2.11/lib/python3.10/site-packages/visidata/loaders/vds.py\", line 76, in iterload\n c = globals()[classname](d.pop('name'), sheet=self)\nKeyError: 'ExprColumn'\n```\n\n\n**Steps to reproduce with sample data and a .vd**\n\nCreate and save some test sheet with an expr column with this `cmdlog.vdj`:\n\n```\n#!vd -p\n{\"col\": \"\", \"row\": \"\", \"longname\": \"open-new\", \"input\": \"\", \"keystrokes\": \"Shift+A\", \"comment\": \"Open new empty sheet\"}\n{\"sheet\": \"unnamed\", \"col\": \"A\", \"row\": \"\", \"longname\": \"type-int\", \"input\": \"\", \"keystrokes\": \"#\", \"comment\": \"set type of current column to int\"}\n{\"sheet\": \"unnamed\", \"col\": \"\", \"row\": \"\", \"longname\": \"add-row\", \"input\": \"\", \"keystrokes\": \"a\", \"comment\": \"append a blank row\"}\n{\"sheet\": \"unnamed\", \"col\": \"A\", \"row\": \"0\", \"longname\": \"edit-cell\", \"input\": \"2\", \"keystrokes\": \"e\", \"comment\": \"edit contents of current cell\"}\n{\"sheet\": \"unnamed\", \"col\": \"A\", \"row\": \"\", \"longname\": \"addcol-expr\", \"input\": \"A*2\", \"keystrokes\": \"=\", \"comment\": \"create new column from Python expression, with column names as variables\"}\n{\"sheet\": \"unnamed\", \"col\": \"\", \"row\": \"\", \"longname\": \"save-sheet\", \"input\": \"sheet.vds\", \"keystrokes\": \"Ctrl+S\", \"comment\": \"save current sheet to filename in format determined by extension (default .tsv)\"}\n```\n\nThis produces `sheet.vds` as follows, which seems valid:\n\n```\n#{\"name\": \"unnamed\"}\n#{\"name\": \"A\", \"width\": 4, \"height\": 1, \"expr\": null, \"keycol\": 0, \"formatter\": \"\", \"fmtstr\": \"\", \"voffset\": 0, \"hoffset\": 0, \"aggstr\": \"\", \"type\": \"int\", \"col\": \"Column\"}\n#{\"name\": \"A*2\", \"width\": 5, \"height\": 1, \"expr\": \"A*2\", \"keycol\": 0, \"formatter\": \"\", \"fmtstr\": \"\", \"voffset\": 0, \"hoffset\": 0, \"aggstr\": \"\", \"type\": \"\", \"col\": \"ExprColumn\"}\n{\"A\": 2, \"A*2\": 4}\n```\n\nQuit visidata and open that file again with `vd sheet.vds`,\nand observe the loading error.\n\n\n**Additional context**\n\n- visidata v2.11\n- python 3.10.12\n\n", "before_files": [{"content": "'Custom VisiData save format'\n\nimport json\n\nfrom visidata import VisiData, JsonSheet, Progress, IndexSheet, SettableColumn, 
ItemColumn\n\n\nNL='\\n'\n\[email protected]\ndef open_vds(vd, p):\n return VdsIndexSheet(p.name, source=p)\n\n\[email protected]\ndef save_vds(vd, p, *sheets):\n 'Save in custom VisiData format, preserving columns and their attributes.'\n\n with p.open(mode='w', encoding='utf-8') as fp:\n for vs in sheets:\n # class and attrs for vs\n d = { 'name': vs.name, }\n fp.write('#'+json.dumps(d)+NL)\n\n # class and attrs for each column in vs\n for col in vs.visibleCols:\n d = col.__getstate__()\n if isinstance(col, SettableColumn):\n d['col'] = 'Column'\n else:\n d['col'] = type(col).__name__\n fp.write('#'+json.dumps(d)+NL)\n\n with Progress(gerund='saving'):\n for row in vs.iterdispvals(*vs.visibleCols, format=False):\n d = {col.name:val for col, val in row.items()}\n fp.write(json.dumps(d, default=str)+NL)\n\n\nclass VdsIndexSheet(IndexSheet):\n def iterload(self):\n vs = None\n with self.source.open(encoding='utf-8') as fp:\n line = fp.readline()\n while line:\n if line.startswith('#{'):\n d = json.loads(line[1:])\n if 'col' not in d:\n vs = VdsSheet(d.pop('name'), columns=[], source=self.source, source_fpos=fp.tell())\n yield vs\n line = fp.readline()\n\n\nclass VdsSheet(JsonSheet):\n def newRow(self):\n return {} # rowdef: dict\n\n def iterload(self):\n self.colnames = {}\n self.columns = []\n\n with self.source.open(encoding='utf-8') as fp:\n fp.seek(self.source_fpos)\n\n # consume all metadata, create columns\n line = fp.readline()\n while line and line.startswith('#{'):\n d = json.loads(line[1:])\n if 'col' not in d:\n raise Exception(d)\n classname = d.pop('col')\n if classname == 'Column':\n classname = 'ItemColumn'\n d['expr'] = d['name']\n\n c = globals()[classname](d.pop('name'), sheet=self)\n self.addColumn(c)\n self.colnames[c.name] = c\n for k, v in d.items():\n setattr(c, k, v)\n\n line = fp.readline()\n\n while line and not line.startswith('#{'):\n d = json.loads(line)\n yield d\n line = fp.readline()\n", "path": "visidata/loaders/vds.py"}], "after_files": [{"content": "'Custom VisiData save format'\n\nimport json\n\nfrom visidata import VisiData, JsonSheet, Progress, IndexSheet, SettableColumn, ItemColumn, ExprColumn\n\n\nNL='\\n'\n\[email protected]\ndef open_vds(vd, p):\n return VdsIndexSheet(p.name, source=p)\n\n\[email protected]\ndef save_vds(vd, p, *sheets):\n 'Save in custom VisiData format, preserving columns and their attributes.'\n\n with p.open(mode='w', encoding='utf-8') as fp:\n for vs in sheets:\n # class and attrs for vs\n d = { 'name': vs.name, }\n fp.write('#'+json.dumps(d)+NL)\n\n # class and attrs for each column in vs\n for col in vs.visibleCols:\n d = col.__getstate__()\n if isinstance(col, SettableColumn):\n d['col'] = 'Column'\n else:\n d['col'] = type(col).__name__\n fp.write('#'+json.dumps(d)+NL)\n\n with Progress(gerund='saving'):\n for row in vs.iterdispvals(*vs.visibleCols, format=False):\n d = {col.name:val for col, val in row.items()}\n fp.write(json.dumps(d, default=str)+NL)\n\n\nclass VdsIndexSheet(IndexSheet):\n def iterload(self):\n vs = None\n with self.source.open(encoding='utf-8') as fp:\n line = fp.readline()\n while line:\n if line.startswith('#{'):\n d = json.loads(line[1:])\n if 'col' not in d:\n vs = VdsSheet(d.pop('name'), columns=[], source=self.source, source_fpos=fp.tell())\n yield vs\n line = fp.readline()\n\n\nclass VdsSheet(JsonSheet):\n def newRow(self):\n return {} # rowdef: dict\n\n def iterload(self):\n self.colnames = {}\n self.columns = []\n\n with self.source.open(encoding='utf-8') as fp:\n fp.seek(self.source_fpos)\n\n 
# consume all metadata, create columns\n line = fp.readline()\n while line and line.startswith('#{'):\n d = json.loads(line[1:])\n if 'col' not in d:\n raise Exception(d)\n classname = d.pop('col')\n if classname == 'Column':\n classname = 'ItemColumn'\n d['expr'] = d['name']\n\n c = globals()[classname](d.pop('name'), sheet=self)\n self.addColumn(c)\n self.colnames[c.name] = c\n for k, v in d.items():\n setattr(c, k, v)\n\n line = fp.readline()\n\n while line and not line.startswith('#{'):\n d = json.loads(line)\n yield d\n line = fp.readline()\n", "path": "visidata/loaders/vds.py"}]} | 1,949 | 113 |
gh_patches_debug_7921 | rasdani/github-patches | git_diff | OpenMined__PySyft-5330 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check and Fix notebook / jupyter client warning on Colab
## Description
This needs to be checked on Colab, since it seems colab has its own outdated versions of notebook and jupyter-client as well.
https://github.com/OpenMined/PySyft/issues/4915
## Definition of Done
Fix for Colab if possible.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/syft/core/common/environment.py`
Content:
```
1 # stdlib
2 import sys
3
4 # checks if we are in a python repl or python -i script.py
5 is_interactive = bool(getattr(sys, "ps1", sys.flags.interactive))
6
7 # checks if we are in jupyter
8 is_jupyter = True
9
10 try:
11 get_ipython() # type: ignore
12 # third party
13 import notebook
14 from packaging import version
15
16 NOTEBOOK_VERSION = version.parse(notebook.__version__.split("+")[0])
17 if NOTEBOOK_VERSION < version.parse("6.0.0"):
18 raise Exception(
19 "Your Jupyter Notebook is too old. Please upgrade to version 6 or higher."
20 )
21 except NameError:
22 is_jupyter = False
23
24 __all__ = ["is_interactive", "is_jupyter"]
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/syft/core/common/environment.py b/src/syft/core/common/environment.py
--- a/src/syft/core/common/environment.py
+++ b/src/syft/core/common/environment.py
@@ -14,7 +14,8 @@
from packaging import version
NOTEBOOK_VERSION = version.parse(notebook.__version__.split("+")[0])
- if NOTEBOOK_VERSION < version.parse("6.0.0"):
+ if NOTEBOOK_VERSION < version.parse("6.0.0") and "google.colab" not in sys.modules:
+ # google.colab check to fix issue #5315
raise Exception(
"Your Jupyter Notebook is too old. Please upgrade to version 6 or higher."
)
| {"golden_diff": "diff --git a/src/syft/core/common/environment.py b/src/syft/core/common/environment.py\n--- a/src/syft/core/common/environment.py\n+++ b/src/syft/core/common/environment.py\n@@ -14,7 +14,8 @@\n from packaging import version\n \n NOTEBOOK_VERSION = version.parse(notebook.__version__.split(\"+\")[0])\n- if NOTEBOOK_VERSION < version.parse(\"6.0.0\"):\n+ if NOTEBOOK_VERSION < version.parse(\"6.0.0\") and \"google.colab\" not in sys.modules:\n+ # google.colab check to fix issue #5315\n raise Exception(\n \"Your Jupyter Notebook is too old. Please upgrade to version 6 or higher.\"\n )\n", "issue": "Check and Fix notebook / jupyter client warning on Colab\n## Description\r\nThis needs to be checked on Colab, since it seems colab has its own outdated versions of notebook and jupyter-client as well.\r\nhttps://github.com/OpenMined/PySyft/issues/4915\r\n\r\n## Definition of Done\r\nFix for Colab if possible.\n", "before_files": [{"content": "# stdlib\nimport sys\n\n# checks if we are in a python repl or python -i script.py\nis_interactive = bool(getattr(sys, \"ps1\", sys.flags.interactive))\n\n# checks if we are in jupyter\nis_jupyter = True\n\ntry:\n get_ipython() # type: ignore\n # third party\n import notebook\n from packaging import version\n\n NOTEBOOK_VERSION = version.parse(notebook.__version__.split(\"+\")[0])\n if NOTEBOOK_VERSION < version.parse(\"6.0.0\"):\n raise Exception(\n \"Your Jupyter Notebook is too old. Please upgrade to version 6 or higher.\"\n )\nexcept NameError:\n is_jupyter = False\n\n__all__ = [\"is_interactive\", \"is_jupyter\"]\n", "path": "src/syft/core/common/environment.py"}], "after_files": [{"content": "# stdlib\nimport sys\n\n# checks if we are in a python repl or python -i script.py\nis_interactive = bool(getattr(sys, \"ps1\", sys.flags.interactive))\n\n# checks if we are in jupyter\nis_jupyter = True\n\ntry:\n get_ipython() # type: ignore\n # third party\n import notebook\n from packaging import version\n\n NOTEBOOK_VERSION = version.parse(notebook.__version__.split(\"+\")[0])\n if NOTEBOOK_VERSION < version.parse(\"6.0.0\") and \"google.colab\" not in sys.modules:\n # google.colab check to fix issue #5315\n raise Exception(\n \"Your Jupyter Notebook is too old. Please upgrade to version 6 or higher.\"\n )\nexcept NameError:\n is_jupyter = False\n\n__all__ = [\"is_interactive\", \"is_jupyter\"]\n", "path": "src/syft/core/common/environment.py"}]} | 548 | 163 |
gh_patches_debug_42569 | rasdani/github-patches | git_diff | NVIDIA-Merlin__NVTabular-1262 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FEA] Create a padding option in the ListSlice Op
The right padding option is required
- to create fixed length lists generated from ETL workflow (see ([example nb](https://github.com/NVIDIA-Merlin/Transformers4Rec/blob/main/examples/getting-started-session-based/01-ETL-with-NVTabular.ipynb)).
- to be able to serve a seq-to-seq model (ex. session-based model) to TIS with proper reshaped tensors and get the proper reshaped outputs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nvtabular/ops/list_slice.py`
Content:
```
1 #
2 # Copyright (c) 2021, NVIDIA CORPORATION.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 import numba.cuda
17 import numpy as np
18
19 try:
20 import cupy as cp
21 except ImportError:
22 cp = None
23
24 from nvtabular.dispatch import DataFrameType, _build_cudf_list_column, _is_cpu_object, annotate
25
26 from ..tags import Tags
27 from .operator import ColumnSelector, Operator
28
29
30 class ListSlice(Operator):
31 """Slices a list column
32
33 This operator provides the ability to slice list column by row. For example, to truncate a
34 list column to only include the first 10 elements per row::
35
36 truncated = column_names >> ops.ListSlice(10)
37
38 Take the first 10 items, ignoring the first element::
39
40 truncated = column_names >> ops.ListSlice(1, 11)
41
42 Take the last 10 items from each row::
43
44 truncated = column_names >> ops.ListSlice(-10)
45 """
46
47 def __init__(self, start, end=None):
48 super().__init__()
49 self.start = start
50 self.end = end
51
52 if self.start > 0 and self.end is None:
53 self.end = self.start
54 self.start = 0
55
56 if self.end is None:
57 self.end = np.iinfo(np.int64).max
58
59 @annotate("ListSlice_op", color="darkgreen", domain="nvt_python")
60 def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
61 on_cpu = _is_cpu_object(df)
62 ret = type(df)()
63 for col in col_selector.names:
64 # handle CPU via normal python slicing (not very efficient)
65 if on_cpu:
66 ret[col] = [row[self.start : self.end] for row in df[col]]
67 else:
68 # figure out the size of each row from the list offsets
69 c = df[col]._column
70 offsets = c.offsets.values
71 elements = c.elements.values
72
73 # figure out the size of each row after slicing start/end
74 new_offsets = cp.zeros(offsets.size, dtype=offsets.dtype)
75 threads = 32
76 blocks = (offsets.size + threads - 1) // threads
77
78 # calculate new row offsets after slicing
79 _calculate_row_sizes[blocks, threads](self.start, self.end, offsets, new_offsets)
80 new_offsets = cp.cumsum(new_offsets).astype(offsets.dtype)
81
82 # create a new array for the sliced elements
83 new_elements = cp.zeros(new_offsets[-1].item(), dtype=elements.dtype)
84 if new_elements.size:
85 _slice_rows[blocks, threads](
86 self.start, offsets, elements, new_offsets, new_elements
87 )
88
89 # build up a list column with the sliced values
90 ret[col] = _build_cudf_list_column(new_elements, new_offsets)
91
92 return ret
93
94 def output_tags(self):
95 return [Tags.LIST]
96
97 transform.__doc__ = Operator.transform.__doc__
98
99
100 @numba.cuda.jit
101 def _calculate_row_sizes(start, end, offsets, row_sizes):
102 """given a slice (start/end) and existing offsets indicating row lengths, this
103 calculates the size for each new row after slicing"""
104 rowid = numba.cuda.grid(1)
105 if rowid < offsets.size - 1:
106 original_row_size = offsets[rowid + 1] - offsets[rowid]
107
108 # handle negative slicing appropriately
109 if start < 0:
110 start = original_row_size + start
111 if end < 0:
112 end = original_row_size + end
113
114 # clamp start/end to be in (0, original_row_size)
115 start = min(max(0, start), original_row_size)
116 end = min(max(0, end), original_row_size)
117
118 row_sizes[rowid + 1] = end - start
119
120
121 @numba.cuda.jit
122 def _slice_rows(start, offsets, elements, new_offsets, new_elements):
123 """slices rows of a list column. requires the 'new_offsets' to
124 be previously calculated (meaning that we don't need the 'end' slice index
125 since that's baked into the new_offsets"""
126 rowid = numba.cuda.grid(1)
127 if rowid < (new_offsets.size - 1):
128 if start >= 0:
129 offset = offsets[rowid] + start
130 else:
131 offset = offsets[rowid + 1] + start
132 if offset < offsets[rowid]:
133 offset = offsets[rowid]
134
135 for new_offset in range(new_offsets[rowid], new_offsets[rowid + 1]):
136 new_elements[new_offset] = elements[offset]
137 offset += 1
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nvtabular/ops/list_slice.py b/nvtabular/ops/list_slice.py
--- a/nvtabular/ops/list_slice.py
+++ b/nvtabular/ops/list_slice.py
@@ -44,10 +44,12 @@
truncated = column_names >> ops.ListSlice(-10)
"""
- def __init__(self, start, end=None):
+ def __init__(self, start, end=None, pad=False, pad_value=0.0):
super().__init__()
self.start = start
self.end = end
+ self.pad = pad
+ self.pad_value = pad_value
if self.start > 0 and self.end is None:
self.end = self.start
@@ -60,27 +62,47 @@
def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
on_cpu = _is_cpu_object(df)
ret = type(df)()
+
+ max_elements = self.end - self.start
+
for col in col_selector.names:
# handle CPU via normal python slicing (not very efficient)
if on_cpu:
- ret[col] = [row[self.start : self.end] for row in df[col]]
+ values = [row[self.start : self.end] for row in df[col]]
+
+ # pad out to so each row has max_elements if askeed
+ if self.pad:
+ for v in values:
+ if len(v) < max_elements:
+ v.extend([self.pad_value] * (max_elements - len(v)))
+
+ ret[col] = values
else:
# figure out the size of each row from the list offsets
c = df[col]._column
offsets = c.offsets.values
elements = c.elements.values
- # figure out the size of each row after slicing start/end
- new_offsets = cp.zeros(offsets.size, dtype=offsets.dtype)
threads = 32
blocks = (offsets.size + threads - 1) // threads
- # calculate new row offsets after slicing
- _calculate_row_sizes[blocks, threads](self.start, self.end, offsets, new_offsets)
- new_offsets = cp.cumsum(new_offsets).astype(offsets.dtype)
+ if self.pad:
+ new_offsets = cp.arange(offsets.size, dtype=offsets.dtype) * max_elements
+
+ else:
+ # figure out the size of each row after slicing start/end
+ new_offsets = cp.zeros(offsets.size, dtype=offsets.dtype)
+
+ # calculate new row offsets after slicing
+ _calculate_row_sizes[blocks, threads](
+ self.start, self.end, offsets, new_offsets
+ )
+ new_offsets = cp.cumsum(new_offsets).astype(offsets.dtype)
# create a new array for the sliced elements
- new_elements = cp.zeros(new_offsets[-1].item(), dtype=elements.dtype)
+ new_elements = cp.full(
+ new_offsets[-1].item(), fill_value=self.pad_value, dtype=elements.dtype
+ )
if new_elements.size:
_slice_rows[blocks, threads](
self.start, offsets, elements, new_offsets, new_elements
@@ -132,6 +154,15 @@
if offset < offsets[rowid]:
offset = offsets[rowid]
- for new_offset in range(new_offsets[rowid], new_offsets[rowid + 1]):
+ new_start = new_offsets[rowid]
+ new_end = new_offsets[rowid + 1]
+
+ # if we are padding (more new offsets than old olffsets) - don't keep on iterating past
+ # the end
+ offset_delta = (new_end - new_start) - (offsets[rowid + 1] - offset)
+ if offset_delta > 0:
+ new_end -= offset_delta
+
+ for new_offset in range(new_start, new_end):
new_elements[new_offset] = elements[offset]
offset += 1
| {"golden_diff": "diff --git a/nvtabular/ops/list_slice.py b/nvtabular/ops/list_slice.py\n--- a/nvtabular/ops/list_slice.py\n+++ b/nvtabular/ops/list_slice.py\n@@ -44,10 +44,12 @@\n truncated = column_names >> ops.ListSlice(-10)\n \"\"\"\n \n- def __init__(self, start, end=None):\n+ def __init__(self, start, end=None, pad=False, pad_value=0.0):\n super().__init__()\n self.start = start\n self.end = end\n+ self.pad = pad\n+ self.pad_value = pad_value\n \n if self.start > 0 and self.end is None:\n self.end = self.start\n@@ -60,27 +62,47 @@\n def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:\n on_cpu = _is_cpu_object(df)\n ret = type(df)()\n+\n+ max_elements = self.end - self.start\n+\n for col in col_selector.names:\n # handle CPU via normal python slicing (not very efficient)\n if on_cpu:\n- ret[col] = [row[self.start : self.end] for row in df[col]]\n+ values = [row[self.start : self.end] for row in df[col]]\n+\n+ # pad out to so each row has max_elements if askeed\n+ if self.pad:\n+ for v in values:\n+ if len(v) < max_elements:\n+ v.extend([self.pad_value] * (max_elements - len(v)))\n+\n+ ret[col] = values\n else:\n # figure out the size of each row from the list offsets\n c = df[col]._column\n offsets = c.offsets.values\n elements = c.elements.values\n \n- # figure out the size of each row after slicing start/end\n- new_offsets = cp.zeros(offsets.size, dtype=offsets.dtype)\n threads = 32\n blocks = (offsets.size + threads - 1) // threads\n \n- # calculate new row offsets after slicing\n- _calculate_row_sizes[blocks, threads](self.start, self.end, offsets, new_offsets)\n- new_offsets = cp.cumsum(new_offsets).astype(offsets.dtype)\n+ if self.pad:\n+ new_offsets = cp.arange(offsets.size, dtype=offsets.dtype) * max_elements\n+\n+ else:\n+ # figure out the size of each row after slicing start/end\n+ new_offsets = cp.zeros(offsets.size, dtype=offsets.dtype)\n+\n+ # calculate new row offsets after slicing\n+ _calculate_row_sizes[blocks, threads](\n+ self.start, self.end, offsets, new_offsets\n+ )\n+ new_offsets = cp.cumsum(new_offsets).astype(offsets.dtype)\n \n # create a new array for the sliced elements\n- new_elements = cp.zeros(new_offsets[-1].item(), dtype=elements.dtype)\n+ new_elements = cp.full(\n+ new_offsets[-1].item(), fill_value=self.pad_value, dtype=elements.dtype\n+ )\n if new_elements.size:\n _slice_rows[blocks, threads](\n self.start, offsets, elements, new_offsets, new_elements\n@@ -132,6 +154,15 @@\n if offset < offsets[rowid]:\n offset = offsets[rowid]\n \n- for new_offset in range(new_offsets[rowid], new_offsets[rowid + 1]):\n+ new_start = new_offsets[rowid]\n+ new_end = new_offsets[rowid + 1]\n+\n+ # if we are padding (more new offsets than old olffsets) - don't keep on iterating past\n+ # the end\n+ offset_delta = (new_end - new_start) - (offsets[rowid + 1] - offset)\n+ if offset_delta > 0:\n+ new_end -= offset_delta\n+\n+ for new_offset in range(new_start, new_end):\n new_elements[new_offset] = elements[offset]\n offset += 1\n", "issue": "[FEA] Create a padding option in the ListSlice Op\nThe right padding option is required\r\n- to create fixed length lists generated from ETL workflow (see ([example nb](https://github.com/NVIDIA-Merlin/Transformers4Rec/blob/main/examples/getting-started-session-based/01-ETL-with-NVTabular.ipynb)). \r\n- to be able to serve a seq-to-seq model (ex. 
session-based model) to TIS with proper reshaped tensors and get the proper reshaped outputs.\r\n\r\n\n", "before_files": [{"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport numba.cuda\nimport numpy as np\n\ntry:\n import cupy as cp\nexcept ImportError:\n cp = None\n\nfrom nvtabular.dispatch import DataFrameType, _build_cudf_list_column, _is_cpu_object, annotate\n\nfrom ..tags import Tags\nfrom .operator import ColumnSelector, Operator\n\n\nclass ListSlice(Operator):\n \"\"\"Slices a list column\n\n This operator provides the ability to slice list column by row. For example, to truncate a\n list column to only include the first 10 elements per row::\n\n truncated = column_names >> ops.ListSlice(10)\n\n Take the first 10 items, ignoring the first element::\n\n truncated = column_names >> ops.ListSlice(1, 11)\n\n Take the last 10 items from each row::\n\n truncated = column_names >> ops.ListSlice(-10)\n \"\"\"\n\n def __init__(self, start, end=None):\n super().__init__()\n self.start = start\n self.end = end\n\n if self.start > 0 and self.end is None:\n self.end = self.start\n self.start = 0\n\n if self.end is None:\n self.end = np.iinfo(np.int64).max\n\n @annotate(\"ListSlice_op\", color=\"darkgreen\", domain=\"nvt_python\")\n def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:\n on_cpu = _is_cpu_object(df)\n ret = type(df)()\n for col in col_selector.names:\n # handle CPU via normal python slicing (not very efficient)\n if on_cpu:\n ret[col] = [row[self.start : self.end] for row in df[col]]\n else:\n # figure out the size of each row from the list offsets\n c = df[col]._column\n offsets = c.offsets.values\n elements = c.elements.values\n\n # figure out the size of each row after slicing start/end\n new_offsets = cp.zeros(offsets.size, dtype=offsets.dtype)\n threads = 32\n blocks = (offsets.size + threads - 1) // threads\n\n # calculate new row offsets after slicing\n _calculate_row_sizes[blocks, threads](self.start, self.end, offsets, new_offsets)\n new_offsets = cp.cumsum(new_offsets).astype(offsets.dtype)\n\n # create a new array for the sliced elements\n new_elements = cp.zeros(new_offsets[-1].item(), dtype=elements.dtype)\n if new_elements.size:\n _slice_rows[blocks, threads](\n self.start, offsets, elements, new_offsets, new_elements\n )\n\n # build up a list column with the sliced values\n ret[col] = _build_cudf_list_column(new_elements, new_offsets)\n\n return ret\n\n def output_tags(self):\n return [Tags.LIST]\n\n transform.__doc__ = Operator.transform.__doc__\n\n\[email protected]\ndef _calculate_row_sizes(start, end, offsets, row_sizes):\n \"\"\"given a slice (start/end) and existing offsets indicating row lengths, this\n calculates the size for each new row after slicing\"\"\"\n rowid = numba.cuda.grid(1)\n if rowid < offsets.size - 1:\n original_row_size = offsets[rowid + 1] - offsets[rowid]\n\n # handle negative slicing appropriately\n if start < 0:\n start = original_row_size 
+ start\n if end < 0:\n end = original_row_size + end\n\n # clamp start/end to be in (0, original_row_size)\n start = min(max(0, start), original_row_size)\n end = min(max(0, end), original_row_size)\n\n row_sizes[rowid + 1] = end - start\n\n\[email protected]\ndef _slice_rows(start, offsets, elements, new_offsets, new_elements):\n \"\"\"slices rows of a list column. requires the 'new_offsets' to\n be previously calculated (meaning that we don't need the 'end' slice index\n since that's baked into the new_offsets\"\"\"\n rowid = numba.cuda.grid(1)\n if rowid < (new_offsets.size - 1):\n if start >= 0:\n offset = offsets[rowid] + start\n else:\n offset = offsets[rowid + 1] + start\n if offset < offsets[rowid]:\n offset = offsets[rowid]\n\n for new_offset in range(new_offsets[rowid], new_offsets[rowid + 1]):\n new_elements[new_offset] = elements[offset]\n offset += 1\n", "path": "nvtabular/ops/list_slice.py"}], "after_files": [{"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport numba.cuda\nimport numpy as np\n\ntry:\n import cupy as cp\nexcept ImportError:\n cp = None\n\nfrom nvtabular.dispatch import DataFrameType, _build_cudf_list_column, _is_cpu_object, annotate\n\nfrom ..tags import Tags\nfrom .operator import ColumnSelector, Operator\n\n\nclass ListSlice(Operator):\n \"\"\"Slices a list column\n\n This operator provides the ability to slice list column by row. 
For example, to truncate a\n list column to only include the first 10 elements per row::\n\n truncated = column_names >> ops.ListSlice(10)\n\n Take the first 10 items, ignoring the first element::\n\n truncated = column_names >> ops.ListSlice(1, 11)\n\n Take the last 10 items from each row::\n\n truncated = column_names >> ops.ListSlice(-10)\n \"\"\"\n\n def __init__(self, start, end=None, pad=False, pad_value=0.0):\n super().__init__()\n self.start = start\n self.end = end\n self.pad = pad\n self.pad_value = pad_value\n\n if self.start > 0 and self.end is None:\n self.end = self.start\n self.start = 0\n\n if self.end is None:\n self.end = np.iinfo(np.int64).max\n\n @annotate(\"ListSlice_op\", color=\"darkgreen\", domain=\"nvt_python\")\n def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:\n on_cpu = _is_cpu_object(df)\n ret = type(df)()\n\n max_elements = self.end - self.start\n\n for col in col_selector.names:\n # handle CPU via normal python slicing (not very efficient)\n if on_cpu:\n values = [row[self.start : self.end] for row in df[col]]\n\n # pad out to so each row has max_elements if askeed\n if self.pad:\n for v in values:\n if len(v) < max_elements:\n v.extend([self.pad_value] * (max_elements - len(v)))\n\n ret[col] = values\n else:\n # figure out the size of each row from the list offsets\n c = df[col]._column\n offsets = c.offsets.values\n elements = c.elements.values\n\n threads = 32\n blocks = (offsets.size + threads - 1) // threads\n\n if self.pad:\n new_offsets = cp.arange(offsets.size, dtype=offsets.dtype) * max_elements\n\n else:\n # figure out the size of each row after slicing start/end\n new_offsets = cp.zeros(offsets.size, dtype=offsets.dtype)\n\n # calculate new row offsets after slicing\n _calculate_row_sizes[blocks, threads](\n self.start, self.end, offsets, new_offsets\n )\n new_offsets = cp.cumsum(new_offsets).astype(offsets.dtype)\n\n # create a new array for the sliced elements\n new_elements = cp.full(\n new_offsets[-1].item(), fill_value=self.pad_value, dtype=elements.dtype\n )\n if new_elements.size:\n _slice_rows[blocks, threads](\n self.start, offsets, elements, new_offsets, new_elements\n )\n\n # build up a list column with the sliced values\n ret[col] = _build_cudf_list_column(new_elements, new_offsets)\n\n return ret\n\n def output_tags(self):\n return [Tags.LIST]\n\n transform.__doc__ = Operator.transform.__doc__\n\n\[email protected]\ndef _calculate_row_sizes(start, end, offsets, row_sizes):\n \"\"\"given a slice (start/end) and existing offsets indicating row lengths, this\n calculates the size for each new row after slicing\"\"\"\n rowid = numba.cuda.grid(1)\n if rowid < offsets.size - 1:\n original_row_size = offsets[rowid + 1] - offsets[rowid]\n\n # handle negative slicing appropriately\n if start < 0:\n start = original_row_size + start\n if end < 0:\n end = original_row_size + end\n\n # clamp start/end to be in (0, original_row_size)\n start = min(max(0, start), original_row_size)\n end = min(max(0, end), original_row_size)\n\n row_sizes[rowid + 1] = end - start\n\n\[email protected]\ndef _slice_rows(start, offsets, elements, new_offsets, new_elements):\n \"\"\"slices rows of a list column. 
requires the 'new_offsets' to\n be previously calculated (meaning that we don't need the 'end' slice index\n since that's baked into the new_offsets\"\"\"\n rowid = numba.cuda.grid(1)\n if rowid < (new_offsets.size - 1):\n if start >= 0:\n offset = offsets[rowid] + start\n else:\n offset = offsets[rowid + 1] + start\n if offset < offsets[rowid]:\n offset = offsets[rowid]\n\n new_start = new_offsets[rowid]\n new_end = new_offsets[rowid + 1]\n\n # if we are padding (more new offsets than old olffsets) - don't keep on iterating past\n # the end\n offset_delta = (new_end - new_start) - (offsets[rowid + 1] - offset)\n if offset_delta > 0:\n new_end -= offset_delta\n\n for new_offset in range(new_start, new_end):\n new_elements[new_offset] = elements[offset]\n offset += 1\n", "path": "nvtabular/ops/list_slice.py"}]} | 1,829 | 892 |
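The row above adds `pad`/`pad_value` options to NVTabular's `ListSlice` op. As a minimal sketch of the CPU-side slice-and-pad semantics the patch introduces (plain Python only; `slice_and_pad` is a hypothetical helper, not part of NVTabular's API):

```python
# Minimal sketch of the CPU-side behaviour introduced by the ListSlice patch above.
# `slice_and_pad` is a hypothetical stand-alone helper, not part of NVTabular.
def slice_and_pad(rows, start, end, pad=False, pad_value=0.0):
    max_elements = end - start
    result = []
    for row in rows:
        values = list(row[start:end])
        if pad and len(values) < max_elements:
            # right-pad short rows so every row ends up with max_elements entries
            values.extend([pad_value] * (max_elements - len(values)))
        result.append(values)
    return result


# Slice each row to its first three items and pad ragged rows with 0.0.
print(slice_and_pad([[1, 2, 3, 4], [5]], start=0, end=3, pad=True))
# [[1, 2, 3], [5, 0.0, 0.0]]
```

The GPU branch of the patch reaches the same result differently: when `pad` is set it pre-sizes `new_offsets` to `row_index * max_elements` and allocates `new_elements` with `cp.full(..., fill_value=pad_value)`, so slots that are never written already hold the pad value.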
gh_patches_debug_2135 | rasdani/github-patches | git_diff | chainer__chainer-258 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Shape of output value of `concat` is list in GPU
`cuda.empty([1])` makes a `GPUArray` whose shape is a `list`, which causes an invalid type error.
https://github.com/pfnet/chainer/blob/master/chainer/functions/concat.py#L69
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/concat.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7 _args = 'const float* x, float* y, int cdimx, int cdimy, int rdim, int coffset'
8 _preamble = '''
9 #define COPY(statement) \
10 int l = i / (rdim * cdimx); \
11 int c = i / rdim % cdimx + coffset; \
12 int r = i % rdim; \
13 int idx = r + rdim * (c + cdimy * l); \
14 statement;
15 '''
16
17
18 class Concat(function.Function):
19
20 """Concatenate multiple tensors towards specified axis."""
21
22 # concat along the channel dimension by default
23 def __init__(self, axis=1):
24 self.axis = axis
25
26 def check_type_forward(self, in_types):
27 type_check.expect(in_types.size() > 0)
28 type_check.expect(in_types[0].ndim >
29 type_check.Variable(self.axis, 'axis'))
30
31 ndim = in_types[0].ndim.eval()
32 for i in range(1, in_types.size().eval()):
33 type_check.expect(
34 in_types[0].dtype == in_types[i].dtype,
35 in_types[0].ndim == in_types[i].ndim,
36 )
37 for d in range(0, ndim):
38 if d == self.axis:
39 continue
40 type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
41
42 def check_type_backward(self, in_types, out_types):
43 type_check.expect(
44 in_types.size() > 0,
45 out_types.size() == 1,
46 )
47 y_type, = out_types
48
49 type_check.expect(y_type.dtype == in_types[0].dtype)
50 ndim = in_types[0].ndim.eval()
51 concat_size = sum(typ.shape[self.axis] for typ in in_types)
52 type_check.expect(concat_size == y_type.shape[self.axis])
53
54 for d in range(0, ndim):
55 if d == self.axis:
56 continue
57 type_check.expect(y_type.shape[d] == in_types[0].shape[d])
58
59 def forward_cpu(self, xs):
60 return numpy.concatenate(xs, axis=self.axis),
61
62 def forward_gpu(self, xs):
63 # TODO(beam2d): Unify the process into a single kernel.
64 shape = list(xs[0].shape)
65 for x in xs[1:]:
66 shape[self.axis] += x.shape[self.axis]
67 self.shape = shape
68
69 y = cuda.empty(shape, dtype=xs[0].dtype)
70 self.cdimy = y.shape[self.axis]
71 self.rdim = numpy.prod(shape[self.axis + 1:], dtype=int)
72
73 coffset = 0
74 kernel = cuda.elementwise(
75 _args, 'COPY(y[idx] = x[i])', 'concat_fwd', preamble=_preamble)
76 for x in xs:
77 cdimx = x.shape[self.axis]
78 kernel(x, y, cdimx, self.cdimy, self.rdim, coffset)
79 coffset += cdimx
80
81 return y,
82
83 def backward_cpu(self, xs, gy):
84 sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()
85 return numpy.split(gy[0], sizes, axis=self.axis)
86
87 def backward_gpu(self, xs, gy):
88 gxs = tuple(cuda.empty_like(x) for x in xs)
89
90 coffset = 0
91 kernel = cuda.elementwise(
92 _args, 'COPY(x[i] = y[idx])', 'concat_bwd', preamble=_preamble)
93 for gx in gxs:
94 cdimx = gx.shape[self.axis]
95 kernel(gx, gy[0], cdimx, self.cdimy, self.rdim, coffset)
96 coffset += cdimx
97
98 return gxs
99
100
101 def concat(xs, axis=1):
102 """Concatenates given variables along an axis.
103
104 Args:
105 xs (tuple of Variables): Variables to be concatenated.
106 axis (int): Axis that the input arrays are concatenated along.
107
108 Returns:
109 ~chainer.Variable: Output variable.
110
111 """
112 return Concat(axis=axis)(*xs)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/concat.py b/chainer/functions/concat.py
--- a/chainer/functions/concat.py
+++ b/chainer/functions/concat.py
@@ -64,6 +64,7 @@
shape = list(xs[0].shape)
for x in xs[1:]:
shape[self.axis] += x.shape[self.axis]
+ shape = tuple(shape)
self.shape = shape
y = cuda.empty(shape, dtype=xs[0].dtype)
| {"golden_diff": "diff --git a/chainer/functions/concat.py b/chainer/functions/concat.py\n--- a/chainer/functions/concat.py\n+++ b/chainer/functions/concat.py\n@@ -64,6 +64,7 @@\n shape = list(xs[0].shape)\n for x in xs[1:]:\n shape[self.axis] += x.shape[self.axis]\n+ shape = tuple(shape)\n self.shape = shape\n \n y = cuda.empty(shape, dtype=xs[0].dtype)\n", "issue": "Shape of output value of `concat` is list in GPU\n`cuda.empty([1])` makes a `GPUArray` with `list` shape. It causes a type invalid error.\n\nhttps://github.com/pfnet/chainer/blob/master/chainer/functions/concat.py#L69\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n_args = 'const float* x, float* y, int cdimx, int cdimy, int rdim, int coffset'\n_preamble = '''\n#define COPY(statement) \\\n int l = i / (rdim * cdimx); \\\n int c = i / rdim % cdimx + coffset; \\\n int r = i % rdim; \\\n int idx = r + rdim * (c + cdimy * l); \\\n statement;\n'''\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n ndim = in_types[0].ndim.eval()\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n in_types.size() > 0,\n out_types.size() == 1,\n )\n y_type, = out_types\n\n type_check.expect(y_type.dtype == in_types[0].dtype)\n ndim = in_types[0].ndim.eval()\n concat_size = sum(typ.shape[self.axis] for typ in in_types)\n type_check.expect(concat_size == y_type.shape[self.axis])\n\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(y_type.shape[d] == in_types[0].shape[d])\n\n def forward_cpu(self, xs):\n return numpy.concatenate(xs, axis=self.axis),\n\n def forward_gpu(self, xs):\n # TODO(beam2d): Unify the process into a single kernel.\n shape = list(xs[0].shape)\n for x in xs[1:]:\n shape[self.axis] += x.shape[self.axis]\n self.shape = shape\n\n y = cuda.empty(shape, dtype=xs[0].dtype)\n self.cdimy = y.shape[self.axis]\n self.rdim = numpy.prod(shape[self.axis + 1:], dtype=int)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(y[idx] = x[i])', 'concat_fwd', preamble=_preamble)\n for x in xs:\n cdimx = x.shape[self.axis]\n kernel(x, y, cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return y,\n\n def backward_cpu(self, xs, gy):\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return numpy.split(gy[0], sizes, axis=self.axis)\n\n def backward_gpu(self, xs, gy):\n gxs = tuple(cuda.empty_like(x) for x in xs)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[i] = y[idx])', 'concat_bwd', preamble=_preamble)\n for gx in gxs:\n cdimx = gx.shape[self.axis]\n kernel(gx, gy[0], cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return gxs\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated 
along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/concat.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n_args = 'const float* x, float* y, int cdimx, int cdimy, int rdim, int coffset'\n_preamble = '''\n#define COPY(statement) \\\n int l = i / (rdim * cdimx); \\\n int c = i / rdim % cdimx + coffset; \\\n int r = i % rdim; \\\n int idx = r + rdim * (c + cdimy * l); \\\n statement;\n'''\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n ndim = in_types[0].ndim.eval()\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n in_types.size() > 0,\n out_types.size() == 1,\n )\n y_type, = out_types\n\n type_check.expect(y_type.dtype == in_types[0].dtype)\n ndim = in_types[0].ndim.eval()\n concat_size = sum(typ.shape[self.axis] for typ in in_types)\n type_check.expect(concat_size == y_type.shape[self.axis])\n\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(y_type.shape[d] == in_types[0].shape[d])\n\n def forward_cpu(self, xs):\n return numpy.concatenate(xs, axis=self.axis),\n\n def forward_gpu(self, xs):\n # TODO(beam2d): Unify the process into a single kernel.\n shape = list(xs[0].shape)\n for x in xs[1:]:\n shape[self.axis] += x.shape[self.axis]\n shape = tuple(shape)\n self.shape = shape\n\n y = cuda.empty(shape, dtype=xs[0].dtype)\n self.cdimy = y.shape[self.axis]\n self.rdim = numpy.prod(shape[self.axis + 1:], dtype=int)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(y[idx] = x[i])', 'concat_fwd', preamble=_preamble)\n for x in xs:\n cdimx = x.shape[self.axis]\n kernel(x, y, cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return y,\n\n def backward_cpu(self, xs, gy):\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return numpy.split(gy[0], sizes, axis=self.axis)\n\n def backward_gpu(self, xs, gy):\n gxs = tuple(cuda.empty_like(x) for x in xs)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[i] = y[idx])', 'concat_bwd', preamble=_preamble)\n for gx in gxs:\n cdimx = gx.shape[self.axis]\n kernel(gx, gy[0], cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return gxs\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/concat.py"}]} | 1,491 | 105 |
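The one-line fix above converts the accumulated `shape` list to a tuple before the allocation in `forward_gpu`. A small NumPy-based illustration of the same pattern (NumPy stands in for the GPU allocator, which is not assumed to be available here):

```python
import numpy as np

# Accumulate the concatenated output shape by mutating a list, as forward_gpu does.
xs_shapes = [(2, 3, 4), (2, 5, 4)]
axis = 1
shape = list(xs_shapes[0])
for s in xs_shapes[1:]:
    shape[axis] += s[axis]
shape = tuple(shape)  # the fix: hand the allocator a tuple, not a list

y = np.empty(shape, dtype=np.float32)
assert y.shape == (2, 8, 4)
```

Per the issue, the old `cuda.empty` call kept the list as the resulting `GPUArray`'s shape, which later tripped a type check; normalising to a tuple up front avoids that.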
gh_patches_debug_38697 | rasdani/github-patches | git_diff | facebookresearch__hydra-605 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request] Allow Initialize to take a module
# 🚀 Feature Request
Allow hydra.experimental.initialize to take a calling module
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/experimental/compose.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from typing import List, Optional
3
4 from omegaconf import DictConfig, open_dict
5
6 from hydra._internal.hydra import Hydra
7 from hydra._internal.utils import detect_calling_file_or_module_from_stack_frame
8 from hydra.core.global_hydra import GlobalHydra
9
10
11 def initialize(
12 config_dir: Optional[str] = None,
13 strict: Optional[bool] = None,
14 caller_stack_depth: int = 1,
15 ) -> None:
16 """
17 :param config_dir: config directory relative to the calling script
18 :param strict:
19 :param caller_stack_depth:
20 :return:
21 """
22 calling_file, calling_module = detect_calling_file_or_module_from_stack_frame(
23 caller_stack_depth + 1
24 )
25 Hydra.create_main_hydra_file_or_module(
26 calling_file, calling_module, config_dir, strict
27 )
28
29
30 def compose(
31 config_name: Optional[str] = None,
32 overrides: List[str] = [],
33 strict: Optional[bool] = None,
34 ) -> DictConfig:
35 """
36 :param config_name: optional config name to load
37 :param overrides: list of overrides for config file
38 :param strict: optionally override the default strict mode
39 :return: the composed config
40 """
41 assert (
42 GlobalHydra().is_initialized()
43 ), "GlobalHydra is not initialized, use @hydra.main() or call hydra.experimental.initialize() first"
44
45 gh = GlobalHydra.instance()
46 assert gh.hydra is not None
47 cfg = gh.hydra.compose_config(
48 config_name=config_name, overrides=overrides, strict=strict
49 )
50 assert isinstance(cfg, DictConfig)
51
52 if "hydra" in cfg:
53 with open_dict(cfg):
54 del cfg["hydra"]
55 return cfg
56
```
Path: `examples/advanced/ad_hoc_composition/hydra_compose_example.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from hydra.experimental import compose, initialize
3
4 if __name__ == "__main__":
5 # initialize the Hydra subsystem.
6 # This is needed for apps that cannot have a standard @hydra.main() entry point
7 initialize(config_dir="conf", strict=True)
8
9 cfg = compose("config.yaml", overrides=["db=mysql", "db.user=${env:USER}"])
10 print(cfg.pretty(resolve=True))
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/advanced/ad_hoc_composition/hydra_compose_example.py b/examples/advanced/ad_hoc_composition/hydra_compose_example.py
--- a/examples/advanced/ad_hoc_composition/hydra_compose_example.py
+++ b/examples/advanced/ad_hoc_composition/hydra_compose_example.py
@@ -4,7 +4,7 @@
if __name__ == "__main__":
# initialize the Hydra subsystem.
# This is needed for apps that cannot have a standard @hydra.main() entry point
- initialize(config_dir="conf", strict=True)
+ initialize(config_path="conf", strict=True)
cfg = compose("config.yaml", overrides=["db=mysql", "db.user=${env:USER}"])
print(cfg.pretty(resolve=True))
diff --git a/hydra/experimental/compose.py b/hydra/experimental/compose.py
--- a/hydra/experimental/compose.py
+++ b/hydra/experimental/compose.py
@@ -9,31 +9,58 @@
def initialize(
- config_dir: Optional[str] = None,
+ config_path: Optional[str] = None,
strict: Optional[bool] = None,
caller_stack_depth: int = 1,
) -> None:
"""
- :param config_dir: config directory relative to the calling script
- :param strict:
- :param caller_stack_depth:
- :return:
+ Initialize automatically detect the calling file or module.
+ config_path is relative to the detected calling for or module.
+
+ :param config_path: A directory relative to the declaring python file or module
+ :param strict: (Deprecated), will be removed in the next major version
+ :param caller_stack_depth: stack depth of module the config_path is relative to
"""
calling_file, calling_module = detect_calling_file_or_module_from_stack_frame(
caller_stack_depth + 1
)
Hydra.create_main_hydra_file_or_module(
- calling_file, calling_module, config_dir, strict
+ calling_file, calling_module, config_path, strict
)
+def initialize_with_file(
+ calling_file: Optional[str], config_path: Optional[str] = None
+) -> None:
+ """
+ Initialize Hydra and add the config_path to the search path.
+ The config path is relative to the calling_file.
+ :param calling_file : The file to make the config_path relative to
+ :param config_path : The config path
+ """
+ Hydra.create_main_hydra_file_or_module(calling_file, None, config_path, None)
+
+
+def initialize_with_module(
+ calling_module: Optional[str], config_path: Optional[str] = None
+) -> None:
+ """
+ Initialize Hydra and add the config_path to the search path.
+ The config path is relative to the calling_module.
+ :param calling_module : The module to make the config_path relative to
+ :param config_path : The config path
+ """
+
+ Hydra.create_main_hydra_file_or_module(None, calling_module, config_path, None)
+
+
def compose(
config_name: Optional[str] = None,
overrides: List[str] = [],
strict: Optional[bool] = None,
) -> DictConfig:
"""
- :param config_name: optional config name to load
+ :param config_name: the name of the config (usually the file name without the .yaml extension)
:param overrides: list of overrides for config file
:param strict: optionally override the default strict mode
:return: the composed config
| {"golden_diff": "diff --git a/examples/advanced/ad_hoc_composition/hydra_compose_example.py b/examples/advanced/ad_hoc_composition/hydra_compose_example.py\n--- a/examples/advanced/ad_hoc_composition/hydra_compose_example.py\n+++ b/examples/advanced/ad_hoc_composition/hydra_compose_example.py\n@@ -4,7 +4,7 @@\n if __name__ == \"__main__\":\n # initialize the Hydra subsystem.\n # This is needed for apps that cannot have a standard @hydra.main() entry point\n- initialize(config_dir=\"conf\", strict=True)\n+ initialize(config_path=\"conf\", strict=True)\n \n cfg = compose(\"config.yaml\", overrides=[\"db=mysql\", \"db.user=${env:USER}\"])\n print(cfg.pretty(resolve=True))\ndiff --git a/hydra/experimental/compose.py b/hydra/experimental/compose.py\n--- a/hydra/experimental/compose.py\n+++ b/hydra/experimental/compose.py\n@@ -9,31 +9,58 @@\n \n \n def initialize(\n- config_dir: Optional[str] = None,\n+ config_path: Optional[str] = None,\n strict: Optional[bool] = None,\n caller_stack_depth: int = 1,\n ) -> None:\n \"\"\"\n- :param config_dir: config directory relative to the calling script\n- :param strict:\n- :param caller_stack_depth:\n- :return:\n+ Initialize automatically detect the calling file or module.\n+ config_path is relative to the detected calling for or module.\n+\n+ :param config_path: A directory relative to the declaring python file or module\n+ :param strict: (Deprecated), will be removed in the next major version\n+ :param caller_stack_depth: stack depth of module the config_path is relative to\n \"\"\"\n calling_file, calling_module = detect_calling_file_or_module_from_stack_frame(\n caller_stack_depth + 1\n )\n Hydra.create_main_hydra_file_or_module(\n- calling_file, calling_module, config_dir, strict\n+ calling_file, calling_module, config_path, strict\n )\n \n \n+def initialize_with_file(\n+ calling_file: Optional[str], config_path: Optional[str] = None\n+) -> None:\n+ \"\"\"\n+ Initialize Hydra and add the config_path to the search path.\n+ The config path is relative to the calling_file.\n+ :param calling_file : The file to make the config_path relative to\n+ :param config_path : The config path\n+ \"\"\"\n+ Hydra.create_main_hydra_file_or_module(calling_file, None, config_path, None)\n+\n+\n+def initialize_with_module(\n+ calling_module: Optional[str], config_path: Optional[str] = None\n+) -> None:\n+ \"\"\"\n+ Initialize Hydra and add the config_path to the search path.\n+ The config path is relative to the calling_module.\n+ :param calling_module : The module to make the config_path relative to\n+ :param config_path : The config path\n+ \"\"\"\n+\n+ Hydra.create_main_hydra_file_or_module(None, calling_module, config_path, None)\n+\n+\n def compose(\n config_name: Optional[str] = None,\n overrides: List[str] = [],\n strict: Optional[bool] = None,\n ) -> DictConfig:\n \"\"\"\n- :param config_name: optional config name to load\n+ :param config_name: the name of the config (usually the file name without the .yaml extension)\n :param overrides: list of overrides for config file\n :param strict: optionally override the default strict mode\n :return: the composed config\n", "issue": "[Feature Request] Allow Initialize to take a module\n# \ud83d\ude80 Feature Request\r\n\r\nAllow hydra.experimental.initialize to take a calling module\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom typing import List, Optional\n\nfrom omegaconf import DictConfig, open_dict\n\nfrom hydra._internal.hydra import Hydra\nfrom hydra._internal.utils import detect_calling_file_or_module_from_stack_frame\nfrom hydra.core.global_hydra import GlobalHydra\n\n\ndef initialize(\n config_dir: Optional[str] = None,\n strict: Optional[bool] = None,\n caller_stack_depth: int = 1,\n) -> None:\n \"\"\"\n :param config_dir: config directory relative to the calling script\n :param strict:\n :param caller_stack_depth:\n :return:\n \"\"\"\n calling_file, calling_module = detect_calling_file_or_module_from_stack_frame(\n caller_stack_depth + 1\n )\n Hydra.create_main_hydra_file_or_module(\n calling_file, calling_module, config_dir, strict\n )\n\n\ndef compose(\n config_name: Optional[str] = None,\n overrides: List[str] = [],\n strict: Optional[bool] = None,\n) -> DictConfig:\n \"\"\"\n :param config_name: optional config name to load\n :param overrides: list of overrides for config file\n :param strict: optionally override the default strict mode\n :return: the composed config\n \"\"\"\n assert (\n GlobalHydra().is_initialized()\n ), \"GlobalHydra is not initialized, use @hydra.main() or call hydra.experimental.initialize() first\"\n\n gh = GlobalHydra.instance()\n assert gh.hydra is not None\n cfg = gh.hydra.compose_config(\n config_name=config_name, overrides=overrides, strict=strict\n )\n assert isinstance(cfg, DictConfig)\n\n if \"hydra\" in cfg:\n with open_dict(cfg):\n del cfg[\"hydra\"]\n return cfg\n", "path": "hydra/experimental/compose.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom hydra.experimental import compose, initialize\n\nif __name__ == \"__main__\":\n # initialize the Hydra subsystem.\n # This is needed for apps that cannot have a standard @hydra.main() entry point\n initialize(config_dir=\"conf\", strict=True)\n\n cfg = compose(\"config.yaml\", overrides=[\"db=mysql\", \"db.user=${env:USER}\"])\n print(cfg.pretty(resolve=True))\n", "path": "examples/advanced/ad_hoc_composition/hydra_compose_example.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom typing import List, Optional\n\nfrom omegaconf import DictConfig, open_dict\n\nfrom hydra._internal.hydra import Hydra\nfrom hydra._internal.utils import detect_calling_file_or_module_from_stack_frame\nfrom hydra.core.global_hydra import GlobalHydra\n\n\ndef initialize(\n config_path: Optional[str] = None,\n strict: Optional[bool] = None,\n caller_stack_depth: int = 1,\n) -> None:\n \"\"\"\n Initialize automatically detect the calling file or module.\n config_path is relative to the detected calling for or module.\n\n :param config_path: A directory relative to the declaring python file or module\n :param strict: (Deprecated), will be removed in the next major version\n :param caller_stack_depth: stack depth of module the config_path is relative to\n \"\"\"\n calling_file, calling_module = detect_calling_file_or_module_from_stack_frame(\n caller_stack_depth + 1\n )\n Hydra.create_main_hydra_file_or_module(\n calling_file, calling_module, config_path, strict\n )\n\n\ndef initialize_with_file(\n calling_file: Optional[str], config_path: Optional[str] = None\n) -> None:\n \"\"\"\n Initialize Hydra and add the config_path to the search path.\n The config path is relative to the calling_file.\n :param calling_file : The file to make the config_path relative to\n :param config_path : The config path\n \"\"\"\n Hydra.create_main_hydra_file_or_module(calling_file, None, config_path, None)\n\n\ndef initialize_with_module(\n calling_module: Optional[str], config_path: Optional[str] = None\n) -> None:\n \"\"\"\n Initialize Hydra and add the config_path to the search path.\n The config path is relative to the calling_module.\n :param calling_module : The module to make the config_path relative to\n :param config_path : The config path\n \"\"\"\n\n Hydra.create_main_hydra_file_or_module(None, calling_module, config_path, None)\n\n\ndef compose(\n config_name: Optional[str] = None,\n overrides: List[str] = [],\n strict: Optional[bool] = None,\n) -> DictConfig:\n \"\"\"\n :param config_name: the name of the config (usually the file name without the .yaml extension)\n :param overrides: list of overrides for config file\n :param strict: optionally override the default strict mode\n :return: the composed config\n \"\"\"\n assert (\n GlobalHydra().is_initialized()\n ), \"GlobalHydra is not initialized, use @hydra.main() or call hydra.experimental.initialize() first\"\n\n gh = GlobalHydra.instance()\n assert gh.hydra is not None\n cfg = gh.hydra.compose_config(\n config_name=config_name, overrides=overrides, strict=strict\n )\n assert isinstance(cfg, DictConfig)\n\n if \"hydra\" in cfg:\n with open_dict(cfg):\n del cfg[\"hydra\"]\n return cfg\n", "path": "hydra/experimental/compose.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom hydra.experimental import compose, initialize\n\nif __name__ == \"__main__\":\n # initialize the Hydra subsystem.\n # This is needed for apps that cannot have a standard @hydra.main() entry point\n initialize(config_path=\"conf\", strict=True)\n\n cfg = compose(\"config.yaml\", overrides=[\"db=mysql\", \"db.user=${env:USER}\"])\n print(cfg.pretty(resolve=True))\n", "path": "examples/advanced/ad_hoc_composition/hydra_compose_example.py"}]} | 935 | 789 |
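The patch above renames `config_dir` to `config_path` and adds `initialize_with_file` / `initialize_with_module`. A hedged usage sketch based only on the signatures shown in this row — the module name and config name are placeholders, and the helpers are imported from the module that defines them rather than assuming a package-level re-export:

```python
# Placeholder names throughout; only the function signatures come from the patch above.
from hydra.experimental.compose import compose, initialize_with_module

# Resolve "conf/" relative to the named module instead of the caller's stack frame.
initialize_with_module(calling_module="my_app.entry", config_path="conf")

cfg = compose(config_name="config", overrides=["db=mysql"])
print(cfg.pretty(resolve=True))
```

Choosing the module-based variant avoids the stack-frame inspection that plain `initialize()` relies on, which is what the feature request asks for.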
gh_patches_debug_25543 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5756 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_265 false positive
**Describe the issue**
CKV_AWS_265 flags Keyspaces tables as not using a customer-managed KMS key even though they are configured to use one.
This is because the code is looking for ["CUSTOMER_MANAGED_KEY"](https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py#L17)
The specification, however, actually lists ["CUSTOMER_MANAGED_KMS_KEY"](https://docs.aws.amazon.com/keyspaces/latest/APIReference/API_EncryptionSpecification.html).
**Examples**
```
resource "aws_keyspaces_table" "keyspace_table" {
keyspace_name = "foo"
table_name = "bar"
encryption_specification {
## This should not fail the check CKV_AWS_265 but it does
kms_key_identifier = var.kms_key_arn
type = "CUSTOMER_MANAGED_KMS_KEY"
}
}
```
**Version (please complete the following information):**
- 3.0.32
**Additional context**
```
~/Downloads> checkov -f ./keyspaces.tf --support
2023-11-10 09:21:38,953 [MainThread ] [WARNI] --bc-api-key argument is required when using --support
[ terraform framework ]: 100%|████████████████████|[1/1], Current File Scanned=keyspaces.tf
[ secrets framework ]: 100%|████████████████████|[1/1], Current File Scanned=./keyspaces.tf
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By Prisma Cloud | version: 3.0.32
terraform scan results:
Passed checks: 0, Failed checks: 1, Skipped checks: 0
Check: CKV_AWS_265: "Ensure Keyspaces Table uses CMK"
FAILED for resource: aws_keyspaces_table.keyspace_table
File: /keyspaces.tf:1-9
Guide: https://docs.prismacloud.io/en/enterprise-edition/policy-reference/aws-policies/aws-general-policies/ensure-aws-keyspace-table-uses-customer-managed-keys-cmks
1 | resource "aws_keyspaces_table" "keyspace_table" {
2 | keyspace_name = "foo"
3 | table_name = "bar"
4 | encryption_specification {
5 | ## This should not fail the check CKV_AWS_265 but it does
6 | kms_key_identifier = var.kms_key_arn
7 | type = "CUSTOMER_MANAGED_KMS_KEY"
8 | }
9 | }
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class KeyspacesTableUsesCMK(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure Keyspaces Table uses CMK"
8 id = "CKV_AWS_265"
9 supported_resources = ['aws_keyspaces_table']
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf) -> CheckResult:
14 if conf.get("encryption_specification") and isinstance(conf.get("encryption_specification"), list):
15 encrypt = conf.get("encryption_specification")[0]
16 if encrypt.get("kms_key_identifier") and isinstance(encrypt.get("kms_key_identifier"), list):
17 if encrypt.get("type") == ["CUSTOMER_MANAGED_KEY"]:
18 return CheckResult.PASSED
19 self.evaluated_keys = ["encryption_specification/[0]/type"]
20 self.evaluated_keys = ["encryption_specification/[0]/kms_key_identifier"]
21 self.evaluated_keys = ["encryption_specification"]
22 return CheckResult.FAILED
23
24
25 check = KeyspacesTableUsesCMK()
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py b/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py
--- a/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py
+++ b/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py
@@ -3,6 +3,10 @@
class KeyspacesTableUsesCMK(BaseResourceCheck):
+ """
+ Valid values for encryption_specification type:
+ AWS_OWNED_KMS_KEY (default), CUSTOMER_MANAGED_KMS_KEY (requires kms_key_identifier:ARN)
+ """
def __init__(self):
name = "Ensure Keyspaces Table uses CMK"
id = "CKV_AWS_265"
@@ -14,7 +18,7 @@
if conf.get("encryption_specification") and isinstance(conf.get("encryption_specification"), list):
encrypt = conf.get("encryption_specification")[0]
if encrypt.get("kms_key_identifier") and isinstance(encrypt.get("kms_key_identifier"), list):
- if encrypt.get("type") == ["CUSTOMER_MANAGED_KEY"]:
+ if encrypt.get("type") == ["CUSTOMER_MANAGED_KMS_KEY"]:
return CheckResult.PASSED
self.evaluated_keys = ["encryption_specification/[0]/type"]
self.evaluated_keys = ["encryption_specification/[0]/kms_key_identifier"]
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py b/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py\n--- a/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py\n+++ b/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py\n@@ -3,6 +3,10 @@\n \n \n class KeyspacesTableUsesCMK(BaseResourceCheck):\n+ \"\"\"\n+ Valid values for encryption_specification type:\n+ AWS_OWNED_KMS_KEY (default), CUSTOMER_MANAGED_KMS_KEY (requires kms_key_identifier:ARN)\n+ \"\"\"\n def __init__(self):\n name = \"Ensure Keyspaces Table uses CMK\"\n id = \"CKV_AWS_265\"\n@@ -14,7 +18,7 @@\n if conf.get(\"encryption_specification\") and isinstance(conf.get(\"encryption_specification\"), list):\n encrypt = conf.get(\"encryption_specification\")[0]\n if encrypt.get(\"kms_key_identifier\") and isinstance(encrypt.get(\"kms_key_identifier\"), list):\n- if encrypt.get(\"type\") == [\"CUSTOMER_MANAGED_KEY\"]:\n+ if encrypt.get(\"type\") == [\"CUSTOMER_MANAGED_KMS_KEY\"]:\n return CheckResult.PASSED\n self.evaluated_keys = [\"encryption_specification/[0]/type\"]\n self.evaluated_keys = [\"encryption_specification/[0]/kms_key_identifier\"]\n", "issue": "CKV_AWS_265 false positive\n**Describe the issue**\r\nCKV_AWS_265 fails checks on KeySpaces not using a Customer Managed KMS key even though it is configured to do so\r\n\r\nThis is because the code is looking for [\"CUSTOMER_MANAGED_KEY\"](https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py#L17)\r\n\r\nWhen the specification is listed as actually [\"CUSTOMER_MANAGED_KMS_KEY\"](https://docs.aws.amazon.com/keyspaces/latest/APIReference/API_EncryptionSpecification.html)\r\n\r\n**Examples**\r\n```\r\nresource \"aws_keyspaces_table\" \"keyspace_table\" {\r\n keyspace_name = \"foo\"\r\n table_name = \"bar\"\r\n encryption_specification {\r\n ## This should not fail the check CKV_AWS_265 but it does\r\n kms_key_identifier = var.kms_key_arn\r\n type = \"CUSTOMER_MANAGED_KMS_KEY\"\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - 3.0.32\r\n\r\n**Additional context**\r\n\r\n```\r\n~/Downloads> checkov -f ./keyspaces.tf --support\r\n2023-11-10 09:21:38,953 [MainThread ] [WARNI] --bc-api-key argument is required when using --support\r\n[ terraform framework ]: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588|[1/1], Current File Scanned=keyspaces.tf\r\n[ secrets framework ]: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588|[1/1], Current File Scanned=./keyspaces.tf\r\n\r\n _ _\r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V /\r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/\r\n\r\nBy Prisma Cloud | version: 3.0.32\r\n\r\nterraform scan results:\r\n\r\nPassed checks: 0, Failed checks: 1, Skipped checks: 0\r\n\r\nCheck: CKV_AWS_265: \"Ensure Keyspaces Table uses CMK\"\r\n\tFAILED for resource: aws_keyspaces_table.keyspace_table\r\n\tFile: /keyspaces.tf:1-9\r\n\tGuide: https://docs.prismacloud.io/en/enterprise-edition/policy-reference/aws-policies/aws-general-policies/ensure-aws-keyspace-table-uses-customer-managed-keys-cmks\r\n\r\n\t\t1 | resource \"aws_keyspaces_table\" \"keyspace_table\" {\r\n\t\t2 | keyspace_name = \"foo\"\r\n\t\t3 | table_name = \"bar\"\r\n\t\t4 | encryption_specification {\r\n\t\t5 
| ## This should not fail the check CKV_AWS_265 but it does\r\n\t\t6 | kms_key_identifier = var.kms_key_arn\r\n\t\t7 | type = \"CUSTOMER_MANAGED_KMS_KEY\"\r\n\t\t8 | }\r\n\t\t9 | }\r\n```\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass KeyspacesTableUsesCMK(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure Keyspaces Table uses CMK\"\n id = \"CKV_AWS_265\"\n supported_resources = ['aws_keyspaces_table']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n if conf.get(\"encryption_specification\") and isinstance(conf.get(\"encryption_specification\"), list):\n encrypt = conf.get(\"encryption_specification\")[0]\n if encrypt.get(\"kms_key_identifier\") and isinstance(encrypt.get(\"kms_key_identifier\"), list):\n if encrypt.get(\"type\") == [\"CUSTOMER_MANAGED_KEY\"]:\n return CheckResult.PASSED\n self.evaluated_keys = [\"encryption_specification/[0]/type\"]\n self.evaluated_keys = [\"encryption_specification/[0]/kms_key_identifier\"]\n self.evaluated_keys = [\"encryption_specification\"]\n return CheckResult.FAILED\n\n\ncheck = KeyspacesTableUsesCMK()\n", "path": "checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass KeyspacesTableUsesCMK(BaseResourceCheck):\n \"\"\"\n Valid values for encryption_specification type:\n AWS_OWNED_KMS_KEY (default), CUSTOMER_MANAGED_KMS_KEY (requires kms_key_identifier:ARN)\n \"\"\"\n def __init__(self):\n name = \"Ensure Keyspaces Table uses CMK\"\n id = \"CKV_AWS_265\"\n supported_resources = ['aws_keyspaces_table']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n if conf.get(\"encryption_specification\") and isinstance(conf.get(\"encryption_specification\"), list):\n encrypt = conf.get(\"encryption_specification\")[0]\n if encrypt.get(\"kms_key_identifier\") and isinstance(encrypt.get(\"kms_key_identifier\"), list):\n if encrypt.get(\"type\") == [\"CUSTOMER_MANAGED_KMS_KEY\"]:\n return CheckResult.PASSED\n self.evaluated_keys = [\"encryption_specification/[0]/type\"]\n self.evaluated_keys = [\"encryption_specification/[0]/kms_key_identifier\"]\n self.evaluated_keys = [\"encryption_specification\"]\n return CheckResult.FAILED\n\n\ncheck = KeyspacesTableUsesCMK()\n", "path": "checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py"}]} | 1,267 | 316 |
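The fix above is a single-token change (`CUSTOMER_MANAGED_KEY` → `CUSTOMER_MANAGED_KMS_KEY`). As a sketch, the patched check could be exercised directly with a hand-built `conf` dict; the list-wrapped values mirror how checkov hands parsed HCL to `scan_resource_conf`, and the key ARN is a made-up placeholder:

```python
# Illustrative only: drive the patched check with a hand-built resource conf.
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.aws.KeyspacesTableUsesCMK import check

conf = {
    "keyspace_name": ["foo"],
    "table_name": ["bar"],
    "encryption_specification": [{
        "kms_key_identifier": ["arn:aws:kms:eu-west-1:123456789012:key/placeholder"],
        "type": ["CUSTOMER_MANAGED_KMS_KEY"],
    }],
}

# With the patch applied this passes instead of raising the false positive from the issue.
assert check.scan_resource_conf(conf) == CheckResult.PASSED
```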
gh_patches_debug_24317 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-3757 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sorting of touristic events in the API
By default, events are not sorted by their start date, which means they appear out of order in Geotrek-Rando.
The API needs to be modified to change this.
https://github.com/GeotrekCE/Geotrek-admin/blob/master/geotrek/api/v2/views/tourism.py#LL103C30-L103C30
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/api/v2/views/tourism.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.gis.db.models.functions import Transform
3 from django.db.models import F, Case, When
4 from django.db.models.query import Prefetch
5 from django.shortcuts import get_object_or_404
6 from django.utils.translation import activate
7
8 from rest_framework.response import Response
9
10 from geotrek.api.v2 import serializers as api_serializers, \
11 filters as api_filters, viewsets as api_viewsets
12 from geotrek.api.v2.decorators import cache_response_detail
13 from geotrek.common.models import Attachment
14 from geotrek.tourism import models as tourism_models
15
16
17 class LabelAccessibilityViewSet(api_viewsets.GeotrekViewSet):
18 serializer_class = api_serializers.LabelAccessibilitySerializer
19 queryset = tourism_models.LabelAccessibility.objects.order_by('pk') # Required for reliable pagination
20
21
22 class TouristicContentCategoryViewSet(api_viewsets.GeotrekViewSet):
23 filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.TouristicContentRelatedPortalFilter,)
24 serializer_class = api_serializers.TouristicContentCategorySerializer
25 queryset = tourism_models.TouristicContentCategory.objects \
26 .prefetch_related('types') \
27 .order_by('pk') # Required for reliable pagination
28
29 @cache_response_detail()
30 def retrieve(self, request, pk=None, format=None):
31 # Allow to retrieve objects even if not visible in list view
32 elem = get_object_or_404(tourism_models.TouristicContentCategory, pk=pk)
33 serializer = api_serializers.TouristicContentCategorySerializer(elem, many=False, context={'request': request})
34 return Response(serializer.data)
35
36
37 class TouristicContentViewSet(api_viewsets.GeotrekGeometricViewset):
38 filter_backends = api_viewsets.GeotrekGeometricViewset.filter_backends + (
39 api_filters.GeotrekTouristicContentFilter,
40 api_filters.NearbyContentFilter,
41 api_filters.UpdateOrCreateDateFilter
42 )
43 serializer_class = api_serializers.TouristicContentSerializer
44
45 def get_queryset(self):
46 activate(self.request.GET.get('language'))
47 return tourism_models.TouristicContent.objects.existing()\
48 .select_related('category', 'reservation_system', 'label_accessibility') \
49 .prefetch_related('source', 'themes', 'type1', 'type2',
50 Prefetch('attachments',
51 queryset=Attachment.objects.select_related('license', 'filetype__structure').order_by('starred', '-date_insert'))
52 ) \
53 .annotate(geom_transformed=Transform(F('geom'), settings.API_SRID)) \
54 .order_by('name') # Required for reliable pagination
55
56
57 class InformationDeskTypeViewSet(api_viewsets.GeotrekViewSet):
58 serializer_class = api_serializers.InformationDeskTypeSerializer
59 queryset = tourism_models.InformationDeskType.objects.order_by('pk')
60
61
62 class InformationDeskViewSet(api_viewsets.GeotrekViewSet):
63 filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.TreksAndSitesRelatedPortalFilter,
64 api_filters.GeotrekInformationDeskFilter)
65 serializer_class = api_serializers.InformationDeskSerializer
66
67 def get_queryset(self):
68 activate(self.request.GET.get('language'))
69 return tourism_models.InformationDesk.objects.select_related('label_accessibility', 'type').order_by('name')
70
71 @cache_response_detail()
72 def retrieve(self, request, pk=None, format=None):
73 # Allow to retrieve objects even if not visible in list view
74 elem = get_object_or_404(tourism_models.InformationDesk, pk=pk)
75 serializer = api_serializers.InformationDeskSerializer(elem, many=False, context={'request': request})
76 return Response(serializer.data)
77
78
79 class TouristicEventTypeViewSet(api_viewsets.GeotrekViewSet):
80 filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.TouristicEventRelatedPortalFilter, )
81 serializer_class = api_serializers.TouristicEventTypeSerializer
82 queryset = tourism_models.TouristicEventType.objects.order_by('pk') # Required for reliable pagination
83
84
85 class TouristicEventViewSet(api_viewsets.GeotrekGeometricViewset):
86 filter_backends = api_viewsets.GeotrekGeometricViewset.filter_backends + (
87 api_filters.GeotrekTouristicEventFilter,
88 api_filters.NearbyContentFilter,
89 api_filters.UpdateOrCreateDateFilter,
90 )
91 filterset_class = api_filters.TouristicEventFilterSet
92 serializer_class = api_serializers.TouristicEventSerializer
93
94 def get_queryset(self):
95 activate(self.request.GET.get('language'))
96 return tourism_models.TouristicEvent.objects.existing()\
97 .select_related('type') \
98 .prefetch_related('themes', 'source', 'portal',
99 Prefetch('attachments',
100 queryset=Attachment.objects.select_related('license', 'filetype', 'filetype__structure'))
101 ) \
102 .annotate(geom_transformed=Transform(F('geom'), settings.API_SRID)) \
103 .annotate(ordering_date=Case(
104 When(end_date__isnull=True, then=F('begin_date')),
105 default=F('end_date'),
106 )) \
107 .order_by('ordering_date') # Required for reliable pagination
108
109
110 class TouristicEventPlaceViewSet(api_viewsets.GeotrekGeometricViewset):
111 filter_backends = api_viewsets.GeotrekGeometricViewset.filter_backends + (
112 api_filters.UpdateOrCreateDateFilter,
113 api_filters.TouristicEventsRelatedPortalFilter
114 )
115 serializer_class = api_serializers.TouristicEventPlaceSerializer
116
117 def get_queryset(self):
118 return tourism_models.TouristicEventPlace.objects.prefetch_related('touristicevents').annotate(
119 geom_transformed=Transform('geom', settings.API_SRID)
120 ).order_by('name')
121
122
123 class TouristicEventOrganizerViewSet(api_viewsets.GeotrekGeometricViewset):
124 filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (
125 api_filters.UpdateOrCreateDateFilter,
126 api_filters.TouristicEventRelatedPortalFilter
127 )
128 serializer_class = api_serializers.TouristicEventOrganizerSerializer
129 queryset = tourism_models.TouristicEventOrganizer.objects.order_by('label')
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geotrek/api/v2/views/tourism.py b/geotrek/api/v2/views/tourism.py
--- a/geotrek/api/v2/views/tourism.py
+++ b/geotrek/api/v2/views/tourism.py
@@ -1,6 +1,6 @@
from django.conf import settings
from django.contrib.gis.db.models.functions import Transform
-from django.db.models import F, Case, When
+from django.db.models import F
from django.db.models.query import Prefetch
from django.shortcuts import get_object_or_404
from django.utils.translation import activate
@@ -100,11 +100,7 @@
queryset=Attachment.objects.select_related('license', 'filetype', 'filetype__structure'))
) \
.annotate(geom_transformed=Transform(F('geom'), settings.API_SRID)) \
- .annotate(ordering_date=Case(
- When(end_date__isnull=True, then=F('begin_date')),
- default=F('end_date'),
- )) \
- .order_by('ordering_date') # Required for reliable pagination
+ .order_by('begin_date') # Required for reliable pagination
class TouristicEventPlaceViewSet(api_viewsets.GeotrekGeometricViewset):
| {"golden_diff": "diff --git a/geotrek/api/v2/views/tourism.py b/geotrek/api/v2/views/tourism.py\n--- a/geotrek/api/v2/views/tourism.py\n+++ b/geotrek/api/v2/views/tourism.py\n@@ -1,6 +1,6 @@\n from django.conf import settings\n from django.contrib.gis.db.models.functions import Transform\n-from django.db.models import F, Case, When\n+from django.db.models import F\n from django.db.models.query import Prefetch\n from django.shortcuts import get_object_or_404\n from django.utils.translation import activate\n@@ -100,11 +100,7 @@\n queryset=Attachment.objects.select_related('license', 'filetype', 'filetype__structure'))\n ) \\\n .annotate(geom_transformed=Transform(F('geom'), settings.API_SRID)) \\\n- .annotate(ordering_date=Case(\n- When(end_date__isnull=True, then=F('begin_date')),\n- default=F('end_date'),\n- )) \\\n- .order_by('ordering_date') # Required for reliable pagination\n+ .order_by('begin_date') # Required for reliable pagination\n \n \n class TouristicEventPlaceViewSet(api_viewsets.GeotrekGeometricViewset):\n", "issue": "Tri des \u00e9v\u00e8nements touristiques dans l'API\nPar d\u00e9faut les \u00e9v\u00e8nement ne sont pas tri\u00e9s par date de d\u00e9part, ce qui implique qu'ils apparaissent dans le d\u00e9sordre dans Geotrek-Rando.\r\n\r\nIl faut modifier l'API pour changer cela.\r\n\r\nhttps://github.com/GeotrekCE/Geotrek-admin/blob/master/geotrek/api/v2/views/tourism.py#LL103C30-L103C30\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.gis.db.models.functions import Transform\nfrom django.db.models import F, Case, When\nfrom django.db.models.query import Prefetch\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import activate\n\nfrom rest_framework.response import Response\n\nfrom geotrek.api.v2 import serializers as api_serializers, \\\n filters as api_filters, viewsets as api_viewsets\nfrom geotrek.api.v2.decorators import cache_response_detail\nfrom geotrek.common.models import Attachment\nfrom geotrek.tourism import models as tourism_models\n\n\nclass LabelAccessibilityViewSet(api_viewsets.GeotrekViewSet):\n serializer_class = api_serializers.LabelAccessibilitySerializer\n queryset = tourism_models.LabelAccessibility.objects.order_by('pk') # Required for reliable pagination\n\n\nclass TouristicContentCategoryViewSet(api_viewsets.GeotrekViewSet):\n filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.TouristicContentRelatedPortalFilter,)\n serializer_class = api_serializers.TouristicContentCategorySerializer\n queryset = tourism_models.TouristicContentCategory.objects \\\n .prefetch_related('types') \\\n .order_by('pk') # Required for reliable pagination\n\n @cache_response_detail()\n def retrieve(self, request, pk=None, format=None):\n # Allow to retrieve objects even if not visible in list view\n elem = get_object_or_404(tourism_models.TouristicContentCategory, pk=pk)\n serializer = api_serializers.TouristicContentCategorySerializer(elem, many=False, context={'request': request})\n return Response(serializer.data)\n\n\nclass TouristicContentViewSet(api_viewsets.GeotrekGeometricViewset):\n filter_backends = api_viewsets.GeotrekGeometricViewset.filter_backends + (\n api_filters.GeotrekTouristicContentFilter,\n api_filters.NearbyContentFilter,\n api_filters.UpdateOrCreateDateFilter\n )\n serializer_class = api_serializers.TouristicContentSerializer\n\n def get_queryset(self):\n activate(self.request.GET.get('language'))\n return 
tourism_models.TouristicContent.objects.existing()\\\n .select_related('category', 'reservation_system', 'label_accessibility') \\\n .prefetch_related('source', 'themes', 'type1', 'type2',\n Prefetch('attachments',\n queryset=Attachment.objects.select_related('license', 'filetype__structure').order_by('starred', '-date_insert'))\n ) \\\n .annotate(geom_transformed=Transform(F('geom'), settings.API_SRID)) \\\n .order_by('name') # Required for reliable pagination\n\n\nclass InformationDeskTypeViewSet(api_viewsets.GeotrekViewSet):\n serializer_class = api_serializers.InformationDeskTypeSerializer\n queryset = tourism_models.InformationDeskType.objects.order_by('pk')\n\n\nclass InformationDeskViewSet(api_viewsets.GeotrekViewSet):\n filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.TreksAndSitesRelatedPortalFilter,\n api_filters.GeotrekInformationDeskFilter)\n serializer_class = api_serializers.InformationDeskSerializer\n\n def get_queryset(self):\n activate(self.request.GET.get('language'))\n return tourism_models.InformationDesk.objects.select_related('label_accessibility', 'type').order_by('name')\n\n @cache_response_detail()\n def retrieve(self, request, pk=None, format=None):\n # Allow to retrieve objects even if not visible in list view\n elem = get_object_or_404(tourism_models.InformationDesk, pk=pk)\n serializer = api_serializers.InformationDeskSerializer(elem, many=False, context={'request': request})\n return Response(serializer.data)\n\n\nclass TouristicEventTypeViewSet(api_viewsets.GeotrekViewSet):\n filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.TouristicEventRelatedPortalFilter, )\n serializer_class = api_serializers.TouristicEventTypeSerializer\n queryset = tourism_models.TouristicEventType.objects.order_by('pk') # Required for reliable pagination\n\n\nclass TouristicEventViewSet(api_viewsets.GeotrekGeometricViewset):\n filter_backends = api_viewsets.GeotrekGeometricViewset.filter_backends + (\n api_filters.GeotrekTouristicEventFilter,\n api_filters.NearbyContentFilter,\n api_filters.UpdateOrCreateDateFilter,\n )\n filterset_class = api_filters.TouristicEventFilterSet\n serializer_class = api_serializers.TouristicEventSerializer\n\n def get_queryset(self):\n activate(self.request.GET.get('language'))\n return tourism_models.TouristicEvent.objects.existing()\\\n .select_related('type') \\\n .prefetch_related('themes', 'source', 'portal',\n Prefetch('attachments',\n queryset=Attachment.objects.select_related('license', 'filetype', 'filetype__structure'))\n ) \\\n .annotate(geom_transformed=Transform(F('geom'), settings.API_SRID)) \\\n .annotate(ordering_date=Case(\n When(end_date__isnull=True, then=F('begin_date')),\n default=F('end_date'),\n )) \\\n .order_by('ordering_date') # Required for reliable pagination\n\n\nclass TouristicEventPlaceViewSet(api_viewsets.GeotrekGeometricViewset):\n filter_backends = api_viewsets.GeotrekGeometricViewset.filter_backends + (\n api_filters.UpdateOrCreateDateFilter,\n api_filters.TouristicEventsRelatedPortalFilter\n )\n serializer_class = api_serializers.TouristicEventPlaceSerializer\n\n def get_queryset(self):\n return tourism_models.TouristicEventPlace.objects.prefetch_related('touristicevents').annotate(\n geom_transformed=Transform('geom', settings.API_SRID)\n ).order_by('name')\n\n\nclass TouristicEventOrganizerViewSet(api_viewsets.GeotrekGeometricViewset):\n filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (\n api_filters.UpdateOrCreateDateFilter,\n 
api_filters.TouristicEventRelatedPortalFilter\n )\n serializer_class = api_serializers.TouristicEventOrganizerSerializer\n queryset = tourism_models.TouristicEventOrganizer.objects.order_by('label')\n", "path": "geotrek/api/v2/views/tourism.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.gis.db.models.functions import Transform\nfrom django.db.models import F\nfrom django.db.models.query import Prefetch\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import activate\n\nfrom rest_framework.response import Response\n\nfrom geotrek.api.v2 import serializers as api_serializers, \\\n filters as api_filters, viewsets as api_viewsets\nfrom geotrek.api.v2.decorators import cache_response_detail\nfrom geotrek.common.models import Attachment\nfrom geotrek.tourism import models as tourism_models\n\n\nclass LabelAccessibilityViewSet(api_viewsets.GeotrekViewSet):\n serializer_class = api_serializers.LabelAccessibilitySerializer\n queryset = tourism_models.LabelAccessibility.objects.order_by('pk') # Required for reliable pagination\n\n\nclass TouristicContentCategoryViewSet(api_viewsets.GeotrekViewSet):\n filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.TouristicContentRelatedPortalFilter,)\n serializer_class = api_serializers.TouristicContentCategorySerializer\n queryset = tourism_models.TouristicContentCategory.objects \\\n .prefetch_related('types') \\\n .order_by('pk') # Required for reliable pagination\n\n @cache_response_detail()\n def retrieve(self, request, pk=None, format=None):\n # Allow to retrieve objects even if not visible in list view\n elem = get_object_or_404(tourism_models.TouristicContentCategory, pk=pk)\n serializer = api_serializers.TouristicContentCategorySerializer(elem, many=False, context={'request': request})\n return Response(serializer.data)\n\n\nclass TouristicContentViewSet(api_viewsets.GeotrekGeometricViewset):\n filter_backends = api_viewsets.GeotrekGeometricViewset.filter_backends + (\n api_filters.GeotrekTouristicContentFilter,\n api_filters.NearbyContentFilter,\n api_filters.UpdateOrCreateDateFilter\n )\n serializer_class = api_serializers.TouristicContentSerializer\n\n def get_queryset(self):\n activate(self.request.GET.get('language'))\n return tourism_models.TouristicContent.objects.existing()\\\n .select_related('category', 'reservation_system', 'label_accessibility') \\\n .prefetch_related('source', 'themes', 'type1', 'type2',\n Prefetch('attachments',\n queryset=Attachment.objects.select_related('license', 'filetype__structure').order_by('starred', '-date_insert'))\n ) \\\n .annotate(geom_transformed=Transform(F('geom'), settings.API_SRID)) \\\n .order_by('name') # Required for reliable pagination\n\n\nclass InformationDeskTypeViewSet(api_viewsets.GeotrekViewSet):\n serializer_class = api_serializers.InformationDeskTypeSerializer\n queryset = tourism_models.InformationDeskType.objects.order_by('pk')\n\n\nclass InformationDeskViewSet(api_viewsets.GeotrekViewSet):\n filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.TreksAndSitesRelatedPortalFilter,\n api_filters.GeotrekInformationDeskFilter)\n serializer_class = api_serializers.InformationDeskSerializer\n\n def get_queryset(self):\n activate(self.request.GET.get('language'))\n return tourism_models.InformationDesk.objects.select_related('label_accessibility', 'type').order_by('name')\n\n @cache_response_detail()\n def retrieve(self, request, pk=None, format=None):\n # Allow to retrieve 
objects even if not visible in list view\n elem = get_object_or_404(tourism_models.InformationDesk, pk=pk)\n serializer = api_serializers.InformationDeskSerializer(elem, many=False, context={'request': request})\n return Response(serializer.data)\n\n\nclass TouristicEventTypeViewSet(api_viewsets.GeotrekViewSet):\n filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.TouristicEventRelatedPortalFilter, )\n serializer_class = api_serializers.TouristicEventTypeSerializer\n queryset = tourism_models.TouristicEventType.objects.order_by('pk') # Required for reliable pagination\n\n\nclass TouristicEventViewSet(api_viewsets.GeotrekGeometricViewset):\n filter_backends = api_viewsets.GeotrekGeometricViewset.filter_backends + (\n api_filters.GeotrekTouristicEventFilter,\n api_filters.NearbyContentFilter,\n api_filters.UpdateOrCreateDateFilter,\n )\n filterset_class = api_filters.TouristicEventFilterSet\n serializer_class = api_serializers.TouristicEventSerializer\n\n def get_queryset(self):\n activate(self.request.GET.get('language'))\n return tourism_models.TouristicEvent.objects.existing()\\\n .select_related('type') \\\n .prefetch_related('themes', 'source', 'portal',\n Prefetch('attachments',\n queryset=Attachment.objects.select_related('license', 'filetype', 'filetype__structure'))\n ) \\\n .annotate(geom_transformed=Transform(F('geom'), settings.API_SRID)) \\\n .order_by('begin_date') # Required for reliable pagination\n\n\nclass TouristicEventPlaceViewSet(api_viewsets.GeotrekGeometricViewset):\n filter_backends = api_viewsets.GeotrekGeometricViewset.filter_backends + (\n api_filters.UpdateOrCreateDateFilter,\n api_filters.TouristicEventsRelatedPortalFilter\n )\n serializer_class = api_serializers.TouristicEventPlaceSerializer\n\n def get_queryset(self):\n return tourism_models.TouristicEventPlace.objects.prefetch_related('touristicevents').annotate(\n geom_transformed=Transform('geom', settings.API_SRID)\n ).order_by('name')\n\n\nclass TouristicEventOrganizerViewSet(api_viewsets.GeotrekGeometricViewset):\n filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (\n api_filters.UpdateOrCreateDateFilter,\n api_filters.TouristicEventRelatedPortalFilter\n )\n serializer_class = api_serializers.TouristicEventOrganizerSerializer\n queryset = tourism_models.TouristicEventOrganizer.objects.order_by('label')\n", "path": "geotrek/api/v2/views/tourism.py"}]} | 1,972 | 275 |
gh_patches_debug_253 | rasdani/github-patches | git_diff | kserve__kserve-2726 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Knative installation keeps failing in e2e tests
/kind bug
**What steps did you take and what happened:**
[A clear and concise description of what the bug is.]
The e2e tests are failing every now and then while running the knative installation step, more specifically while patching the configmap. A solution has to be provided so that the installation completes successfully using some kind of retry mechanism.
**What did you expect to happen:**
All e2e tests to run without any issues.
**Environment:**
e2e environment
--- END ISSUE ---
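The issue only asks for "some kind of retry mechanism" around the flaky configmap patch; it does not prescribe one. Purely as an illustration, a retry wrapper for the shell step could look like the sketch below (the command string, attempt count and delay are assumptions, not taken from the repository):

```python
# Illustrative only: retry a flaky shell step such as `kubectl patch configmap ...`.
# The attempt count, delay and command are assumed values, not the upstream fix.
import subprocess
import time

def run_with_retries(cmd, attempts=5, delay=10):
    for attempt in range(1, attempts + 1):
        if subprocess.run(cmd, shell=True).returncode == 0:
            return
        time.sleep(delay)
    raise RuntimeError("command still failing after %d attempts: %s" % (attempts, cmd))
```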
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kserve/setup.py`
Content:
```
1 # Copyright 2021 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import pathlib
15
16 import setuptools
17
18 TESTS_REQUIRES = [
19 'pytest',
20 'pytest-xdist',
21 'pytest-cov',
22 'pytest-asyncio',
23 'mypy',
24 'portforward',
25 ]
26
27 with open('requirements.txt') as f:
28 REQUIRES = f.readlines()
29
30 with open(pathlib.Path(__file__).parent.parent / 'VERSION') as version_file:
31 version = version_file.read().strip()
32
33 setuptools.setup(
34 name='kserve',
35 version=version,
36 author="The KServe Authors",
37 author_email='[email protected], [email protected], [email protected]',
38 license="Apache License Version 2.0",
39 url="https://github.com/kserve/kserve/tree/master/python/kserve",
40 description="KServe Python SDK",
41 long_description="Python SDK for KServe Server and Client.",
42 python_requires='>=3.7',
43 packages=[
44 'kserve',
45 'kserve.api',
46 'kserve.constants',
47 'kserve.models',
48 'kserve.utils',
49 'kserve.protocol',
50 'kserve.protocol.rest',
51 'kserve.protocol.grpc',
52 ],
53 package_data={'': ['requirements.txt']},
54 include_package_data=True,
55 zip_safe=False,
56 classifiers=[
57 'Intended Audience :: Developers',
58 'Intended Audience :: Education',
59 'Intended Audience :: Science/Research',
60 'Programming Language :: Python :: 3',
61 'Programming Language :: Python :: 3.7',
62 'Programming Language :: Python :: 3.8',
63 'Programming Language :: Python :: 3.9',
64 "License :: OSI Approved :: Apache Software License",
65 "Operating System :: OS Independent",
66 'Topic :: Scientific/Engineering',
67 'Topic :: Scientific/Engineering :: Artificial Intelligence',
68 'Topic :: Software Development',
69 'Topic :: Software Development :: Libraries',
70 'Topic :: Software Development :: Libraries :: Python Modules',
71 ],
72 install_requires=REQUIRES,
73 tests_require=TESTS_REQUIRES,
74 extras_require={'test': TESTS_REQUIRES}
75 )
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/kserve/setup.py b/python/kserve/setup.py
--- a/python/kserve/setup.py
+++ b/python/kserve/setup.py
@@ -21,7 +21,7 @@
'pytest-cov',
'pytest-asyncio',
'mypy',
- 'portforward',
+ 'portforward==0.4.0',
]
with open('requirements.txt') as f:
| {"golden_diff": "diff --git a/python/kserve/setup.py b/python/kserve/setup.py\n--- a/python/kserve/setup.py\n+++ b/python/kserve/setup.py\n@@ -21,7 +21,7 @@\n 'pytest-cov',\n 'pytest-asyncio',\n 'mypy',\n- 'portforward',\n+ 'portforward==0.4.0',\n ]\n \n with open('requirements.txt') as f:\n", "issue": "Knative installation keeps failing in e2e tests\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\nThe e2e tests are failing every now and then while running the knative installation step, more specifically while patching the configmap. A solution has to be provided so that the installation completes successfully using some kind of retry mechanism.\r\n\r\n**What did you expect to happen:**\r\nAll e2e tests to run without any issues.\r\n\r\n\r\n**Environment:**\r\n\r\ne2e environment\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pathlib\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'mypy',\n 'portforward',\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nwith open(pathlib.Path(__file__).parent.parent / 'VERSION') as version_file:\n version = version_file.read().strip()\n\nsetuptools.setup(\n name='kserve',\n version=version,\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.7',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.utils',\n 'kserve.protocol',\n 'kserve.protocol.rest',\n 'kserve.protocol.grpc',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kserve/setup.py"}], "after_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of 
the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pathlib\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'mypy',\n 'portforward==0.4.0',\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nwith open(pathlib.Path(__file__).parent.parent / 'VERSION') as version_file:\n version = version_file.read().strip()\n\nsetuptools.setup(\n name='kserve',\n version=version,\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.7',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.utils',\n 'kserve.protocol',\n 'kserve.protocol.rest',\n 'kserve.protocol.grpc',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kserve/setup.py"}]} | 1,098 | 91 |
gh_patches_debug_37628 | rasdani/github-patches | git_diff | ansible-collections__community.general-1595 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support GSSAPI authentication for ldap_* module
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
Add the attribute `sasl_class` with values `external` (default) or `gssapi` to `community.general/plugins/module_utils/ldap.py`
to select the SASL authentication mechanism for python-ldap when a user name and password are not provided. Currently, the Ansible modules default to EXTERNAL.
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
ldap_attr
ldap_attrs
ldap_entry
ldap_passwd
ldap_search
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
Adding the attribute `sasl_class` would make it possible to switch the default SASL authentication mechanism from `EXTERNAL` to `GSSAPI`, which would help Windows-based shops transparently authenticate to Active Directory and use the LDAP modules to query and adjust their AD servers even from non-Windows servers. 
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: admin group must exist
community.general.ldap_entry:
dn: CN=my_admin_group,OU=Groups,OU=Global,DC=dir,DC=example,DC=com
objectClass: group
server_uri: "{{ ad.url }}"
sasl_class: gssapi
state: present
```
<!--- HINT: You can also paste gist.github.com links for larger files -->
--- END ISSUE ---
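For context, the difference between the two SASL binds in python-ldap is a single class swap; a minimal sketch follows (the server URI is a placeholder assumption, and this is not the patch itself):

```python
# Minimal python-ldap sketch; the URI is a placeholder assumption.
import ldap
import ldap.sasl

conn = ldap.initialize("ldap://dc01.dir.example.com")
# Current behaviour: SASL EXTERNAL
# conn.sasl_interactive_bind_s("", ldap.sasl.external())
# Requested option: SASL GSSAPI, i.e. authenticate with the existing Kerberos/AD ticket
conn.sasl_interactive_bind_s("", ldap.sasl.gssapi())
```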
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/doc_fragments/ldap.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright: (c) 2016, Peter Sagerson <[email protected]>
4 # Copyright: (c) 2016, Jiri Tyr <[email protected]>
5 # Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <[email protected]>
6 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
7
8 from __future__ import (absolute_import, division, print_function)
9 __metaclass__ = type
10
11
12 class ModuleDocFragment(object):
13 # Standard LDAP documentation fragment
14 DOCUMENTATION = r'''
15 options:
16 bind_dn:
17 description:
18 - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.
19 - If this is blank, we'll use an anonymous bind.
20 type: str
21 bind_pw:
22 description:
23 - The password to use with I(bind_dn).
24 type: str
25 dn:
26 required: true
27 description:
28 - The DN of the entry to add or remove.
29 type: str
30 server_uri:
31 description:
32 - A URI to the LDAP server.
33 - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
34 type: str
35 default: ldapi:///
36 start_tls:
37 description:
38 - If true, we'll use the START_TLS LDAP extension.
39 type: bool
40 default: no
41 validate_certs:
42 description:
43 - If set to C(no), SSL certificates will not be validated.
44 - This should only be used on sites using self-signed certificates.
45 type: bool
46 default: yes
47 '''
48
```
Path: `plugins/module_utils/ldap.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright: (c) 2016, Peter Sagerson <[email protected]>
4 # Copyright: (c) 2016, Jiri Tyr <[email protected]>
5 # Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <[email protected]>
6 #
7 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
8
9 from __future__ import absolute_import, division, print_function
10 __metaclass__ = type
11
12 import traceback
13 from ansible.module_utils._text import to_native
14
15 try:
16 import ldap
17 import ldap.sasl
18
19 HAS_LDAP = True
20 except ImportError:
21 HAS_LDAP = False
22
23
24 def gen_specs(**specs):
25 specs.update({
26 'bind_dn': dict(),
27 'bind_pw': dict(default='', no_log=True),
28 'dn': dict(required=True),
29 'server_uri': dict(default='ldapi:///'),
30 'start_tls': dict(default=False, type='bool'),
31 'validate_certs': dict(default=True, type='bool'),
32 })
33
34 return specs
35
36
37 class LdapGeneric(object):
38 def __init__(self, module):
39 # Shortcuts
40 self.module = module
41 self.bind_dn = self.module.params['bind_dn']
42 self.bind_pw = self.module.params['bind_pw']
43 self.dn = self.module.params['dn']
44 self.server_uri = self.module.params['server_uri']
45 self.start_tls = self.module.params['start_tls']
46 self.verify_cert = self.module.params['validate_certs']
47
48 # Establish connection
49 self.connection = self._connect_to_ldap()
50
51 def fail(self, msg, exn):
52 self.module.fail_json(
53 msg=msg,
54 details=to_native(exn),
55 exception=traceback.format_exc()
56 )
57
58 def _connect_to_ldap(self):
59 if not self.verify_cert:
60 ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
61
62 connection = ldap.initialize(self.server_uri)
63
64 if self.start_tls:
65 try:
66 connection.start_tls_s()
67 except ldap.LDAPError as e:
68 self.fail("Cannot start TLS.", e)
69
70 try:
71 if self.bind_dn is not None:
72 connection.simple_bind_s(self.bind_dn, self.bind_pw)
73 else:
74 connection.sasl_interactive_bind_s('', ldap.sasl.external())
75 except ldap.LDAPError as e:
76 self.fail("Cannot bind to the server.", e)
77
78 return connection
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py
--- a/plugins/doc_fragments/ldap.py
+++ b/plugins/doc_fragments/ldap.py
@@ -15,7 +15,7 @@
options:
bind_dn:
description:
- - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.
+ - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.
- If this is blank, we'll use an anonymous bind.
type: str
bind_pw:
@@ -44,4 +44,12 @@
- This should only be used on sites using self-signed certificates.
type: bool
default: yes
+ sasl_class:
+ description:
+ - The class to use for SASL authentication.
+ - possible choices are C(external), C(gssapi).
+ type: str
+ choices: ['external', 'gssapi']
+ default: external
+ version_added: "2.0.0"
'''
diff --git a/plugins/module_utils/ldap.py b/plugins/module_utils/ldap.py
--- a/plugins/module_utils/ldap.py
+++ b/plugins/module_utils/ldap.py
@@ -17,6 +17,11 @@
import ldap.sasl
HAS_LDAP = True
+
+ SASCL_CLASS = {
+ 'gssapi': ldap.sasl.gssapi,
+ 'external': ldap.sasl.external,
+ }
except ImportError:
HAS_LDAP = False
@@ -29,6 +34,7 @@
'server_uri': dict(default='ldapi:///'),
'start_tls': dict(default=False, type='bool'),
'validate_certs': dict(default=True, type='bool'),
+ 'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'),
})
return specs
@@ -44,6 +50,7 @@
self.server_uri = self.module.params['server_uri']
self.start_tls = self.module.params['start_tls']
self.verify_cert = self.module.params['validate_certs']
+ self.sasl_class = self.module.params['sasl_class']
# Establish connection
self.connection = self._connect_to_ldap()
@@ -71,7 +78,8 @@
if self.bind_dn is not None:
connection.simple_bind_s(self.bind_dn, self.bind_pw)
else:
- connection.sasl_interactive_bind_s('', ldap.sasl.external())
+ klass = SASCL_CLASS.get(self.sasl_class, ldap.sasl.external)
+ connection.sasl_interactive_bind_s('', klass())
except ldap.LDAPError as e:
self.fail("Cannot bind to the server.", e)
| {"golden_diff": "diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py\n--- a/plugins/doc_fragments/ldap.py\n+++ b/plugins/doc_fragments/ldap.py\n@@ -15,7 +15,7 @@\n options:\n bind_dn:\n description:\n- - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.\n+ - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.\n - If this is blank, we'll use an anonymous bind.\n type: str\n bind_pw:\n@@ -44,4 +44,12 @@\n - This should only be used on sites using self-signed certificates.\n type: bool\n default: yes\n+ sasl_class:\n+ description:\n+ - The class to use for SASL authentication.\n+ - possible choices are C(external), C(gssapi).\n+ type: str\n+ choices: ['external', 'gssapi']\n+ default: external\n+ version_added: \"2.0.0\"\n '''\ndiff --git a/plugins/module_utils/ldap.py b/plugins/module_utils/ldap.py\n--- a/plugins/module_utils/ldap.py\n+++ b/plugins/module_utils/ldap.py\n@@ -17,6 +17,11 @@\n import ldap.sasl\n \n HAS_LDAP = True\n+\n+ SASCL_CLASS = {\n+ 'gssapi': ldap.sasl.gssapi,\n+ 'external': ldap.sasl.external,\n+ }\n except ImportError:\n HAS_LDAP = False\n \n@@ -29,6 +34,7 @@\n 'server_uri': dict(default='ldapi:///'),\n 'start_tls': dict(default=False, type='bool'),\n 'validate_certs': dict(default=True, type='bool'),\n+ 'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'),\n })\n \n return specs\n@@ -44,6 +50,7 @@\n self.server_uri = self.module.params['server_uri']\n self.start_tls = self.module.params['start_tls']\n self.verify_cert = self.module.params['validate_certs']\n+ self.sasl_class = self.module.params['sasl_class']\n \n # Establish connection\n self.connection = self._connect_to_ldap()\n@@ -71,7 +78,8 @@\n if self.bind_dn is not None:\n connection.simple_bind_s(self.bind_dn, self.bind_pw)\n else:\n- connection.sasl_interactive_bind_s('', ldap.sasl.external())\n+ klass = SASCL_CLASS.get(self.sasl_class, ldap.sasl.external)\n+ connection.sasl_interactive_bind_s('', klass())\n except ldap.LDAPError as e:\n self.fail(\"Cannot bind to the server.\", e)\n", "issue": "Support GSSAPI authentication for ldap_* module\n<!--- Verify first that your feature was not already discussed on GitHub -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\n<!--- Describe the new feature/improvement briefly below -->\r\nAdd the attribute `sasl_class` with values `external` (default) or `gssapi` to `community.general/plugins/module_utils/ldap.py`\r\nto select the SASL authentication algorithm for python-ldap when user name and password are not provide. Currently, the ansible modules default to EXTERNAL.\r\n\r\n##### ISSUE TYPE\r\n- Feature Idea\r\n\r\n##### COMPONENT NAME\r\n<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->\r\nldap_attr\r\nldap_attrs\r\nldap_entry\r\nldap_passwd\r\nldap_search\r\n\r\n##### ADDITIONAL INFORMATION\r\n<!--- Describe how the feature would be used, why it is needed and what it would solve -->\r\nAdding the attribute `sasl_class` would allow to switch the default SASL authentication mechanism from `EXTERNAL` to `GSSAPI` which would help windows-based shops to transparently authenticate to Active Directory and use the LDAP modules to query and adjust their AD servers even from non-Windows servers. 
\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: admin group must exist\r\n community.general.ldap_entry:\r\n dn: CN=my_admin_group,OU=Groups,OU=Global,DC=dir,DC=example,DC=com\r\n objectClass: group\r\n server_uri: \"{{ ad.url }}\"\r\n sasl_class: gssapi\r\n state: present\r\n```\r\n\r\n<!--- HINT: You can also paste gist.github.com links for larger files -->\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2016, Peter Sagerson <[email protected]>\n# Copyright: (c) 2016, Jiri Tyr <[email protected]>\n# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\n\nclass ModuleDocFragment(object):\n # Standard LDAP documentation fragment\n DOCUMENTATION = r'''\noptions:\n bind_dn:\n description:\n - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.\n - If this is blank, we'll use an anonymous bind.\n type: str\n bind_pw:\n description:\n - The password to use with I(bind_dn).\n type: str\n dn:\n required: true\n description:\n - The DN of the entry to add or remove.\n type: str\n server_uri:\n description:\n - A URI to the LDAP server.\n - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.\n type: str\n default: ldapi:///\n start_tls:\n description:\n - If true, we'll use the START_TLS LDAP extension.\n type: bool\n default: no\n validate_certs:\n description:\n - If set to C(no), SSL certificates will not be validated.\n - This should only be used on sites using self-signed certificates.\n type: bool\n default: yes\n'''\n", "path": "plugins/doc_fragments/ldap.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2016, Peter Sagerson <[email protected]>\n# Copyright: (c) 2016, Jiri Tyr <[email protected]>\n# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <[email protected]>\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport traceback\nfrom ansible.module_utils._text import to_native\n\ntry:\n import ldap\n import ldap.sasl\n\n HAS_LDAP = True\nexcept ImportError:\n HAS_LDAP = False\n\n\ndef gen_specs(**specs):\n specs.update({\n 'bind_dn': dict(),\n 'bind_pw': dict(default='', no_log=True),\n 'dn': dict(required=True),\n 'server_uri': dict(default='ldapi:///'),\n 'start_tls': dict(default=False, type='bool'),\n 'validate_certs': dict(default=True, type='bool'),\n })\n\n return specs\n\n\nclass LdapGeneric(object):\n def __init__(self, module):\n # Shortcuts\n self.module = module\n self.bind_dn = self.module.params['bind_dn']\n self.bind_pw = self.module.params['bind_pw']\n self.dn = self.module.params['dn']\n self.server_uri = self.module.params['server_uri']\n self.start_tls = self.module.params['start_tls']\n self.verify_cert = self.module.params['validate_certs']\n\n # Establish connection\n self.connection = self._connect_to_ldap()\n\n def fail(self, msg, exn):\n self.module.fail_json(\n msg=msg,\n details=to_native(exn),\n exception=traceback.format_exc()\n )\n\n def _connect_to_ldap(self):\n if not self.verify_cert:\n ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)\n\n connection = ldap.initialize(self.server_uri)\n\n if 
self.start_tls:\n try:\n connection.start_tls_s()\n except ldap.LDAPError as e:\n self.fail(\"Cannot start TLS.\", e)\n\n try:\n if self.bind_dn is not None:\n connection.simple_bind_s(self.bind_dn, self.bind_pw)\n else:\n connection.sasl_interactive_bind_s('', ldap.sasl.external())\n except ldap.LDAPError as e:\n self.fail(\"Cannot bind to the server.\", e)\n\n return connection\n", "path": "plugins/module_utils/ldap.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2016, Peter Sagerson <[email protected]>\n# Copyright: (c) 2016, Jiri Tyr <[email protected]>\n# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\n\nclass ModuleDocFragment(object):\n # Standard LDAP documentation fragment\n DOCUMENTATION = r'''\noptions:\n bind_dn:\n description:\n - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.\n - If this is blank, we'll use an anonymous bind.\n type: str\n bind_pw:\n description:\n - The password to use with I(bind_dn).\n type: str\n dn:\n required: true\n description:\n - The DN of the entry to add or remove.\n type: str\n server_uri:\n description:\n - A URI to the LDAP server.\n - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.\n type: str\n default: ldapi:///\n start_tls:\n description:\n - If true, we'll use the START_TLS LDAP extension.\n type: bool\n default: no\n validate_certs:\n description:\n - If set to C(no), SSL certificates will not be validated.\n - This should only be used on sites using self-signed certificates.\n type: bool\n default: yes\n sasl_class:\n description:\n - The class to use for SASL authentication.\n - possible choices are C(external), C(gssapi).\n type: str\n choices: ['external', 'gssapi']\n default: external\n version_added: \"2.0.0\"\n'''\n", "path": "plugins/doc_fragments/ldap.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2016, Peter Sagerson <[email protected]>\n# Copyright: (c) 2016, Jiri Tyr <[email protected]>\n# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <[email protected]>\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport traceback\nfrom ansible.module_utils._text import to_native\n\ntry:\n import ldap\n import ldap.sasl\n\n HAS_LDAP = True\n\n SASCL_CLASS = {\n 'gssapi': ldap.sasl.gssapi,\n 'external': ldap.sasl.external,\n }\nexcept ImportError:\n HAS_LDAP = False\n\n\ndef gen_specs(**specs):\n specs.update({\n 'bind_dn': dict(),\n 'bind_pw': dict(default='', no_log=True),\n 'dn': dict(required=True),\n 'server_uri': dict(default='ldapi:///'),\n 'start_tls': dict(default=False, type='bool'),\n 'validate_certs': dict(default=True, type='bool'),\n 'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'),\n })\n\n return specs\n\n\nclass LdapGeneric(object):\n def __init__(self, module):\n # Shortcuts\n self.module = module\n self.bind_dn = self.module.params['bind_dn']\n self.bind_pw = self.module.params['bind_pw']\n self.dn = self.module.params['dn']\n self.server_uri = self.module.params['server_uri']\n self.start_tls = self.module.params['start_tls']\n self.verify_cert = 
self.module.params['validate_certs']\n self.sasl_class = self.module.params['sasl_class']\n\n # Establish connection\n self.connection = self._connect_to_ldap()\n\n def fail(self, msg, exn):\n self.module.fail_json(\n msg=msg,\n details=to_native(exn),\n exception=traceback.format_exc()\n )\n\n def _connect_to_ldap(self):\n if not self.verify_cert:\n ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)\n\n connection = ldap.initialize(self.server_uri)\n\n if self.start_tls:\n try:\n connection.start_tls_s()\n except ldap.LDAPError as e:\n self.fail(\"Cannot start TLS.\", e)\n\n try:\n if self.bind_dn is not None:\n connection.simple_bind_s(self.bind_dn, self.bind_pw)\n else:\n klass = SASCL_CLASS.get(self.sasl_class, ldap.sasl.external)\n connection.sasl_interactive_bind_s('', klass())\n except ldap.LDAPError as e:\n self.fail(\"Cannot bind to the server.\", e)\n\n return connection\n", "path": "plugins/module_utils/ldap.py"}]} | 1,851 | 626 |
gh_patches_debug_1688 | rasdani/github-patches | git_diff | conda__conda-build-570 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AppVeyor: AttributeError: 'module' object has no attribute 'get_pid_list
https://ci.appveyor.com/project/mpi4py/mpi4py/build/2.0.0a0-17/job/965h1pw9k7476768#L1187
conda info:
https://ci.appveyor.com/project/mpi4py/mpi4py/build/2.0.0a0-17/job/965h1pw9k7476768#L1076
Please note that a few lines above I ran:
`C:\Anaconda\Scripts\conda.exe install --yes --quiet anaconda-client conda-build jinja2`
--- END ISSUE ---
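The traceback points at an API rename in psutil: `get_pid_list()` became `pids()` in psutil 2.0 and the old name was later removed, which is what raises the AttributeError on a newer psutil. A backwards-compatible call would look like this sketch:

```python
import psutil

# pids() is the current API; fall back to the pre-2.0 name only if it still exists.
pids = psutil.pids() if hasattr(psutil, "pids") else psutil.get_pid_list()
```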
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_build/windows.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import sys
5 import shutil
6 from os.path import dirname, isdir, isfile, join, exists
7
8 import conda.config as cc
9 from conda.compat import iteritems
10
11 from conda_build.config import config
12 from conda_build import environ
13 from conda_build import source
14 from conda_build.utils import _check_call
15
16 try:
17 import psutil
18 except ImportError:
19 psutil = None
20
21 assert sys.platform == 'win32'
22
23
24 def fix_staged_scripts():
25 """
26 Fixes scripts which have been installed unix-style to have a .bat
27 helper
28 """
29 scripts_dir = join(config.build_prefix, 'Scripts')
30 if not isdir(scripts_dir):
31 return
32 for fn in os.listdir(scripts_dir):
33 # process all the extensionless files
34 if not isfile(join(scripts_dir, fn)) or '.' in fn:
35 continue
36
37 with open(join(scripts_dir, fn)) as f:
38 line = f.readline().lower()
39 # If it's a #!python script
40 if not (line.startswith('#!') and 'python' in line.lower()):
41 continue
42 print('Adjusting unix-style #! script %s, '
43 'and adding a .bat file for it' % fn)
44 # copy it with a .py extension (skipping that first #! line)
45 with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:
46 fo.write(f.read())
47 # now create the .exe file
48 shutil.copyfile(join(dirname(__file__),
49 'cli-%d.exe' % (8 * tuple.__itemsize__)),
50 join(scripts_dir, fn + '.exe'))
51
52 # remove the original script
53 os.remove(join(scripts_dir, fn))
54
55
56 def msvc_env_cmd():
57 if 'ProgramFiles(x86)' in os.environ:
58 program_files = os.environ['ProgramFiles(x86)']
59 else:
60 program_files = os.environ['ProgramFiles']
61
62 localappdata = os.environ.get("localappdata")
63
64 if config.PY3K:
65 vcvarsall = os.path.join(program_files,
66 r'Microsoft Visual Studio 10.0'
67 r'\VC\vcvarsall.bat')
68 else:
69 vcvarsall = os.path.join(program_files,
70 r'Microsoft Visual Studio 9.0'
71 r'\VC\vcvarsall.bat')
72
73 # Try the Microsoft Visual C++ Compiler for Python 2.7
74 if not isfile(vcvarsall) and localappdata and not config.PY3K:
75 vcvarsall = os.path.join(localappdata, "Programs", "Common",
76 "Microsoft", "Visual C++ for Python", "9.0", "vcvarsall.bat")
77 if not isfile(vcvarsall) and program_files and not config.PY3K:
78 vcvarsall = os.path.join(program_files, 'Common Files',
79 'Microsoft', 'Visual C++ for Python', "9.0", "vcvarsall.bat")
80 if not isfile(vcvarsall):
81 print("Warning: Couldn't find Visual Studio: %r" % vcvarsall)
82 return ''
83
84 return '''\
85 call "%s" %s
86 ''' % (vcvarsall, {32: 'x86', 64: 'amd64'}[cc.bits])
87
88
89 def kill_processes():
90 if psutil is None:
91 return
92 for n in psutil.get_pid_list():
93 try:
94 p = psutil.Process(n)
95 if p.name.lower() == 'msbuild.exe':
96 print('Terminating:', p.name)
97 p.terminate()
98 except:
99 continue
100
101
102 def build(m):
103 env = dict(os.environ)
104 env.update(environ.get_dict(m))
105
106 for name in 'BIN', 'INC', 'LIB':
107 path = env['LIBRARY_' + name]
108 if not isdir(path):
109 os.makedirs(path)
110
111 src_dir = source.get_dir()
112 bld_bat = join(m.path, 'bld.bat')
113 if exists(bld_bat):
114 with open(bld_bat) as fi:
115 data = fi.read()
116 with open(join(src_dir, 'bld.bat'), 'w') as fo:
117 fo.write(msvc_env_cmd())
118 for kv in iteritems(env):
119 fo.write('set "%s=%s"\n' % kv)
120 # more debuggable with echo on
121 fo.write('@echo on\n')
122 fo.write("REM ===== end generated header =====\n")
123 fo.write(data)
124
125 cmd = [os.environ['COMSPEC'], '/c', 'call', 'bld.bat']
126 _check_call(cmd, cwd=src_dir)
127 kill_processes()
128 fix_staged_scripts()
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_build/windows.py b/conda_build/windows.py
--- a/conda_build/windows.py
+++ b/conda_build/windows.py
@@ -89,7 +89,7 @@
def kill_processes():
if psutil is None:
return
- for n in psutil.get_pid_list():
+ for n in psutil.pids():
try:
p = psutil.Process(n)
if p.name.lower() == 'msbuild.exe':
| {"golden_diff": "diff --git a/conda_build/windows.py b/conda_build/windows.py\n--- a/conda_build/windows.py\n+++ b/conda_build/windows.py\n@@ -89,7 +89,7 @@\n def kill_processes():\n if psutil is None:\n return\n- for n in psutil.get_pid_list():\n+ for n in psutil.pids():\n try:\n p = psutil.Process(n)\n if p.name.lower() == 'msbuild.exe':\n", "issue": "AppVeyor: AttributeError: 'module' object has no attribute 'get_pid_list\nhttps://ci.appveyor.com/project/mpi4py/mpi4py/build/2.0.0a0-17/job/965h1pw9k7476768#L1187\n\nconda info:\nhttps://ci.appveyor.com/project/mpi4py/mpi4py/build/2.0.0a0-17/job/965h1pw9k7476768#L1076\n\nPlease note a few lines above I ran:\n`C:\\Anaconda\\Scripts\\conda.exe install --yes --quiet anaconda-client conda-build jinja2`\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport shutil\nfrom os.path import dirname, isdir, isfile, join, exists\n\nimport conda.config as cc\nfrom conda.compat import iteritems\n\nfrom conda_build.config import config\nfrom conda_build import environ\nfrom conda_build import source\nfrom conda_build.utils import _check_call\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nassert sys.platform == 'win32'\n\n\ndef fix_staged_scripts():\n \"\"\"\n Fixes scripts which have been installed unix-style to have a .bat\n helper\n \"\"\"\n scripts_dir = join(config.build_prefix, 'Scripts')\n if not isdir(scripts_dir):\n return\n for fn in os.listdir(scripts_dir):\n # process all the extensionless files\n if not isfile(join(scripts_dir, fn)) or '.' in fn:\n continue\n\n with open(join(scripts_dir, fn)) as f:\n line = f.readline().lower()\n # If it's a #!python script\n if not (line.startswith('#!') and 'python' in line.lower()):\n continue\n print('Adjusting unix-style #! script %s, '\n 'and adding a .bat file for it' % fn)\n # copy it with a .py extension (skipping that first #! 
line)\n with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:\n fo.write(f.read())\n # now create the .exe file\n shutil.copyfile(join(dirname(__file__),\n 'cli-%d.exe' % (8 * tuple.__itemsize__)),\n join(scripts_dir, fn + '.exe'))\n\n # remove the original script\n os.remove(join(scripts_dir, fn))\n\n\ndef msvc_env_cmd():\n if 'ProgramFiles(x86)' in os.environ:\n program_files = os.environ['ProgramFiles(x86)']\n else:\n program_files = os.environ['ProgramFiles']\n\n localappdata = os.environ.get(\"localappdata\")\n\n if config.PY3K:\n vcvarsall = os.path.join(program_files,\n r'Microsoft Visual Studio 10.0'\n r'\\VC\\vcvarsall.bat')\n else:\n vcvarsall = os.path.join(program_files,\n r'Microsoft Visual Studio 9.0'\n r'\\VC\\vcvarsall.bat')\n\n # Try the Microsoft Visual C++ Compiler for Python 2.7\n if not isfile(vcvarsall) and localappdata and not config.PY3K:\n vcvarsall = os.path.join(localappdata, \"Programs\", \"Common\",\n \"Microsoft\", \"Visual C++ for Python\", \"9.0\", \"vcvarsall.bat\")\n if not isfile(vcvarsall) and program_files and not config.PY3K:\n vcvarsall = os.path.join(program_files, 'Common Files',\n 'Microsoft', 'Visual C++ for Python', \"9.0\", \"vcvarsall.bat\")\n if not isfile(vcvarsall):\n print(\"Warning: Couldn't find Visual Studio: %r\" % vcvarsall)\n return ''\n\n return '''\\\ncall \"%s\" %s\n''' % (vcvarsall, {32: 'x86', 64: 'amd64'}[cc.bits])\n\n\ndef kill_processes():\n if psutil is None:\n return\n for n in psutil.get_pid_list():\n try:\n p = psutil.Process(n)\n if p.name.lower() == 'msbuild.exe':\n print('Terminating:', p.name)\n p.terminate()\n except:\n continue\n\n\ndef build(m):\n env = dict(os.environ)\n env.update(environ.get_dict(m))\n\n for name in 'BIN', 'INC', 'LIB':\n path = env['LIBRARY_' + name]\n if not isdir(path):\n os.makedirs(path)\n\n src_dir = source.get_dir()\n bld_bat = join(m.path, 'bld.bat')\n if exists(bld_bat):\n with open(bld_bat) as fi:\n data = fi.read()\n with open(join(src_dir, 'bld.bat'), 'w') as fo:\n fo.write(msvc_env_cmd())\n for kv in iteritems(env):\n fo.write('set \"%s=%s\"\\n' % kv)\n # more debuggable with echo on\n fo.write('@echo on\\n')\n fo.write(\"REM ===== end generated header =====\\n\")\n fo.write(data)\n\n cmd = [os.environ['COMSPEC'], '/c', 'call', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n kill_processes()\n fix_staged_scripts()\n", "path": "conda_build/windows.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport shutil\nfrom os.path import dirname, isdir, isfile, join, exists\n\nimport conda.config as cc\nfrom conda.compat import iteritems\n\nfrom conda_build.config import config\nfrom conda_build import environ\nfrom conda_build import source\nfrom conda_build.utils import _check_call\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nassert sys.platform == 'win32'\n\n\ndef fix_staged_scripts():\n \"\"\"\n Fixes scripts which have been installed unix-style to have a .bat\n helper\n \"\"\"\n scripts_dir = join(config.build_prefix, 'Scripts')\n if not isdir(scripts_dir):\n return\n for fn in os.listdir(scripts_dir):\n # process all the extensionless files\n if not isfile(join(scripts_dir, fn)) or '.' in fn:\n continue\n\n with open(join(scripts_dir, fn)) as f:\n line = f.readline().lower()\n # If it's a #!python script\n if not (line.startswith('#!') and 'python' in line.lower()):\n continue\n print('Adjusting unix-style #! 
script %s, '\n 'and adding a .bat file for it' % fn)\n # copy it with a .py extension (skipping that first #! line)\n with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:\n fo.write(f.read())\n # now create the .exe file\n shutil.copyfile(join(dirname(__file__),\n 'cli-%d.exe' % (8 * tuple.__itemsize__)),\n join(scripts_dir, fn + '.exe'))\n\n # remove the original script\n os.remove(join(scripts_dir, fn))\n\n\ndef msvc_env_cmd():\n if 'ProgramFiles(x86)' in os.environ:\n program_files = os.environ['ProgramFiles(x86)']\n else:\n program_files = os.environ['ProgramFiles']\n\n localappdata = os.environ.get(\"localappdata\")\n\n if config.PY3K:\n vcvarsall = os.path.join(program_files,\n r'Microsoft Visual Studio 10.0'\n r'\\VC\\vcvarsall.bat')\n else:\n vcvarsall = os.path.join(program_files,\n r'Microsoft Visual Studio 9.0'\n r'\\VC\\vcvarsall.bat')\n\n # Try the Microsoft Visual C++ Compiler for Python 2.7\n if not isfile(vcvarsall) and localappdata and not config.PY3K:\n vcvarsall = os.path.join(localappdata, \"Programs\", \"Common\",\n \"Microsoft\", \"Visual C++ for Python\", \"9.0\", \"vcvarsall.bat\")\n if not isfile(vcvarsall) and program_files and not config.PY3K:\n vcvarsall = os.path.join(program_files, 'Common Files',\n 'Microsoft', 'Visual C++ for Python', \"9.0\", \"vcvarsall.bat\")\n if not isfile(vcvarsall):\n print(\"Warning: Couldn't find Visual Studio: %r\" % vcvarsall)\n return ''\n\n return '''\\\ncall \"%s\" %s\n''' % (vcvarsall, {32: 'x86', 64: 'amd64'}[cc.bits])\n\n\ndef kill_processes():\n if psutil is None:\n return\n for n in psutil.pids():\n try:\n p = psutil.Process(n)\n if p.name.lower() == 'msbuild.exe':\n print('Terminating:', p.name)\n p.terminate()\n except:\n continue\n\n\ndef build(m):\n env = dict(os.environ)\n env.update(environ.get_dict(m))\n\n for name in 'BIN', 'INC', 'LIB':\n path = env['LIBRARY_' + name]\n if not isdir(path):\n os.makedirs(path)\n\n src_dir = source.get_dir()\n bld_bat = join(m.path, 'bld.bat')\n if exists(bld_bat):\n with open(bld_bat) as fi:\n data = fi.read()\n with open(join(src_dir, 'bld.bat'), 'w') as fo:\n fo.write(msvc_env_cmd())\n for kv in iteritems(env):\n fo.write('set \"%s=%s\"\\n' % kv)\n # more debuggable with echo on\n fo.write('@echo on\\n')\n fo.write(\"REM ===== end generated header =====\\n\")\n fo.write(data)\n\n cmd = [os.environ['COMSPEC'], '/c', 'call', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n kill_processes()\n fix_staged_scripts()\n", "path": "conda_build/windows.py"}]} | 1,744 | 102 |
gh_patches_debug_40563 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-4169 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aws - new generic resource action "invoke-stepfunction" enhancement
Enhancement which behaves just like action invoke-lambda but invokes (aka starts execution of) step function state machines for use cases which need sequencing and conditional logic which is more easily implemented in step functions.
Doc hint: See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/stepfunctions.html#SFN.Client.start_execution
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/sfn.py`
Content:
```
1 # Copyright 2015-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 from c7n.manager import resources
17 from c7n.query import QueryResourceManager
18 from c7n.tags import Tag, RemoveTag
19
20
21 @resources.register('step-machine')
22 class StepFunction(QueryResourceManager):
23 """AWS Step Functions State Machine"""
24
25 class resource_type(object):
26 service = 'stepfunctions'
27 enum_spec = ('list_state_machines', 'stateMachines', None)
28 arn = id = 'stateMachineArn'
29 name = 'name'
30 date = 'creationDate'
31 dimension = None
32 detail_spec = (
33 "describe_state_machine", "stateMachineArn",
34 'stateMachineArn', None)
35 filter_name = None
36
37
38 @StepFunction.action_registry.register('tag')
39 class TagStepFunction(Tag):
40 """Action to create tag(s) on a step function
41
42 :example:
43
44 .. code-block:: yaml
45
46 policies:
47 - name: tag-step-function
48 resource: step-machine
49 actions:
50 - type: tag
51 key: target-tag
52 value: target-tag-value
53 """
54
55 permissions = ('stepfunctions:TagResource',)
56
57 def process_resource_set(self, client, resources, tags):
58
59 tags_lower = []
60
61 for tag in tags:
62 tags_lower.append({k.lower(): v for k, v in tag.items()})
63
64 for r in resources:
65 client.tag_resource(resourceArn=r['stateMachineArn'], tags=tags_lower)
66
67
68 @StepFunction.action_registry.register('remove-tag')
69 class UnTagStepFunction(RemoveTag):
70 """Action to create tag(s) on a step function
71
72 :example:
73
74 .. code-block:: yaml
75
76 policies:
77 - name: step-function-remove-tag
78 resource: step-machine
79 actions:
80 - type: remove-tag
81 tags: ["test"]
82 """
83
84 permissions = ('stepfunctions:UntagResource',)
85
86 def process_resource_set(self, client, resources, tag_keys):
87
88 for r in resources:
89 client.untag_resource(resourceArn=r['stateMachineArn'], tagKeys=tag_keys)
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/resources/sfn.py b/c7n/resources/sfn.py
--- a/c7n/resources/sfn.py
+++ b/c7n/resources/sfn.py
@@ -13,9 +13,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
+
+from c7n.actions import Action
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.tags import Tag, RemoveTag
+from c7n.utils import type_schema, local_session, dumps, chunks
@resources.register('step-machine')
@@ -35,6 +38,95 @@
filter_name = None
+class InvokeStepFunction(Action):
+ """Invoke step function on resources.
+
+ By default this will invoke a step function for each resource
+ providing both the `policy` and `resource` as input.
+
+ That behavior can be configured setting policy and bulk
+ boolean flags on the action.
+
+ If bulk action parameter is set to true, then the step
+ function will be invoked in bulk, with a set of resource arns
+ under the `resources` key.
+
+ The size of the batch can be configured via the batch-size
+ parameter. Note step function state (input, execution, etc)must
+ fit within 32k, we default to batch size 250.
+
+ :example:
+
+ .. code-block:: yaml
+
+ policies:
+ - name: invoke-step-function
+ resource: s3
+ filters:
+ - is-log-target
+ - "tag:IngestSetup": absent
+ actions:
+ - type: invoke-sfn
+ # This will cause the workflow to be invoked
+ # with many resources arns in a single execution.
+ # Note this is *not* the default.
+ bulk: true
+ batch-size: 10
+ state-machine: LogIngestSetup
+ """
+
+ schema = type_schema(
+ 'invoke-sfn',
+ required=['state-machine'],
+ **{'state-machine': {'type': 'string'},
+ 'batch-size': {'type': 'integer'},
+ 'bulk': {'type': 'boolean'},
+ 'policy': {'type': 'boolean'}})
+ schema_alias = True
+ permissions = ('stepfunctions:StartExecution',)
+
+ def process(self, resources):
+ client = local_session(
+ self.manager.session_factory).client('stepfunctions')
+ arn = self.data['state-machine']
+ if not arn.startswith('arn'):
+ arn = 'arn:aws:states:{}:{}:stateMachine:{}'.format(
+ self.manager.config.region, self.manager.config.account_id, arn)
+
+ params = {'stateMachineArn': arn}
+ pinput = {}
+
+ if self.data.get('policy', True):
+ pinput['policy'] = dict(self.manager.data)
+
+ resource_set = list(zip(self.manager.get_arns(resources), resources))
+ if self.data.get('bulk', False) is True:
+ return self.invoke_batch(client, params, pinput, resource_set)
+
+ for arn, r in resource_set:
+ pinput['resource'] = r
+ params['input'] = dumps(pinput)
+ r['c7n:execution-arn'] = self.manager.retry(
+ client.start_execution, **params).get('executionArn')
+
+ def invoke_batch(self, client, params, pinput, resource_set):
+ for batch_rset in chunks(resource_set, self.data.get('batch-size', 250)):
+ pinput['resources'] = [rarn for rarn, _ in batch_rset]
+ params['input'] = dumps(pinput)
+ exec_arn = self.manager.retry(
+ client.start_execution, **params).get('executionArn')
+ for _, r in resource_set:
+ r['c7n:execution-arn'] = exec_arn
+
+ @classmethod
+ def register(cls, registry, key):
+ for _, r in registry.items():
+ r.action_registry.register('invoke-sfn', cls)
+
+
+resources.subscribe(resources.EVENT_FINAL, InvokeStepFunction.register)
+
+
@StepFunction.action_registry.register('tag')
class TagStepFunction(Tag):
"""Action to create tag(s) on a step function
| {"golden_diff": "diff --git a/c7n/resources/sfn.py b/c7n/resources/sfn.py\n--- a/c7n/resources/sfn.py\n+++ b/c7n/resources/sfn.py\n@@ -13,9 +13,12 @@\n # limitations under the License.\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+\n+from c7n.actions import Action\n from c7n.manager import resources\n from c7n.query import QueryResourceManager\n from c7n.tags import Tag, RemoveTag\n+from c7n.utils import type_schema, local_session, dumps, chunks\n \n \n @resources.register('step-machine')\n@@ -35,6 +38,95 @@\n filter_name = None\n \n \n+class InvokeStepFunction(Action):\n+ \"\"\"Invoke step function on resources.\n+\n+ By default this will invoke a step function for each resource\n+ providing both the `policy` and `resource` as input.\n+\n+ That behavior can be configured setting policy and bulk\n+ boolean flags on the action.\n+\n+ If bulk action parameter is set to true, then the step\n+ function will be invoked in bulk, with a set of resource arns\n+ under the `resources` key.\n+\n+ The size of the batch can be configured via the batch-size\n+ parameter. Note step function state (input, execution, etc)must\n+ fit within 32k, we default to batch size 250.\n+\n+ :example:\n+\n+ .. code-block:: yaml\n+\n+ policies:\n+ - name: invoke-step-function\n+ resource: s3\n+ filters:\n+ - is-log-target\n+ - \"tag:IngestSetup\": absent\n+ actions:\n+ - type: invoke-sfn\n+ # This will cause the workflow to be invoked\n+ # with many resources arns in a single execution.\n+ # Note this is *not* the default.\n+ bulk: true\n+ batch-size: 10\n+ state-machine: LogIngestSetup\n+ \"\"\"\n+\n+ schema = type_schema(\n+ 'invoke-sfn',\n+ required=['state-machine'],\n+ **{'state-machine': {'type': 'string'},\n+ 'batch-size': {'type': 'integer'},\n+ 'bulk': {'type': 'boolean'},\n+ 'policy': {'type': 'boolean'}})\n+ schema_alias = True\n+ permissions = ('stepfunctions:StartExecution',)\n+\n+ def process(self, resources):\n+ client = local_session(\n+ self.manager.session_factory).client('stepfunctions')\n+ arn = self.data['state-machine']\n+ if not arn.startswith('arn'):\n+ arn = 'arn:aws:states:{}:{}:stateMachine:{}'.format(\n+ self.manager.config.region, self.manager.config.account_id, arn)\n+\n+ params = {'stateMachineArn': arn}\n+ pinput = {}\n+\n+ if self.data.get('policy', True):\n+ pinput['policy'] = dict(self.manager.data)\n+\n+ resource_set = list(zip(self.manager.get_arns(resources), resources))\n+ if self.data.get('bulk', False) is True:\n+ return self.invoke_batch(client, params, pinput, resource_set)\n+\n+ for arn, r in resource_set:\n+ pinput['resource'] = r\n+ params['input'] = dumps(pinput)\n+ r['c7n:execution-arn'] = self.manager.retry(\n+ client.start_execution, **params).get('executionArn')\n+\n+ def invoke_batch(self, client, params, pinput, resource_set):\n+ for batch_rset in chunks(resource_set, self.data.get('batch-size', 250)):\n+ pinput['resources'] = [rarn for rarn, _ in batch_rset]\n+ params['input'] = dumps(pinput)\n+ exec_arn = self.manager.retry(\n+ client.start_execution, **params).get('executionArn')\n+ for _, r in resource_set:\n+ r['c7n:execution-arn'] = exec_arn\n+\n+ @classmethod\n+ def register(cls, registry, key):\n+ for _, r in registry.items():\n+ r.action_registry.register('invoke-sfn', cls)\n+\n+\n+resources.subscribe(resources.EVENT_FINAL, InvokeStepFunction.register)\n+\n+\n @StepFunction.action_registry.register('tag')\n class TagStepFunction(Tag):\n \"\"\"Action to create tag(s) on a step function\n", "issue": "aws - new 
generic resource action \"invoke-stepfunction\" enhancement\nEnhancement which behaves just like action invoke-lambda but invokes (aka starts execution of) step function state machines for use cases which need sequencing and conditional logic which is more easily implemented in step functions. \r\n\r\nDoc hint: See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/stepfunctions.html#SFN.Client.start_execution\n", "before_files": [{"content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.tags import Tag, RemoveTag\n\n\[email protected]('step-machine')\nclass StepFunction(QueryResourceManager):\n \"\"\"AWS Step Functions State Machine\"\"\"\n\n class resource_type(object):\n service = 'stepfunctions'\n enum_spec = ('list_state_machines', 'stateMachines', None)\n arn = id = 'stateMachineArn'\n name = 'name'\n date = 'creationDate'\n dimension = None\n detail_spec = (\n \"describe_state_machine\", \"stateMachineArn\",\n 'stateMachineArn', None)\n filter_name = None\n\n\[email protected]_registry.register('tag')\nclass TagStepFunction(Tag):\n \"\"\"Action to create tag(s) on a step function\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: tag-step-function\n resource: step-machine\n actions:\n - type: tag\n key: target-tag\n value: target-tag-value\n \"\"\"\n\n permissions = ('stepfunctions:TagResource',)\n\n def process_resource_set(self, client, resources, tags):\n\n tags_lower = []\n\n for tag in tags:\n tags_lower.append({k.lower(): v for k, v in tag.items()})\n\n for r in resources:\n client.tag_resource(resourceArn=r['stateMachineArn'], tags=tags_lower)\n\n\[email protected]_registry.register('remove-tag')\nclass UnTagStepFunction(RemoveTag):\n \"\"\"Action to create tag(s) on a step function\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: step-function-remove-tag\n resource: step-machine\n actions:\n - type: remove-tag\n tags: [\"test\"]\n \"\"\"\n\n permissions = ('stepfunctions:UntagResource',)\n\n def process_resource_set(self, client, resources, tag_keys):\n\n for r in resources:\n client.untag_resource(resourceArn=r['stateMachineArn'], tagKeys=tag_keys)\n", "path": "c7n/resources/sfn.py"}], "after_files": [{"content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\nfrom c7n.actions import Action\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.tags import Tag, RemoveTag\nfrom c7n.utils import type_schema, local_session, dumps, chunks\n\n\[email protected]('step-machine')\nclass StepFunction(QueryResourceManager):\n \"\"\"AWS Step Functions State Machine\"\"\"\n\n class resource_type(object):\n service = 'stepfunctions'\n enum_spec = ('list_state_machines', 'stateMachines', None)\n arn = id = 'stateMachineArn'\n name = 'name'\n date = 'creationDate'\n dimension = None\n detail_spec = (\n \"describe_state_machine\", \"stateMachineArn\",\n 'stateMachineArn', None)\n filter_name = None\n\n\nclass InvokeStepFunction(Action):\n \"\"\"Invoke step function on resources.\n\n By default this will invoke a step function for each resource\n providing both the `policy` and `resource` as input.\n\n That behavior can be configured setting policy and bulk\n boolean flags on the action.\n\n If bulk action parameter is set to true, then the step\n function will be invoked in bulk, with a set of resource arns\n under the `resources` key.\n\n The size of the batch can be configured via the batch-size\n parameter. Note step function state (input, execution, etc)must\n fit within 32k, we default to batch size 250.\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: invoke-step-function\n resource: s3\n filters:\n - is-log-target\n - \"tag:IngestSetup\": absent\n actions:\n - type: invoke-sfn\n # This will cause the workflow to be invoked\n # with many resources arns in a single execution.\n # Note this is *not* the default.\n bulk: true\n batch-size: 10\n state-machine: LogIngestSetup\n \"\"\"\n\n schema = type_schema(\n 'invoke-sfn',\n required=['state-machine'],\n **{'state-machine': {'type': 'string'},\n 'batch-size': {'type': 'integer'},\n 'bulk': {'type': 'boolean'},\n 'policy': {'type': 'boolean'}})\n schema_alias = True\n permissions = ('stepfunctions:StartExecution',)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('stepfunctions')\n arn = self.data['state-machine']\n if not arn.startswith('arn'):\n arn = 'arn:aws:states:{}:{}:stateMachine:{}'.format(\n self.manager.config.region, self.manager.config.account_id, arn)\n\n params = {'stateMachineArn': arn}\n pinput = {}\n\n if self.data.get('policy', True):\n pinput['policy'] = dict(self.manager.data)\n\n resource_set = list(zip(self.manager.get_arns(resources), resources))\n if self.data.get('bulk', False) is True:\n return self.invoke_batch(client, params, pinput, resource_set)\n\n for arn, r in resource_set:\n pinput['resource'] = r\n params['input'] = dumps(pinput)\n r['c7n:execution-arn'] = self.manager.retry(\n client.start_execution, **params).get('executionArn')\n\n def invoke_batch(self, client, params, pinput, resource_set):\n for batch_rset in chunks(resource_set, self.data.get('batch-size', 250)):\n pinput['resources'] = [rarn for rarn, _ in batch_rset]\n params['input'] = dumps(pinput)\n exec_arn = self.manager.retry(\n client.start_execution, **params).get('executionArn')\n for _, r in resource_set:\n r['c7n:execution-arn'] = exec_arn\n\n @classmethod\n def register(cls, registry, key):\n for _, r in registry.items():\n r.action_registry.register('invoke-sfn', cls)\n\n\nresources.subscribe(resources.EVENT_FINAL, InvokeStepFunction.register)\n\n\[email protected]_registry.register('tag')\nclass TagStepFunction(Tag):\n \"\"\"Action to create tag(s) on a step function\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: tag-step-function\n resource: step-machine\n actions:\n - type: tag\n key: target-tag\n value: target-tag-value\n \"\"\"\n\n permissions = ('stepfunctions:TagResource',)\n\n def process_resource_set(self, client, resources, tags):\n\n tags_lower = []\n\n for tag in tags:\n tags_lower.append({k.lower(): v for k, v in tag.items()})\n\n for r in resources:\n client.tag_resource(resourceArn=r['stateMachineArn'], tags=tags_lower)\n\n\[email protected]_registry.register('remove-tag')\nclass UnTagStepFunction(RemoveTag):\n \"\"\"Action to create tag(s) on a step function\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: step-function-remove-tag\n resource: step-machine\n actions:\n - type: remove-tag\n tags: [\"test\"]\n \"\"\"\n\n permissions = ('stepfunctions:UntagResource',)\n\n def process_resource_set(self, client, resources, tag_keys):\n\n for r in resources:\n client.untag_resource(resourceArn=r['stateMachineArn'], tagKeys=tag_keys)\n", "path": "c7n/resources/sfn.py"}]} | 1,113 | 990 |
gh_patches_debug_1598 | rasdani/github-patches | git_diff | CTFd__CTFd-1908 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bump pybluemonday to latest
Just tracking this so we don't forget. Latest should be 0.0.6 with bluemonday at 1.0.10. It adds support for comments in the HTML output.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/utils/security/sanitize.py`
Content:
```
1 from pybluemonday import UGCPolicy
2
3 # Copied from lxml:
4 # https://github.com/lxml/lxml/blob/e986a9cb5d54827c59aefa8803bc90954d67221e/src/lxml/html/defs.py#L38
5 # fmt: off
6 SAFE_ATTRS = (
7 'abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align',
8 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', 'char', 'charoff',
9 'charset', 'checked', 'cite', 'class', 'clear', 'cols', 'colspan',
10 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', 'enctype',
11 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', 'id',
12 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
13 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
14 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape',
15 'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',
16 'type', 'usemap', 'valign', 'value', 'vspace', 'width'
17 )
18 # fmt: on
19
20 PAGE_STRUCTURE_TAGS = {
21 "title": [],
22 }
23
24 META_TAGS = {
25 "meta": ["name", "content", "property"],
26 }
27
28 FORM_TAGS = {
29 "form": ["method", "action"],
30 "button": ["name", "type", "value", "disabled"],
31 "input": ["name", "type", "value", "placeholder"],
32 "select": ["name", "value", "placeholder"],
33 "option": ["value"],
34 "textarea": ["name", "value", "placeholder"],
35 "label": ["for"],
36 }
37
38 ANNOYING_TAGS = {
39 "blink": [],
40 "marquee": [],
41 }
42
43
44 MEDIA_TAGS = {
45 "audio": ["autoplay", "controls", "crossorigin", "loop", "muted", "preload", "src"],
46 "video": [
47 "autoplay",
48 "buffered",
49 "controls",
50 "crossorigin",
51 "loop",
52 "muted",
53 "playsinline",
54 "poster",
55 "preload",
56 "src",
57 ],
58 "source": ["src", "type"],
59 "iframe": ["width", "height", "src", "frameborder", "allow", "allowfullscreen"],
60 }
61
62 SANITIZER = UGCPolicy()
63
64 for TAGS in (PAGE_STRUCTURE_TAGS, META_TAGS, FORM_TAGS, ANNOYING_TAGS, MEDIA_TAGS):
65 for element in TAGS:
66 SANITIZER.AllowElements(element)
67 SANITIZER.AllowAttrs(*TAGS[element]).OnElements(element)
68
69 # Allow safe attrs copied from lxml
70 SANITIZER.AllowAttrs(*SAFE_ATTRS).Globally()
71
72 # Allow styling globally
73 SANITIZER.AllowAttrs("class", "style").Globally()
74
75 # Allow styling via bluemonday
76 SANITIZER.AllowStyling()
77
78 # Allow safe convenience functions from bluemonday
79 SANITIZER.AllowStandardAttributes()
80 SANITIZER.AllowStandardURLs()
81
82 # Allow data atributes
83 SANITIZER.AllowDataAttributes()
84
85 # Allow data URI images
86 SANITIZER.AllowDataURIImages()
87
88 # Link security
89 SANITIZER.AllowRelativeURLs(True)
90 SANITIZER.RequireNoFollowOnFullyQualifiedLinks(True)
91 SANITIZER.RequireNoFollowOnLinks(True)
92 SANITIZER.RequireNoReferrerOnFullyQualifiedLinks(True)
93 SANITIZER.RequireNoReferrerOnLinks(True)
94
95
96 def sanitize_html(html):
97 return SANITIZER.sanitize(html)
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/utils/security/sanitize.py b/CTFd/utils/security/sanitize.py
--- a/CTFd/utils/security/sanitize.py
+++ b/CTFd/utils/security/sanitize.py
@@ -92,6 +92,9 @@
SANITIZER.RequireNoReferrerOnFullyQualifiedLinks(True)
SANITIZER.RequireNoReferrerOnLinks(True)
+# Allow Comments
+SANITIZER.AllowComments()
+
def sanitize_html(html):
return SANITIZER.sanitize(html)
| {"golden_diff": "diff --git a/CTFd/utils/security/sanitize.py b/CTFd/utils/security/sanitize.py\n--- a/CTFd/utils/security/sanitize.py\n+++ b/CTFd/utils/security/sanitize.py\n@@ -92,6 +92,9 @@\n SANITIZER.RequireNoReferrerOnFullyQualifiedLinks(True)\n SANITIZER.RequireNoReferrerOnLinks(True)\n \n+# Allow Comments\n+SANITIZER.AllowComments()\n+\n \n def sanitize_html(html):\n return SANITIZER.sanitize(html)\n", "issue": "Bump pybluemonday to latest\nJust tracking this so we don't forget. Latest should be 0.0.6 with bluemonday at 1.0.10. It adds support for comments in the HTML output. \n", "before_files": [{"content": "from pybluemonday import UGCPolicy\n\n# Copied from lxml:\n# https://github.com/lxml/lxml/blob/e986a9cb5d54827c59aefa8803bc90954d67221e/src/lxml/html/defs.py#L38\n# fmt: off\nSAFE_ATTRS = (\n 'abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align',\n 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', 'char', 'charoff',\n 'charset', 'checked', 'cite', 'class', 'clear', 'cols', 'colspan',\n 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', 'enctype',\n 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', 'id',\n 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',\n 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',\n 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape',\n 'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',\n 'type', 'usemap', 'valign', 'value', 'vspace', 'width'\n)\n# fmt: on\n\nPAGE_STRUCTURE_TAGS = {\n \"title\": [],\n}\n\nMETA_TAGS = {\n \"meta\": [\"name\", \"content\", \"property\"],\n}\n\nFORM_TAGS = {\n \"form\": [\"method\", \"action\"],\n \"button\": [\"name\", \"type\", \"value\", \"disabled\"],\n \"input\": [\"name\", \"type\", \"value\", \"placeholder\"],\n \"select\": [\"name\", \"value\", \"placeholder\"],\n \"option\": [\"value\"],\n \"textarea\": [\"name\", \"value\", \"placeholder\"],\n \"label\": [\"for\"],\n}\n\nANNOYING_TAGS = {\n \"blink\": [],\n \"marquee\": [],\n}\n\n\nMEDIA_TAGS = {\n \"audio\": [\"autoplay\", \"controls\", \"crossorigin\", \"loop\", \"muted\", \"preload\", \"src\"],\n \"video\": [\n \"autoplay\",\n \"buffered\",\n \"controls\",\n \"crossorigin\",\n \"loop\",\n \"muted\",\n \"playsinline\",\n \"poster\",\n \"preload\",\n \"src\",\n ],\n \"source\": [\"src\", \"type\"],\n \"iframe\": [\"width\", \"height\", \"src\", \"frameborder\", \"allow\", \"allowfullscreen\"],\n}\n\nSANITIZER = UGCPolicy()\n\nfor TAGS in (PAGE_STRUCTURE_TAGS, META_TAGS, FORM_TAGS, ANNOYING_TAGS, MEDIA_TAGS):\n for element in TAGS:\n SANITIZER.AllowElements(element)\n SANITIZER.AllowAttrs(*TAGS[element]).OnElements(element)\n\n# Allow safe attrs copied from lxml\nSANITIZER.AllowAttrs(*SAFE_ATTRS).Globally()\n\n# Allow styling globally\nSANITIZER.AllowAttrs(\"class\", \"style\").Globally()\n\n# Allow styling via bluemonday\nSANITIZER.AllowStyling()\n\n# Allow safe convenience functions from bluemonday\nSANITIZER.AllowStandardAttributes()\nSANITIZER.AllowStandardURLs()\n\n# Allow data atributes\nSANITIZER.AllowDataAttributes()\n\n# Allow data URI images\nSANITIZER.AllowDataURIImages()\n\n# Link security\nSANITIZER.AllowRelativeURLs(True)\nSANITIZER.RequireNoFollowOnFullyQualifiedLinks(True)\nSANITIZER.RequireNoFollowOnLinks(True)\nSANITIZER.RequireNoReferrerOnFullyQualifiedLinks(True)\nSANITIZER.RequireNoReferrerOnLinks(True)\n\n\ndef sanitize_html(html):\n return SANITIZER.sanitize(html)\n", "path": 
"CTFd/utils/security/sanitize.py"}], "after_files": [{"content": "from pybluemonday import UGCPolicy\n\n# Copied from lxml:\n# https://github.com/lxml/lxml/blob/e986a9cb5d54827c59aefa8803bc90954d67221e/src/lxml/html/defs.py#L38\n# fmt: off\nSAFE_ATTRS = (\n 'abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align',\n 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', 'char', 'charoff',\n 'charset', 'checked', 'cite', 'class', 'clear', 'cols', 'colspan',\n 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', 'enctype',\n 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', 'id',\n 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',\n 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',\n 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape',\n 'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',\n 'type', 'usemap', 'valign', 'value', 'vspace', 'width'\n)\n# fmt: on\n\nPAGE_STRUCTURE_TAGS = {\n \"title\": [],\n}\n\nMETA_TAGS = {\n \"meta\": [\"name\", \"content\", \"property\"],\n}\n\nFORM_TAGS = {\n \"form\": [\"method\", \"action\"],\n \"button\": [\"name\", \"type\", \"value\", \"disabled\"],\n \"input\": [\"name\", \"type\", \"value\", \"placeholder\"],\n \"select\": [\"name\", \"value\", \"placeholder\"],\n \"option\": [\"value\"],\n \"textarea\": [\"name\", \"value\", \"placeholder\"],\n \"label\": [\"for\"],\n}\n\nANNOYING_TAGS = {\n \"blink\": [],\n \"marquee\": [],\n}\n\n\nMEDIA_TAGS = {\n \"audio\": [\"autoplay\", \"controls\", \"crossorigin\", \"loop\", \"muted\", \"preload\", \"src\"],\n \"video\": [\n \"autoplay\",\n \"buffered\",\n \"controls\",\n \"crossorigin\",\n \"loop\",\n \"muted\",\n \"playsinline\",\n \"poster\",\n \"preload\",\n \"src\",\n ],\n \"source\": [\"src\", \"type\"],\n \"iframe\": [\"width\", \"height\", \"src\", \"frameborder\", \"allow\", \"allowfullscreen\"],\n}\n\nSANITIZER = UGCPolicy()\n\nfor TAGS in (PAGE_STRUCTURE_TAGS, META_TAGS, FORM_TAGS, ANNOYING_TAGS, MEDIA_TAGS):\n for element in TAGS:\n SANITIZER.AllowElements(element)\n SANITIZER.AllowAttrs(*TAGS[element]).OnElements(element)\n\n# Allow safe attrs copied from lxml\nSANITIZER.AllowAttrs(*SAFE_ATTRS).Globally()\n\n# Allow styling globally\nSANITIZER.AllowAttrs(\"class\", \"style\").Globally()\n\n# Allow styling via bluemonday\nSANITIZER.AllowStyling()\n\n# Allow safe convenience functions from bluemonday\nSANITIZER.AllowStandardAttributes()\nSANITIZER.AllowStandardURLs()\n\n# Allow data atributes\nSANITIZER.AllowDataAttributes()\n\n# Allow data URI images\nSANITIZER.AllowDataURIImages()\n\n# Link security\nSANITIZER.AllowRelativeURLs(True)\nSANITIZER.RequireNoFollowOnFullyQualifiedLinks(True)\nSANITIZER.RequireNoFollowOnLinks(True)\nSANITIZER.RequireNoReferrerOnFullyQualifiedLinks(True)\nSANITIZER.RequireNoReferrerOnLinks(True)\n\n# Allow Comments\nSANITIZER.AllowComments()\n\n\ndef sanitize_html(html):\n return SANITIZER.sanitize(html)\n", "path": "CTFd/utils/security/sanitize.py"}]} | 1,353 | 108 |
gh_patches_debug_21547 | rasdani/github-patches | git_diff | fidals__shopelectro-421 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong QuerySet in RobotsView stb2
Currently it looks so `CustomPage.objects.filter(slug='robots')` and should be `.filter(slug='robots.txt')`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/urls.py`
Content:
```
1 from collections import OrderedDict
2
3 from django.conf import settings
4 from django.conf.urls import url, include
5 from django.conf.urls.static import static
6 from django.contrib.sitemaps.views import sitemap
7 from django.views.decorators.cache import cache_page
8
9 from pages.views import RobotsView, SitemapPage
10 from pages.urls import custom_page_url
11
12 from shopelectro import sitemaps, config, views
13 from shopelectro.admin import se_admin
14
15 # Orders sitemaps instances
16 sitemaps = OrderedDict([
17 ('index', sitemaps.IndexSitemap),
18 ('category', sitemaps.CategorySitemap),
19 ('category-with-tags', sitemaps.CategoryWithTagsSitemap),
20 ('products', sitemaps.ProductSitemap),
21 ('site', sitemaps.PagesSitemap)
22 ])
23
24 # disable cache
25 if settings.DEBUG:
26 def cache_page(arg): # Ignore PyFlakesBear
27 if callable(arg):
28 return arg
29 return cache_page
30
31 cached_60d = cache_page(config.cached_time(days=60))
32 cached_2h = cache_page(config.cached_time(hours=2))
33
34 admin_urls = [
35 url(r'^', se_admin.urls),
36 url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),
37 url(r'^get-tree-items/$', views.Tree.as_view()),
38 url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),
39 url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),
40 url(r'^select2/', include('django_select2.urls')),
41 ]
42
43 catalog_urls = [
44 # "category" group
45 url(r'^categories/(?P<slug>[\w-]+)/$',
46 cached_2h(views.CategoryPage.as_view()), name='category'),
47 url(r'^categories/(?P<slug>[\w-]+)/tags/(?P<tags>[\w-]+)/$',
48 cached_2h(views.CategoryPage.as_view()), name='category'),
49 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/$',
50 views.CategoryPage.as_view(), name='category'),
51 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w-]+)/$',
52 views.CategoryPage.as_view(), name='category'),
53 # "load more" group
54 url(r'categories/(?P<category_slug>[\w-]+)/load-more/'
55 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',
56 views.load_more, name='load_more'),
57 url(r'categories/(?P<category_slug>[\w-]+)/load-more/'
58 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w-]+)/$',
59 views.load_more, name='load_more'),
60 # rest of urls
61 url(r'^no-images/$', views.ProductsWithoutImages.as_view(),
62 name='products_without_images'),
63 url(r'^no-text/$', views.ProductsWithoutText.as_view(),
64 name='products_without_text'),
65 url(r'^products/(?P<product_vendor_code>[0-9]+)/$',
66 views.ProductPage.as_view(), name='product'),
67 ]
68
69 service_urls = [
70 url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),
71 url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),
72 url(r'^ya-feedback/redirect/$',
73 views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),
74 url(r'^ya-feedback/request/$',
75 views.ya_feedback_request, name='ya_feedback_request'),
76 ]
77
78 search_urls = [
79 url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),
80 ]
81
82 ecommerce_urls = [
83 url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),
84 url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),
85 url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),
86 url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),
87 url(r'^order-call/$', views.order_call),
88 url(r'^one-click-buy/$', views.one_click_buy),
89 url(r'^yandex-order/$', views.YandexOrder.as_view()),
90 url(r'', include('ecommerce.urls')),
91 ]
92
93 custom_pages = [
94 custom_page_url(r'^(?P<page>)$', cached_2h(views.IndexPage.as_view())),
95 custom_page_url(r'^(?P<page>robots\.txt)$', RobotsView.as_view(in_db=True)),
96 custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),
97 custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),
98 custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),
99 custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),
100 custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),
101 ]
102
103 urlpatterns = [
104 url('', include(custom_pages)),
105 url(r'^admin/', include(admin_urls)),
106 url(r'^catalog/', include(catalog_urls)),
107 url(r'^pages/', include('pages.urls')),
108 url(r'^save-feedback/$', views.save_feedback),
109 url(r'^delete-feedback/$', views.delete_feedback),
110 url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),
111 url(r'^shop/', include(ecommerce_urls)),
112 url(r'^search/', include(search_urls)),
113 url(r'^service/', include(service_urls)),
114 url(r'^sitemap\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),
115 ]
116
117 if settings.DEBUG:
118 import debug_toolbar
119
120 urlpatterns += [
121 url(r'^__debug__/', include(debug_toolbar.urls)),
122 *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
123 *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
124 ]
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/urls.py b/shopelectro/urls.py
--- a/shopelectro/urls.py
+++ b/shopelectro/urls.py
@@ -6,6 +6,7 @@
from django.contrib.sitemaps.views import sitemap
from django.views.decorators.cache import cache_page
+from pages.models import CustomPage
from pages.views import RobotsView, SitemapPage
from pages.urls import custom_page_url
@@ -92,7 +93,10 @@
custom_pages = [
custom_page_url(r'^(?P<page>)$', cached_2h(views.IndexPage.as_view())),
- custom_page_url(r'^(?P<page>robots\.txt)$', RobotsView.as_view(in_db=True)),
+ custom_page_url(
+ r'^(?P<page>robots\.txt)$',
+ RobotsView.as_view(in_db=True, objects=CustomPage.objects.filter(slug='robots.txt'))
+ ),
custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),
custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),
custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),
| {"golden_diff": "diff --git a/shopelectro/urls.py b/shopelectro/urls.py\n--- a/shopelectro/urls.py\n+++ b/shopelectro/urls.py\n@@ -6,6 +6,7 @@\n from django.contrib.sitemaps.views import sitemap\n from django.views.decorators.cache import cache_page\n \n+from pages.models import CustomPage\n from pages.views import RobotsView, SitemapPage\n from pages.urls import custom_page_url\n \n@@ -92,7 +93,10 @@\n \n custom_pages = [\n custom_page_url(r'^(?P<page>)$', cached_2h(views.IndexPage.as_view())),\n- custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view(in_db=True)),\n+ custom_page_url(\n+ r'^(?P<page>robots\\.txt)$',\n+ RobotsView.as_view(in_db=True, objects=CustomPage.objects.filter(slug='robots.txt'))\n+ ),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),\n custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),\n", "issue": "Wrong QuerySet in RobotsView stb2\nCurrently it looks so `CustomPage.objects.filter(slug='robots')` and should be `.filter(slug='robots.txt')`\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page\n\nfrom pages.views import RobotsView, SitemapPage\nfrom pages.urls import custom_page_url\n\nfrom shopelectro import sitemaps, config, views\nfrom shopelectro.admin import se_admin\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(config.cached_time(days=60))\ncached_2h = cache_page(config.cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" group\n url(r'categories/(?P<category_slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<category_slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n 
url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n custom_page_url(r'^(?P<page>)$', cached_2h(views.IndexPage.as_view())),\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view(in_db=True)),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),\n custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),\n custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n", "path": "shopelectro/urls.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page\n\nfrom pages.models import CustomPage\nfrom pages.views import RobotsView, SitemapPage\nfrom pages.urls import custom_page_url\n\nfrom shopelectro import sitemaps, config, views\nfrom shopelectro.admin import se_admin\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(config.cached_time(days=60))\ncached_2h = cache_page(config.cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', 
se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" group\n url(r'categories/(?P<category_slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<category_slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n custom_page_url(r'^(?P<page>)$', cached_2h(views.IndexPage.as_view())),\n custom_page_url(\n r'^(?P<page>robots\\.txt)$',\n RobotsView.as_view(in_db=True, objects=CustomPage.objects.filter(slug='robots.txt'))\n ),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),\n custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),\n custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', 
include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n", "path": "shopelectro/urls.py"}]} | 1,886 | 277 |
gh_patches_debug_25269 | rasdani/github-patches | git_diff | pre-commit__pre-commit-493 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Encode some sort of "version" for language-local repositories
Without this, they'll never get upgraded if fixes are made in the pre-commit empty template
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/store.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import os.path
7 import sqlite3
8 import tempfile
9
10 from cached_property import cached_property
11
12 from pre_commit.prefixed_command_runner import PrefixedCommandRunner
13 from pre_commit.util import clean_path_on_failure
14 from pre_commit.util import cmd_output
15 from pre_commit.util import copy_tree_to_path
16 from pre_commit.util import cwd
17 from pre_commit.util import no_git_env
18 from pre_commit.util import resource_filename
19
20
21 logger = logging.getLogger('pre_commit')
22
23
24 def _get_default_directory():
25 """Returns the default directory for the Store. This is intentionally
26 underscored to indicate that `Store.get_default_directory` is the intended
27 way to get this information. This is also done so
28 `Store.get_default_directory` can be mocked in tests and
29 `_get_default_directory` can be tested.
30 """
31 return os.environ.get(
32 'PRE_COMMIT_HOME',
33 os.path.join(os.path.expanduser('~'), '.pre-commit'),
34 )
35
36
37 class Store(object):
38 get_default_directory = staticmethod(_get_default_directory)
39
40 def __init__(self, directory=None):
41 if directory is None:
42 directory = self.get_default_directory()
43
44 self.directory = directory
45 self.__created = False
46
47 def _write_readme(self):
48 with io.open(os.path.join(self.directory, 'README'), 'w') as readme:
49 readme.write(
50 'This directory is maintained by the pre-commit project.\n'
51 'Learn more: https://github.com/pre-commit/pre-commit\n'
52 )
53
54 def _write_sqlite_db(self):
55 # To avoid a race where someone ^Cs between db creation and execution
56 # of the CREATE TABLE statement
57 fd, tmpfile = tempfile.mkstemp(dir=self.directory)
58 # We'll be managing this file ourselves
59 os.close(fd)
60 # sqlite doesn't close its fd with its contextmanager >.<
61 # contextlib.closing fixes this.
62 # See: http://stackoverflow.com/a/28032829/812183
63 with contextlib.closing(sqlite3.connect(tmpfile)) as db:
64 db.executescript(
65 'CREATE TABLE repos ('
66 ' repo CHAR(255) NOT NULL,'
67 ' ref CHAR(255) NOT NULL,'
68 ' path CHAR(255) NOT NULL,'
69 ' PRIMARY KEY (repo, ref)'
70 ');'
71 )
72
73 # Atomic file move
74 os.rename(tmpfile, self.db_path)
75
76 def _create(self):
77 if os.path.exists(self.db_path):
78 return
79 if not os.path.exists(self.directory):
80 os.makedirs(self.directory)
81 self._write_readme()
82 self._write_sqlite_db()
83
84 def require_created(self):
85 """Require the pre-commit file store to be created."""
86 if not self.__created:
87 self._create()
88 self.__created = True
89
90 def _new_repo(self, repo, ref, make_strategy):
91 self.require_created()
92
93 # Check if we already exist
94 with sqlite3.connect(self.db_path) as db:
95 result = db.execute(
96 'SELECT path FROM repos WHERE repo = ? AND ref = ?',
97 [repo, ref],
98 ).fetchone()
99 if result:
100 return result[0]
101
102 logger.info('Initializing environment for {}.'.format(repo))
103
104 directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)
105 with clean_path_on_failure(directory):
106 make_strategy(directory)
107
108 # Update our db with the created repo
109 with sqlite3.connect(self.db_path) as db:
110 db.execute(
111 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',
112 [repo, ref, directory],
113 )
114 return directory
115
116 def clone(self, repo, ref):
117 """Clone the given url and checkout the specific ref."""
118 def clone_strategy(directory):
119 cmd_output(
120 'git', 'clone', '--no-checkout', repo, directory,
121 env=no_git_env(),
122 )
123 with cwd(directory):
124 cmd_output('git', 'reset', ref, '--hard', env=no_git_env())
125
126 return self._new_repo(repo, ref, clone_strategy)
127
128 def make_local(self, deps):
129 def make_local_strategy(directory):
130 copy_tree_to_path(resource_filename('empty_template'), directory)
131 return self._new_repo(
132 'local:{}'.format(','.join(sorted(deps))), 'N/A',
133 make_local_strategy,
134 )
135
136 @cached_property
137 def cmd_runner(self):
138 return PrefixedCommandRunner(self.directory)
139
140 @cached_property
141 def db_path(self):
142 return os.path.join(self.directory, 'db.db')
143
```
Path: `pre_commit/constants.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import pkg_resources
4
5 CONFIG_FILE = '.pre-commit-config.yaml'
6
7 # In 0.12.0, the default file was changed to be namespaced
8 MANIFEST_FILE = '.pre-commit-hooks.yaml'
9 MANIFEST_FILE_LEGACY = 'hooks.yaml'
10
11 YAML_DUMP_KWARGS = {
12 'default_flow_style': False,
13 # Use unicode
14 'encoding': None,
15 'indent': 4,
16 }
17
18 # Bump when installation changes in a backwards / forwards incompatible way
19 INSTALLED_STATE_VERSION = '1'
20
21 VERSION = pkg_resources.get_distribution('pre-commit').version
22 VERSION_PARSED = pkg_resources.parse_version(VERSION)
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/constants.py b/pre_commit/constants.py
--- a/pre_commit/constants.py
+++ b/pre_commit/constants.py
@@ -17,6 +17,8 @@
# Bump when installation changes in a backwards / forwards incompatible way
INSTALLED_STATE_VERSION = '1'
+# Bump when modifying `empty_template`
+LOCAL_REPO_VERSION = '1'
VERSION = pkg_resources.get_distribution('pre-commit').version
VERSION_PARSED = pkg_resources.parse_version(VERSION)
diff --git a/pre_commit/store.py b/pre_commit/store.py
--- a/pre_commit/store.py
+++ b/pre_commit/store.py
@@ -9,6 +9,7 @@
from cached_property import cached_property
+import pre_commit.constants as C
from pre_commit.prefixed_command_runner import PrefixedCommandRunner
from pre_commit.util import clean_path_on_failure
from pre_commit.util import cmd_output
@@ -129,7 +130,7 @@
def make_local_strategy(directory):
copy_tree_to_path(resource_filename('empty_template'), directory)
return self._new_repo(
- 'local:{}'.format(','.join(sorted(deps))), 'N/A',
+ 'local:{}'.format(','.join(sorted(deps))), C.LOCAL_REPO_VERSION,
make_local_strategy,
)
| {"golden_diff": "diff --git a/pre_commit/constants.py b/pre_commit/constants.py\n--- a/pre_commit/constants.py\n+++ b/pre_commit/constants.py\n@@ -17,6 +17,8 @@\n \n # Bump when installation changes in a backwards / forwards incompatible way\n INSTALLED_STATE_VERSION = '1'\n+# Bump when modifying `empty_template`\n+LOCAL_REPO_VERSION = '1'\n \n VERSION = pkg_resources.get_distribution('pre-commit').version\n VERSION_PARSED = pkg_resources.parse_version(VERSION)\ndiff --git a/pre_commit/store.py b/pre_commit/store.py\n--- a/pre_commit/store.py\n+++ b/pre_commit/store.py\n@@ -9,6 +9,7 @@\n \n from cached_property import cached_property\n \n+import pre_commit.constants as C\n from pre_commit.prefixed_command_runner import PrefixedCommandRunner\n from pre_commit.util import clean_path_on_failure\n from pre_commit.util import cmd_output\n@@ -129,7 +130,7 @@\n def make_local_strategy(directory):\n copy_tree_to_path(resource_filename('empty_template'), directory)\n return self._new_repo(\n- 'local:{}'.format(','.join(sorted(deps))), 'N/A',\n+ 'local:{}'.format(','.join(sorted(deps))), C.LOCAL_REPO_VERSION,\n make_local_strategy,\n )\n", "issue": "Encode some sort of \"version\" for language-local repositories\nWithout this, they'll never get upgraded if fixes are made in the pre-commit empty template\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os.path\nimport sqlite3\nimport tempfile\n\nfrom cached_property import cached_property\n\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import copy_tree_to_path\nfrom pre_commit.util import cwd\nfrom pre_commit.util import no_git_env\nfrom pre_commit.util import resource_filename\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_default_directory():\n \"\"\"Returns the default directory for the Store. This is intentionally\n underscored to indicate that `Store.get_default_directory` is the intended\n way to get this information. 
This is also done so\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n return os.environ.get(\n 'PRE_COMMIT_HOME',\n os.path.join(os.path.expanduser('~'), '.pre-commit'),\n )\n\n\nclass Store(object):\n get_default_directory = staticmethod(_get_default_directory)\n\n def __init__(self, directory=None):\n if directory is None:\n directory = self.get_default_directory()\n\n self.directory = directory\n self.__created = False\n\n def _write_readme(self):\n with io.open(os.path.join(self.directory, 'README'), 'w') as readme:\n readme.write(\n 'This directory is maintained by the pre-commit project.\\n'\n 'Learn more: https://github.com/pre-commit/pre-commit\\n'\n )\n\n def _write_sqlite_db(self):\n # To avoid a race where someone ^Cs between db creation and execution\n # of the CREATE TABLE statement\n fd, tmpfile = tempfile.mkstemp(dir=self.directory)\n # We'll be managing this file ourselves\n os.close(fd)\n # sqlite doesn't close its fd with its contextmanager >.<\n # contextlib.closing fixes this.\n # See: http://stackoverflow.com/a/28032829/812183\n with contextlib.closing(sqlite3.connect(tmpfile)) as db:\n db.executescript(\n 'CREATE TABLE repos ('\n ' repo CHAR(255) NOT NULL,'\n ' ref CHAR(255) NOT NULL,'\n ' path CHAR(255) NOT NULL,'\n ' PRIMARY KEY (repo, ref)'\n ');'\n )\n\n # Atomic file move\n os.rename(tmpfile, self.db_path)\n\n def _create(self):\n if os.path.exists(self.db_path):\n return\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n self._write_readme()\n self._write_sqlite_db()\n\n def require_created(self):\n \"\"\"Require the pre-commit file store to be created.\"\"\"\n if not self.__created:\n self._create()\n self.__created = True\n\n def _new_repo(self, repo, ref, make_strategy):\n self.require_created()\n\n # Check if we already exist\n with sqlite3.connect(self.db_path) as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? 
AND ref = ?',\n [repo, ref],\n ).fetchone()\n if result:\n return result[0]\n\n logger.info('Initializing environment for {}.'.format(repo))\n\n directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)\n with clean_path_on_failure(directory):\n make_strategy(directory)\n\n # Update our db with the created repo\n with sqlite3.connect(self.db_path) as db:\n db.execute(\n 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',\n [repo, ref, directory],\n )\n return directory\n\n def clone(self, repo, ref):\n \"\"\"Clone the given url and checkout the specific ref.\"\"\"\n def clone_strategy(directory):\n cmd_output(\n 'git', 'clone', '--no-checkout', repo, directory,\n env=no_git_env(),\n )\n with cwd(directory):\n cmd_output('git', 'reset', ref, '--hard', env=no_git_env())\n\n return self._new_repo(repo, ref, clone_strategy)\n\n def make_local(self, deps):\n def make_local_strategy(directory):\n copy_tree_to_path(resource_filename('empty_template'), directory)\n return self._new_repo(\n 'local:{}'.format(','.join(sorted(deps))), 'N/A',\n make_local_strategy,\n )\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.directory)\n\n @cached_property\n def db_path(self):\n return os.path.join(self.directory, 'db.db')\n", "path": "pre_commit/store.py"}, {"content": "from __future__ import unicode_literals\n\nimport pkg_resources\n\nCONFIG_FILE = '.pre-commit-config.yaml'\n\n# In 0.12.0, the default file was changed to be namespaced\nMANIFEST_FILE = '.pre-commit-hooks.yaml'\nMANIFEST_FILE_LEGACY = 'hooks.yaml'\n\nYAML_DUMP_KWARGS = {\n 'default_flow_style': False,\n # Use unicode\n 'encoding': None,\n 'indent': 4,\n}\n\n# Bump when installation changes in a backwards / forwards incompatible way\nINSTALLED_STATE_VERSION = '1'\n\nVERSION = pkg_resources.get_distribution('pre-commit').version\nVERSION_PARSED = pkg_resources.parse_version(VERSION)\n", "path": "pre_commit/constants.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os.path\nimport sqlite3\nimport tempfile\n\nfrom cached_property import cached_property\n\nimport pre_commit.constants as C\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import copy_tree_to_path\nfrom pre_commit.util import cwd\nfrom pre_commit.util import no_git_env\nfrom pre_commit.util import resource_filename\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_default_directory():\n \"\"\"Returns the default directory for the Store. This is intentionally\n underscored to indicate that `Store.get_default_directory` is the intended\n way to get this information. 
This is also done so\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n return os.environ.get(\n 'PRE_COMMIT_HOME',\n os.path.join(os.path.expanduser('~'), '.pre-commit'),\n )\n\n\nclass Store(object):\n get_default_directory = staticmethod(_get_default_directory)\n\n def __init__(self, directory=None):\n if directory is None:\n directory = self.get_default_directory()\n\n self.directory = directory\n self.__created = False\n\n def _write_readme(self):\n with io.open(os.path.join(self.directory, 'README'), 'w') as readme:\n readme.write(\n 'This directory is maintained by the pre-commit project.\\n'\n 'Learn more: https://github.com/pre-commit/pre-commit\\n'\n )\n\n def _write_sqlite_db(self):\n # To avoid a race where someone ^Cs between db creation and execution\n # of the CREATE TABLE statement\n fd, tmpfile = tempfile.mkstemp(dir=self.directory)\n # We'll be managing this file ourselves\n os.close(fd)\n # sqlite doesn't close its fd with its contextmanager >.<\n # contextlib.closing fixes this.\n # See: http://stackoverflow.com/a/28032829/812183\n with contextlib.closing(sqlite3.connect(tmpfile)) as db:\n db.executescript(\n 'CREATE TABLE repos ('\n ' repo CHAR(255) NOT NULL,'\n ' ref CHAR(255) NOT NULL,'\n ' path CHAR(255) NOT NULL,'\n ' PRIMARY KEY (repo, ref)'\n ');'\n )\n\n # Atomic file move\n os.rename(tmpfile, self.db_path)\n\n def _create(self):\n if os.path.exists(self.db_path):\n return\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n self._write_readme()\n self._write_sqlite_db()\n\n def require_created(self):\n \"\"\"Require the pre-commit file store to be created.\"\"\"\n if not self.__created:\n self._create()\n self.__created = True\n\n def _new_repo(self, repo, ref, make_strategy):\n self.require_created()\n\n # Check if we already exist\n with sqlite3.connect(self.db_path) as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? 
AND ref = ?',\n [repo, ref],\n ).fetchone()\n if result:\n return result[0]\n\n logger.info('Initializing environment for {}.'.format(repo))\n\n directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)\n with clean_path_on_failure(directory):\n make_strategy(directory)\n\n # Update our db with the created repo\n with sqlite3.connect(self.db_path) as db:\n db.execute(\n 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',\n [repo, ref, directory],\n )\n return directory\n\n def clone(self, repo, ref):\n \"\"\"Clone the given url and checkout the specific ref.\"\"\"\n def clone_strategy(directory):\n cmd_output(\n 'git', 'clone', '--no-checkout', repo, directory,\n env=no_git_env(),\n )\n with cwd(directory):\n cmd_output('git', 'reset', ref, '--hard', env=no_git_env())\n\n return self._new_repo(repo, ref, clone_strategy)\n\n def make_local(self, deps):\n def make_local_strategy(directory):\n copy_tree_to_path(resource_filename('empty_template'), directory)\n return self._new_repo(\n 'local:{}'.format(','.join(sorted(deps))), C.LOCAL_REPO_VERSION,\n make_local_strategy,\n )\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.directory)\n\n @cached_property\n def db_path(self):\n return os.path.join(self.directory, 'db.db')\n", "path": "pre_commit/store.py"}, {"content": "from __future__ import unicode_literals\n\nimport pkg_resources\n\nCONFIG_FILE = '.pre-commit-config.yaml'\n\n# In 0.12.0, the default file was changed to be namespaced\nMANIFEST_FILE = '.pre-commit-hooks.yaml'\nMANIFEST_FILE_LEGACY = 'hooks.yaml'\n\nYAML_DUMP_KWARGS = {\n 'default_flow_style': False,\n # Use unicode\n 'encoding': None,\n 'indent': 4,\n}\n\n# Bump when installation changes in a backwards / forwards incompatible way\nINSTALLED_STATE_VERSION = '1'\n# Bump when modifying `empty_template`\nLOCAL_REPO_VERSION = '1'\n\nVERSION = pkg_resources.get_distribution('pre-commit').version\nVERSION_PARSED = pkg_resources.parse_version(VERSION)\n", "path": "pre_commit/constants.py"}]} | 1,834 | 275 |
gh_patches_debug_7905 | rasdani/github-patches | git_diff | coala__coala-591 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
run_tests --html doesn't work on Ubuntu
On Ubuntu and Debian systems, coverage isn't installed as `/usr/bin/coverage3`; rather, it is installed as `/usr/bin/python3-coverage`. Hence, the `--html` flag doesn't work. The corresponding Launchpad bug is [here](https://bugs.launchpad.net/horizon/+bug/1241332).
It would be better to use `python3 -m coverage`, as this is platform-independent.
--- END ISSUE ---
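As a quick illustration of the issue's suggestion — a minimal sketch only, where the helper name and the report directory are assumptions rather than coala code — invoking coverage through the interpreter avoids the distro-specific binary name:

```python
import subprocess
import sys


def run_coverage(args):
    # Run coverage via the current interpreter ("python -m coverage"), which
    # works whether the distro ships /usr/bin/coverage3 or
    # /usr/bin/python3-coverage.
    return subprocess.call([sys.executable, "-m", "coverage"] + list(args))


# Example: generate an HTML report into a results directory.
# run_coverage(["html", "-d", ".coverageresults"])
```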
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/misc/StringConstants.py`
Content:
```
1 import inspect
2 import os
3
4 from coalib.misc.i18n import _
5
6
7 class StringConstants:
8 THIS_IS_A_BUG = _("This is a bug. We are sorry for the inconvenience. "
9 "Please contact the developers for assistance.")
10
11 OBJ_NOT_ACCESSIBLE = _("{} is not accessible and will be ignored!")
12
13 TRUE_STRINGS = ['1',
14 "on",
15 'y',
16 'yes',
17 "yeah",
18 "sure",
19 'true',
20 'definitely',
21 'yup',
22 "right"]
23
24 FALSE_STRINGS = ['0',
25 'off',
26 'n',
27 'no',
28 'nope',
29 'nah',
30 'false',
31 "wrong"]
32
33 # This string contains many unicode characters to challenge tests.
34 COMPLEX_TEST_STRING = ("4 r34l ch4ll3n63: 123 ÄÖü ABc @€¥ §&% {[( ←↓→↑ "
35 "ĦŊħ ß°^ \\\n\u2192")
36
37 # Results from coverage for unittests are stored here.
38 COVERAGE_DIR = "./.coverageresults"
39
40 # Path to the coalib directory
41 coalib_root = os.path.join(os.path.dirname(inspect.getfile(_)),
42 os.path.pardir)
43
44 # Path to the directory containing the default bears
45 coalib_bears_root = os.path.join(coalib_root, os.path.pardir, "bears")
46
47 # Path to the language definition files
48 language_definitions = os.path.join(coalib_root,
49 "bearlib",
50 "languages",
51 "definitions")
52
53 system_coafile = os.path.join(coalib_root, "default_coafile")
54
55 user_coafile = os.path.join(os.path.expanduser("~"), ".coarc")
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/coalib/misc/StringConstants.py b/coalib/misc/StringConstants.py
--- a/coalib/misc/StringConstants.py
+++ b/coalib/misc/StringConstants.py
@@ -1,5 +1,6 @@
import inspect
import os
+import sys
from coalib.misc.i18n import _
@@ -53,3 +54,8 @@
system_coafile = os.path.join(coalib_root, "default_coafile")
user_coafile = os.path.join(os.path.expanduser("~"), ".coarc")
+
+ if sys.platform.startswith("win"): # pragma: no cover
+ python_executable = "python"
+ else:
+ python_executable = "python3"
| {"golden_diff": "diff --git a/coalib/misc/StringConstants.py b/coalib/misc/StringConstants.py\n--- a/coalib/misc/StringConstants.py\n+++ b/coalib/misc/StringConstants.py\n@@ -1,5 +1,6 @@\n import inspect\n import os\n+import sys\n \n from coalib.misc.i18n import _\n \n@@ -53,3 +54,8 @@\n system_coafile = os.path.join(coalib_root, \"default_coafile\")\n \n user_coafile = os.path.join(os.path.expanduser(\"~\"), \".coarc\")\n+\n+ if sys.platform.startswith(\"win\"): # pragma: no cover\n+ python_executable = \"python\"\n+ else:\n+ python_executable = \"python3\"\n", "issue": "run_tests --html doesn't work in ubuntu\nIn ubuntu and debian systems, coverage isn't installed in `/usr/bin/coverage3` ... rather, it is installed as `/usr/bin/python3-coverage`. Hence, `--html` flag doesn't work. The corresponding launchpad bug is [here](https://bugs.launchpad.net/horizon/+bug/1241332)\n\nIt would be better to use `python3 -m coverage` as this is platform independent.\n\n", "before_files": [{"content": "import inspect\nimport os\n\nfrom coalib.misc.i18n import _\n\n\nclass StringConstants:\n THIS_IS_A_BUG = _(\"This is a bug. We are sorry for the inconvenience. \"\n \"Please contact the developers for assistance.\")\n\n OBJ_NOT_ACCESSIBLE = _(\"{} is not accessible and will be ignored!\")\n\n TRUE_STRINGS = ['1',\n \"on\",\n 'y',\n 'yes',\n \"yeah\",\n \"sure\",\n 'true',\n 'definitely',\n 'yup',\n \"right\"]\n\n FALSE_STRINGS = ['0',\n 'off',\n 'n',\n 'no',\n 'nope',\n 'nah',\n 'false',\n \"wrong\"]\n\n # This string contains many unicode characters to challenge tests.\n COMPLEX_TEST_STRING = (\"4 r34l ch4ll3n63: 123 \u00c4\u00d6\u00fc ABc @\u20ac\u00a5 \u00a7&% {[( \u2190\u2193\u2192\u2191 \"\n \"\u0126\u014a\u0127 \u00df\u00b0^ \\\\\\n\\u2192\")\n\n # Results from coverage for unittests are stored here.\n COVERAGE_DIR = \"./.coverageresults\"\n\n # Path to the coalib directory\n coalib_root = os.path.join(os.path.dirname(inspect.getfile(_)),\n os.path.pardir)\n\n # Path to the directory containing the default bears\n coalib_bears_root = os.path.join(coalib_root, os.path.pardir, \"bears\")\n\n # Path to the language definition files\n language_definitions = os.path.join(coalib_root,\n \"bearlib\",\n \"languages\",\n \"definitions\")\n\n system_coafile = os.path.join(coalib_root, \"default_coafile\")\n\n user_coafile = os.path.join(os.path.expanduser(\"~\"), \".coarc\")\n", "path": "coalib/misc/StringConstants.py"}], "after_files": [{"content": "import inspect\nimport os\nimport sys\n\nfrom coalib.misc.i18n import _\n\n\nclass StringConstants:\n THIS_IS_A_BUG = _(\"This is a bug. We are sorry for the inconvenience. 
\"\n \"Please contact the developers for assistance.\")\n\n OBJ_NOT_ACCESSIBLE = _(\"{} is not accessible and will be ignored!\")\n\n TRUE_STRINGS = ['1',\n \"on\",\n 'y',\n 'yes',\n \"yeah\",\n \"sure\",\n 'true',\n 'definitely',\n 'yup',\n \"right\"]\n\n FALSE_STRINGS = ['0',\n 'off',\n 'n',\n 'no',\n 'nope',\n 'nah',\n 'false',\n \"wrong\"]\n\n # This string contains many unicode characters to challenge tests.\n COMPLEX_TEST_STRING = (\"4 r34l ch4ll3n63: 123 \u00c4\u00d6\u00fc ABc @\u20ac\u00a5 \u00a7&% {[( \u2190\u2193\u2192\u2191 \"\n \"\u0126\u014a\u0127 \u00df\u00b0^ \\\\\\n\\u2192\")\n\n # Results from coverage for unittests are stored here.\n COVERAGE_DIR = \"./.coverageresults\"\n\n # Path to the coalib directory\n coalib_root = os.path.join(os.path.dirname(inspect.getfile(_)),\n os.path.pardir)\n\n # Path to the directory containing the default bears\n coalib_bears_root = os.path.join(coalib_root, os.path.pardir, \"bears\")\n\n # Path to the language definition files\n language_definitions = os.path.join(coalib_root,\n \"bearlib\",\n \"languages\",\n \"definitions\")\n\n system_coafile = os.path.join(coalib_root, \"default_coafile\")\n\n user_coafile = os.path.join(os.path.expanduser(\"~\"), \".coarc\")\n\n if sys.platform.startswith(\"win\"): # pragma: no cover\n python_executable = \"python\"\n else:\n python_executable = \"python3\"\n", "path": "coalib/misc/StringConstants.py"}]} | 866 | 163 |
gh_patches_debug_33170 | rasdani/github-patches | git_diff | kedro-org__kedro-1838 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tracking.JSONDataSet documentation and docstring are not accurate
Here is the source code for `tracking.JSONDataSet`: https://kedro.readthedocs.io/en/latest/_modules/kedro/extras/datasets/tracking/json_dataset.html#JSONDataSet
The `_load` method has been overridden to raise a `DataSetError` when called.
The documentation does not reflect that well. What has to be fixed:
- The `tracking.JSONDataSet` documentation and API ref should mention that this dataset cannot be loaded (see https://kedro.readthedocs.io/en/latest/kedro.extras.datasets.tracking.JSONDataSet.html)
- The `tracking.JSONDataSet` class docstring should not call `data_set.load()` (this code snippet was probably copied from the `json.JSONDataSet` docstring)
--- END ISSUE ---
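To make the write-only behaviour concrete, here is a minimal sketch based on the issue description and the class's own docstring; the printed error text is an assumption about what `_load` raises:

```python
from kedro.extras.datasets.tracking import JSONDataSet
from kedro.io.core import DataSetError

data_set = JSONDataSet(filepath="test.json")
data_set.save({"col1": 1, "col2": 0.23, "col3": 0.002})  # saving is supported

try:
    data_set.load()  # tracking datasets are write-only, so this raises
except DataSetError as exc:
    print(exc)  # e.g. "Loading not supported for 'JSONDataSet'"
```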
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kedro/extras/datasets/tracking/json_dataset.py`
Content:
```
1 """``JSONDataSet`` saves data to a JSON file using an underlying
2 filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.
3 The ``JSONDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default.
4 """
5 from typing import NoReturn
6
7 from kedro.extras.datasets.json import JSONDataSet as JDS
8 from kedro.io.core import DataSetError
9
10
11 class JSONDataSet(JDS):
12 """``JSONDataSet`` saves data to a JSON file using an underlying
13 filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.
14 The ``JSONDataSet`` is part of Kedro Experiment Tracking.
15 The dataset is versioned by default.
16
17 Example:
18 ::
19
20 >>> from kedro.extras.datasets.tracking import JSONDataSet
21 >>>
22 >>> data = {'col1': 1, 'col2': 0.23, 'col3': 0.002}
23 >>>
24 >>> # data_set = JSONDataSet(filepath="gcs://bucket/test.json")
25 >>> data_set = JSONDataSet(filepath="test.json")
26 >>> data_set.save(data)
27 >>> reloaded = data_set.load()
28 >>> assert data == reloaded
29
30 """
31
32 versioned = True
33
34 def _load(self) -> NoReturn:
35 raise DataSetError(f"Loading not supported for '{self.__class__.__name__}'")
36
```
Path: `kedro/extras/datasets/tracking/metrics_dataset.py`
Content:
```
1 """``MetricsDataSet`` saves data to a JSON file using an underlying
2 filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.
3 The ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default
4 and only takes metrics of numeric values.
5 """
6 import json
7 from typing import Dict, NoReturn
8
9 from kedro.extras.datasets.json import JSONDataSet
10 from kedro.io.core import DataSetError, get_filepath_str
11
12
13 class MetricsDataSet(JSONDataSet):
14 """``MetricsDataSet`` saves data to a JSON file using an underlying
15 filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.
16 The ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default
17 and only takes metrics of numeric values.
18
19 Example:
20 ::
21
22 >>> from kedro.extras.datasets.tracking import MetricsDataSet
23 >>>
24 >>> data = {'col1': 1, 'col2': 0.23, 'col3': 0.002}
25 >>>
26 >>> # data_set = MetricsDataSet(filepath="gcs://bucket/test.json")
27 >>> data_set = MetricsDataSet(filepath="test.json")
28 >>> data_set.save(data)
29 >>> reloaded = data_set.load()
30 >>> assert data == reloaded
31
32 """
33
34 versioned = True
35
36 def _load(self) -> NoReturn:
37 raise DataSetError(f"Loading not supported for '{self.__class__.__name__}'")
38
39 def _save(self, data: Dict[str, float]) -> None:
40 """Converts all values in the data from a ``MetricsDataSet`` to float to make sure
41 they are numeric values which can be displayed in Kedro Viz and then saves the dataset.
42 """
43 try:
44 for key, value in data.items():
45 data[key] = float(value)
46 except ValueError as exc:
47 raise DataSetError(
48 f"The MetricsDataSet expects only numeric values. {exc}"
49 ) from exc
50
51 save_path = get_filepath_str(self._get_save_path(), self._protocol)
52
53 with self._fs.open(save_path, **self._fs_open_args_save) as fs_file:
54 json.dump(data, fs_file, **self._save_args)
55
56 self._invalidate_cache()
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kedro/extras/datasets/tracking/json_dataset.py b/kedro/extras/datasets/tracking/json_dataset.py
--- a/kedro/extras/datasets/tracking/json_dataset.py
+++ b/kedro/extras/datasets/tracking/json_dataset.py
@@ -12,7 +12,7 @@
"""``JSONDataSet`` saves data to a JSON file using an underlying
filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.
The ``JSONDataSet`` is part of Kedro Experiment Tracking.
- The dataset is versioned by default.
+ The dataset is write-only and it is versioned by default.
Example:
::
@@ -24,8 +24,6 @@
>>> # data_set = JSONDataSet(filepath="gcs://bucket/test.json")
>>> data_set = JSONDataSet(filepath="test.json")
>>> data_set.save(data)
- >>> reloaded = data_set.load()
- >>> assert data == reloaded
"""
diff --git a/kedro/extras/datasets/tracking/metrics_dataset.py b/kedro/extras/datasets/tracking/metrics_dataset.py
--- a/kedro/extras/datasets/tracking/metrics_dataset.py
+++ b/kedro/extras/datasets/tracking/metrics_dataset.py
@@ -12,9 +12,9 @@
class MetricsDataSet(JSONDataSet):
"""``MetricsDataSet`` saves data to a JSON file using an underlying
- filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.
- The ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default
- and only takes metrics of numeric values.
+ filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. The
+ ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is write-only,
+ it is versioned by default and only takes metrics of numeric values.
Example:
::
@@ -26,8 +26,6 @@
>>> # data_set = MetricsDataSet(filepath="gcs://bucket/test.json")
>>> data_set = MetricsDataSet(filepath="test.json")
>>> data_set.save(data)
- >>> reloaded = data_set.load()
- >>> assert data == reloaded
"""
| {"golden_diff": "diff --git a/kedro/extras/datasets/tracking/json_dataset.py b/kedro/extras/datasets/tracking/json_dataset.py\n--- a/kedro/extras/datasets/tracking/json_dataset.py\n+++ b/kedro/extras/datasets/tracking/json_dataset.py\n@@ -12,7 +12,7 @@\n \"\"\"``JSONDataSet`` saves data to a JSON file using an underlying\n filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\n The ``JSONDataSet`` is part of Kedro Experiment Tracking.\n- The dataset is versioned by default.\n+ The dataset is write-only and it is versioned by default.\n \n Example:\n ::\n@@ -24,8 +24,6 @@\n >>> # data_set = JSONDataSet(filepath=\"gcs://bucket/test.json\")\n >>> data_set = JSONDataSet(filepath=\"test.json\")\n >>> data_set.save(data)\n- >>> reloaded = data_set.load()\n- >>> assert data == reloaded\n \n \"\"\"\n \ndiff --git a/kedro/extras/datasets/tracking/metrics_dataset.py b/kedro/extras/datasets/tracking/metrics_dataset.py\n--- a/kedro/extras/datasets/tracking/metrics_dataset.py\n+++ b/kedro/extras/datasets/tracking/metrics_dataset.py\n@@ -12,9 +12,9 @@\n \n class MetricsDataSet(JSONDataSet):\n \"\"\"``MetricsDataSet`` saves data to a JSON file using an underlying\n- filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\n- The ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default\n- and only takes metrics of numeric values.\n+ filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. The\n+ ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is write-only,\n+ it is versioned by default and only takes metrics of numeric values.\n \n Example:\n ::\n@@ -26,8 +26,6 @@\n >>> # data_set = MetricsDataSet(filepath=\"gcs://bucket/test.json\")\n >>> data_set = MetricsDataSet(filepath=\"test.json\")\n >>> data_set.save(data)\n- >>> reloaded = data_set.load()\n- >>> assert data == reloaded\n \n \"\"\"\n", "issue": "tracking.JSONDataSet documentation and docstring are not accurate\nHere is the source code for `tracking.JSONDataSet`: https://kedro.readthedocs.io/en/latest/_modules/kedro/extras/datasets/tracking/json_dataset.html#JSONDataSet\r\n\r\nThe `_load` method has been overriden to raise a `DataSetError` when called.\r\n\r\nThe documentation does not reflect that well. What has to be fixed:\r\n\r\n- The `tracking.JSONDataSet` documentation and API ref should mention that this dataset cannot be loaded (see https://kedro.readthedocs.io/en/latest/kedro.extras.datasets.tracking.JSONDataSet.html)\r\n- The `tracking.JSONDataSet` class docstring should not call `data_set.load()` (this code snippet that was probably copied from the `json.JSONDataSet`)\n", "before_files": [{"content": "\"\"\"``JSONDataSet`` saves data to a JSON file using an underlying\nfilesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\nThe ``JSONDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default.\n\"\"\"\nfrom typing import NoReturn\n\nfrom kedro.extras.datasets.json import JSONDataSet as JDS\nfrom kedro.io.core import DataSetError\n\n\nclass JSONDataSet(JDS):\n \"\"\"``JSONDataSet`` saves data to a JSON file using an underlying\n filesystem (e.g.: local, S3, GCS). 
It uses native json to handle the JSON file.\n The ``JSONDataSet`` is part of Kedro Experiment Tracking.\n The dataset is versioned by default.\n\n Example:\n ::\n\n >>> from kedro.extras.datasets.tracking import JSONDataSet\n >>>\n >>> data = {'col1': 1, 'col2': 0.23, 'col3': 0.002}\n >>>\n >>> # data_set = JSONDataSet(filepath=\"gcs://bucket/test.json\")\n >>> data_set = JSONDataSet(filepath=\"test.json\")\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data == reloaded\n\n \"\"\"\n\n versioned = True\n\n def _load(self) -> NoReturn:\n raise DataSetError(f\"Loading not supported for '{self.__class__.__name__}'\")\n", "path": "kedro/extras/datasets/tracking/json_dataset.py"}, {"content": "\"\"\"``MetricsDataSet`` saves data to a JSON file using an underlying\nfilesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\nThe ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default\nand only takes metrics of numeric values.\n\"\"\"\nimport json\nfrom typing import Dict, NoReturn\n\nfrom kedro.extras.datasets.json import JSONDataSet\nfrom kedro.io.core import DataSetError, get_filepath_str\n\n\nclass MetricsDataSet(JSONDataSet):\n \"\"\"``MetricsDataSet`` saves data to a JSON file using an underlying\n filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\n The ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default\n and only takes metrics of numeric values.\n\n Example:\n ::\n\n >>> from kedro.extras.datasets.tracking import MetricsDataSet\n >>>\n >>> data = {'col1': 1, 'col2': 0.23, 'col3': 0.002}\n >>>\n >>> # data_set = MetricsDataSet(filepath=\"gcs://bucket/test.json\")\n >>> data_set = MetricsDataSet(filepath=\"test.json\")\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data == reloaded\n\n \"\"\"\n\n versioned = True\n\n def _load(self) -> NoReturn:\n raise DataSetError(f\"Loading not supported for '{self.__class__.__name__}'\")\n\n def _save(self, data: Dict[str, float]) -> None:\n \"\"\"Converts all values in the data from a ``MetricsDataSet`` to float to make sure\n they are numeric values which can be displayed in Kedro Viz and then saves the dataset.\n \"\"\"\n try:\n for key, value in data.items():\n data[key] = float(value)\n except ValueError as exc:\n raise DataSetError(\n f\"The MetricsDataSet expects only numeric values. {exc}\"\n ) from exc\n\n save_path = get_filepath_str(self._get_save_path(), self._protocol)\n\n with self._fs.open(save_path, **self._fs_open_args_save) as fs_file:\n json.dump(data, fs_file, **self._save_args)\n\n self._invalidate_cache()\n", "path": "kedro/extras/datasets/tracking/metrics_dataset.py"}], "after_files": [{"content": "\"\"\"``JSONDataSet`` saves data to a JSON file using an underlying\nfilesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\nThe ``JSONDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default.\n\"\"\"\nfrom typing import NoReturn\n\nfrom kedro.extras.datasets.json import JSONDataSet as JDS\nfrom kedro.io.core import DataSetError\n\n\nclass JSONDataSet(JDS):\n \"\"\"``JSONDataSet`` saves data to a JSON file using an underlying\n filesystem (e.g.: local, S3, GCS). 
It uses native json to handle the JSON file.\n The ``JSONDataSet`` is part of Kedro Experiment Tracking.\n The dataset is write-only and it is versioned by default.\n\n Example:\n ::\n\n >>> from kedro.extras.datasets.tracking import JSONDataSet\n >>>\n >>> data = {'col1': 1, 'col2': 0.23, 'col3': 0.002}\n >>>\n >>> # data_set = JSONDataSet(filepath=\"gcs://bucket/test.json\")\n >>> data_set = JSONDataSet(filepath=\"test.json\")\n >>> data_set.save(data)\n\n \"\"\"\n\n versioned = True\n\n def _load(self) -> NoReturn:\n raise DataSetError(f\"Loading not supported for '{self.__class__.__name__}'\")\n", "path": "kedro/extras/datasets/tracking/json_dataset.py"}, {"content": "\"\"\"``MetricsDataSet`` saves data to a JSON file using an underlying\nfilesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\nThe ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default\nand only takes metrics of numeric values.\n\"\"\"\nimport json\nfrom typing import Dict, NoReturn\n\nfrom kedro.extras.datasets.json import JSONDataSet\nfrom kedro.io.core import DataSetError, get_filepath_str\n\n\nclass MetricsDataSet(JSONDataSet):\n \"\"\"``MetricsDataSet`` saves data to a JSON file using an underlying\n filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. The\n ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is write-only,\n it is versioned by default and only takes metrics of numeric values.\n\n Example:\n ::\n\n >>> from kedro.extras.datasets.tracking import MetricsDataSet\n >>>\n >>> data = {'col1': 1, 'col2': 0.23, 'col3': 0.002}\n >>>\n >>> # data_set = MetricsDataSet(filepath=\"gcs://bucket/test.json\")\n >>> data_set = MetricsDataSet(filepath=\"test.json\")\n >>> data_set.save(data)\n\n \"\"\"\n\n versioned = True\n\n def _load(self) -> NoReturn:\n raise DataSetError(f\"Loading not supported for '{self.__class__.__name__}'\")\n\n def _save(self, data: Dict[str, float]) -> None:\n \"\"\"Converts all values in the data from a ``MetricsDataSet`` to float to make sure\n they are numeric values which can be displayed in Kedro Viz and then saves the dataset.\n \"\"\"\n try:\n for key, value in data.items():\n data[key] = float(value)\n except ValueError as exc:\n raise DataSetError(\n f\"The MetricsDataSet expects only numeric values. {exc}\"\n ) from exc\n\n save_path = get_filepath_str(self._get_save_path(), self._protocol)\n\n with self._fs.open(save_path, **self._fs_open_args_save) as fs_file:\n json.dump(data, fs_file, **self._save_args)\n\n self._invalidate_cache()\n", "path": "kedro/extras/datasets/tracking/metrics_dataset.py"}]} | 1,440 | 525 |
gh_patches_debug_29307 | rasdani/github-patches | git_diff | shuup__shuup-1557 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Admin: Search doesn't work well with categories
Admin > Search > Search for a category
Why does it say "Products" when it's clearly showing categories?
Deleted categories are also visible in search.

--- END ISSUE ---
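A minimal sketch of the expected search behaviour, assuming a category manager method that excludes deleted objects (the method name, `shop`, and `query` below are illustrative assumptions, not necessarily Shuup's exact API):

```python
from django.db.models import Q

from shuup.core.models import Category


def searchable_categories(shop, query):
    # Hypothetical: only non-deleted categories visible to the current shop
    # should ever be offered as admin search results.
    return Category.objects.all_except_deleted(shop=shop).filter(
        Q(translations__name__icontains=query) | Q(identifier__icontains=query)
    ).distinct()
```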
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shuup/admin/modules/categories/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2018, Shuup Inc. All rights reserved.
5 #
6 # This source code is licensed under the OSL-3.0 license found in the
7 # LICENSE file in the root directory of this source tree.
8 import six
9 from django.db.models import Q
10 from django.utils.translation import ugettext_lazy as _
11 from filer.models import File
12
13 from shuup.admin.base import AdminModule, MenuEntry, SearchResult
14 from shuup.admin.menu import PRODUCTS_MENU_CATEGORY
15 from shuup.admin.utils.permissions import get_default_model_permissions
16 from shuup.admin.utils.urls import (
17 admin_url, derive_model_url, get_edit_and_list_urls, get_model_url
18 )
19 from shuup.admin.views.home import HelpBlockCategory, SimpleHelpBlock
20 from shuup.core.models import Category
21
22
23 class CategoryModule(AdminModule):
24 name = _("Categories")
25 category = _("Products")
26 breadcrumbs_menu_entry = MenuEntry(text=name, url="shuup_admin:category.list", category=PRODUCTS_MENU_CATEGORY)
27
28 def get_urls(self):
29 return [
30 admin_url(
31 "^categories/(?P<pk>\d+)/copy-visibility/$",
32 "shuup.admin.modules.categories.views.CategoryCopyVisibilityView",
33 name="category.copy_visibility",
34 permissions=get_default_model_permissions(Category)
35 ),
36 admin_url(
37 "^categories/(?P<pk>\d+)/delete/$",
38 "shuup.admin.modules.categories.views.CategoryDeleteView",
39 name="category.delete",
40 permissions=get_default_model_permissions(Category)
41 ),
42 admin_url(
43 "^categories/organize/$",
44 "shuup.admin.modules.categories.views.CategoryOrganizeView",
45 name="category.organize",
46 permissions=get_default_model_permissions(Category)
47 )
48 ] + get_edit_and_list_urls(
49 url_prefix="^categories",
50 view_template="shuup.admin.modules.categories.views.Category%sView",
51 name_template="category.%s",
52 permissions=get_default_model_permissions(Category),
53 )
54
55 def get_menu_entries(self, request):
56 return [
57 MenuEntry(
58 text=_("Categories"), icon="fa fa-sitemap",
59 url="shuup_admin:category.list", category=PRODUCTS_MENU_CATEGORY, ordering=2
60 )
61 ]
62
63 def get_search_results(self, request, query):
64 minimum_query_length = 3
65 if len(query) >= minimum_query_length:
66 categories = Category.objects.filter(
67 Q(translations__name__icontains=query) |
68 Q(identifier__icontains=query)
69 ).distinct().order_by("tree_id", "lft")
70 for i, category in enumerate(categories[:10]):
71 relevance = 100 - i
72 yield SearchResult(
73 text=six.text_type(category),
74 url=get_model_url(category),
75 category=self.category,
76 relevance=relevance
77 )
78
79 def get_help_blocks(self, request, kind):
80 yield SimpleHelpBlock(
81 text=_("Add a product category to organize your products"),
82 actions=[{
83 "text": _("New category"),
84 "url": get_model_url(Category, "new")
85 }],
86 icon_url="shuup_admin/img/category.png",
87 category=HelpBlockCategory.PRODUCTS,
88 priority=1,
89 done=Category.objects.filter(shops=request.shop).exists() if kind == "setup" else False
90 )
91
92 def get_required_permissions(self):
93 return get_default_model_permissions(Category) | get_default_model_permissions(File)
94
95 def get_model_url(self, object, kind, shop=None):
96 return derive_model_url(Category, "shuup_admin:category", object, kind)
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shuup/admin/modules/categories/__init__.py b/shuup/admin/modules/categories/__init__.py
--- a/shuup/admin/modules/categories/__init__.py
+++ b/shuup/admin/modules/categories/__init__.py
@@ -12,6 +12,7 @@
from shuup.admin.base import AdminModule, MenuEntry, SearchResult
from shuup.admin.menu import PRODUCTS_MENU_CATEGORY
+from shuup.admin.shop_provider import get_shop
from shuup.admin.utils.permissions import get_default_model_permissions
from shuup.admin.utils.urls import (
admin_url, derive_model_url, get_edit_and_list_urls, get_model_url
@@ -22,7 +23,7 @@
class CategoryModule(AdminModule):
name = _("Categories")
- category = _("Products")
+ category = _("Categories")
breadcrumbs_menu_entry = MenuEntry(text=name, url="shuup_admin:category.list", category=PRODUCTS_MENU_CATEGORY)
def get_urls(self):
@@ -56,8 +57,10 @@
def get_search_results(self, request, query):
minimum_query_length = 3
+
if len(query) >= minimum_query_length:
- categories = Category.objects.filter(
+ shop = get_shop(request)
+ categories = Category.objects.all_except_deleted(shop=shop).filter(
Q(translations__name__icontains=query) |
Q(identifier__icontains=query)
).distinct().order_by("tree_id", "lft")
| {"golden_diff": "diff --git a/shuup/admin/modules/categories/__init__.py b/shuup/admin/modules/categories/__init__.py\n--- a/shuup/admin/modules/categories/__init__.py\n+++ b/shuup/admin/modules/categories/__init__.py\n@@ -12,6 +12,7 @@\n \n from shuup.admin.base import AdminModule, MenuEntry, SearchResult\n from shuup.admin.menu import PRODUCTS_MENU_CATEGORY\n+from shuup.admin.shop_provider import get_shop\n from shuup.admin.utils.permissions import get_default_model_permissions\n from shuup.admin.utils.urls import (\n admin_url, derive_model_url, get_edit_and_list_urls, get_model_url\n@@ -22,7 +23,7 @@\n \n class CategoryModule(AdminModule):\n name = _(\"Categories\")\n- category = _(\"Products\")\n+ category = _(\"Categories\")\n breadcrumbs_menu_entry = MenuEntry(text=name, url=\"shuup_admin:category.list\", category=PRODUCTS_MENU_CATEGORY)\n \n def get_urls(self):\n@@ -56,8 +57,10 @@\n \n def get_search_results(self, request, query):\n minimum_query_length = 3\n+\n if len(query) >= minimum_query_length:\n- categories = Category.objects.filter(\n+ shop = get_shop(request)\n+ categories = Category.objects.all_except_deleted(shop=shop).filter(\n Q(translations__name__icontains=query) |\n Q(identifier__icontains=query)\n ).distinct().order_by(\"tree_id\", \"lft\")\n", "issue": "Admin: Search doesn't work well with categories\nAdmin > Search > Search for a category\r\nWhy does it say \"Products\" when it's clearly showing categories?\r\nDeleted categories are also visible in search. \r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nimport six\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\nfrom filer.models import File\n\nfrom shuup.admin.base import AdminModule, MenuEntry, SearchResult\nfrom shuup.admin.menu import PRODUCTS_MENU_CATEGORY\nfrom shuup.admin.utils.permissions import get_default_model_permissions\nfrom shuup.admin.utils.urls import (\n admin_url, derive_model_url, get_edit_and_list_urls, get_model_url\n)\nfrom shuup.admin.views.home import HelpBlockCategory, SimpleHelpBlock\nfrom shuup.core.models import Category\n\n\nclass CategoryModule(AdminModule):\n name = _(\"Categories\")\n category = _(\"Products\")\n breadcrumbs_menu_entry = MenuEntry(text=name, url=\"shuup_admin:category.list\", category=PRODUCTS_MENU_CATEGORY)\n\n def get_urls(self):\n return [\n admin_url(\n \"^categories/(?P<pk>\\d+)/copy-visibility/$\",\n \"shuup.admin.modules.categories.views.CategoryCopyVisibilityView\",\n name=\"category.copy_visibility\",\n permissions=get_default_model_permissions(Category)\n ),\n admin_url(\n \"^categories/(?P<pk>\\d+)/delete/$\",\n \"shuup.admin.modules.categories.views.CategoryDeleteView\",\n name=\"category.delete\",\n permissions=get_default_model_permissions(Category)\n ),\n admin_url(\n \"^categories/organize/$\",\n \"shuup.admin.modules.categories.views.CategoryOrganizeView\",\n name=\"category.organize\",\n permissions=get_default_model_permissions(Category)\n )\n ] + get_edit_and_list_urls(\n url_prefix=\"^categories\",\n view_template=\"shuup.admin.modules.categories.views.Category%sView\",\n name_template=\"category.%s\",\n permissions=get_default_model_permissions(Category),\n )\n\n def get_menu_entries(self, request):\n return [\n MenuEntry(\n text=_(\"Categories\"), 
icon=\"fa fa-sitemap\",\n url=\"shuup_admin:category.list\", category=PRODUCTS_MENU_CATEGORY, ordering=2\n )\n ]\n\n def get_search_results(self, request, query):\n minimum_query_length = 3\n if len(query) >= minimum_query_length:\n categories = Category.objects.filter(\n Q(translations__name__icontains=query) |\n Q(identifier__icontains=query)\n ).distinct().order_by(\"tree_id\", \"lft\")\n for i, category in enumerate(categories[:10]):\n relevance = 100 - i\n yield SearchResult(\n text=six.text_type(category),\n url=get_model_url(category),\n category=self.category,\n relevance=relevance\n )\n\n def get_help_blocks(self, request, kind):\n yield SimpleHelpBlock(\n text=_(\"Add a product category to organize your products\"),\n actions=[{\n \"text\": _(\"New category\"),\n \"url\": get_model_url(Category, \"new\")\n }],\n icon_url=\"shuup_admin/img/category.png\",\n category=HelpBlockCategory.PRODUCTS,\n priority=1,\n done=Category.objects.filter(shops=request.shop).exists() if kind == \"setup\" else False\n )\n\n def get_required_permissions(self):\n return get_default_model_permissions(Category) | get_default_model_permissions(File)\n\n def get_model_url(self, object, kind, shop=None):\n return derive_model_url(Category, \"shuup_admin:category\", object, kind)\n", "path": "shuup/admin/modules/categories/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nimport six\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\nfrom filer.models import File\n\nfrom shuup.admin.base import AdminModule, MenuEntry, SearchResult\nfrom shuup.admin.menu import PRODUCTS_MENU_CATEGORY\nfrom shuup.admin.shop_provider import get_shop\nfrom shuup.admin.utils.permissions import get_default_model_permissions\nfrom shuup.admin.utils.urls import (\n admin_url, derive_model_url, get_edit_and_list_urls, get_model_url\n)\nfrom shuup.admin.views.home import HelpBlockCategory, SimpleHelpBlock\nfrom shuup.core.models import Category\n\n\nclass CategoryModule(AdminModule):\n name = _(\"Categories\")\n category = _(\"Categories\")\n breadcrumbs_menu_entry = MenuEntry(text=name, url=\"shuup_admin:category.list\", category=PRODUCTS_MENU_CATEGORY)\n\n def get_urls(self):\n return [\n admin_url(\n \"^categories/(?P<pk>\\d+)/copy-visibility/$\",\n \"shuup.admin.modules.categories.views.CategoryCopyVisibilityView\",\n name=\"category.copy_visibility\",\n permissions=get_default_model_permissions(Category)\n ),\n admin_url(\n \"^categories/(?P<pk>\\d+)/delete/$\",\n \"shuup.admin.modules.categories.views.CategoryDeleteView\",\n name=\"category.delete\",\n permissions=get_default_model_permissions(Category)\n ),\n ] + get_edit_and_list_urls(\n url_prefix=\"^categories\",\n view_template=\"shuup.admin.modules.categories.views.Category%sView\",\n name_template=\"category.%s\",\n permissions=get_default_model_permissions(Category),\n )\n\n def get_menu_entries(self, request):\n return [\n MenuEntry(\n text=_(\"Categories\"), icon=\"fa fa-sitemap\",\n url=\"shuup_admin:category.list\", category=PRODUCTS_MENU_CATEGORY, ordering=2\n )\n ]\n\n def get_search_results(self, request, query):\n minimum_query_length = 3\n\n if len(query) >= minimum_query_length:\n shop = get_shop(request)\n categories = Category.objects.all_except_deleted(shop=shop).filter(\n 
Q(translations__name__icontains=query) |\n Q(identifier__icontains=query)\n ).distinct().order_by(\"tree_id\", \"lft\")\n for i, category in enumerate(categories[:10]):\n relevance = 100 - i\n yield SearchResult(\n text=six.text_type(category),\n url=get_model_url(category),\n category=self.category,\n relevance=relevance\n )\n\n def get_help_blocks(self, request, kind):\n yield SimpleHelpBlock(\n text=_(\"Add a product category to organize your products\"),\n actions=[{\n \"text\": _(\"New category\"),\n \"url\": get_model_url(Category, \"new\")\n }],\n icon_url=\"shuup_admin/img/category.png\",\n category=HelpBlockCategory.PRODUCTS,\n priority=1,\n done=Category.objects.filter(shops=request.shop).exists() if kind == \"setup\" else False\n )\n\n def get_required_permissions(self):\n return get_default_model_permissions(Category) | get_default_model_permissions(File)\n\n def get_model_url(self, object, kind, shop=None):\n return derive_model_url(Category, \"shuup_admin:category\", object, kind)\n", "path": "shuup/admin/modules/categories/__init__.py"}]} | 1,361 | 325 |
gh_patches_debug_34114 | rasdani/github-patches | git_diff | learningequality__kolibri-1396 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Coach Reports - change when progress starts being measured in coach reports
## Summary
Implementations team members were confused as to why some exercises were showing up as having progress in the Recent tab, but there ended up being no data for them when they clicked in.
**Suggestion:**
On coach reports, log that an exercise has been engaged with only when the learner clicked "check answer".
That way, we avoid having exercises display on the coach's end because of learners who happen to click into an exercise and leave the page without interacting with the exercise.
A potential problem area in the future is overloading the Recent tab with content that has 0 data because of learners who like to click around & explore.
## System information
- Version: Kolibri 0.4.0alpha6 http://192.237.248.135:8088/
- Operating system: Mac
- Browser: Chrome
## Screenshots

Exercises should log as "in progress" as soon as there's an interaction with them
## Summary
Marking incorrect answers and getting hints does not log it as "in progress"
The first correct answer on an exercise logs it as "in progress"
## System information
- Version: Kolibri 0.4.0alpha1.dev915
- Operating system: MacOS Sierra
- Browser: Chrome

--- END ISSUE ---
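One way to read the suggestion is that only summary logs showing real engagement — some progress, or at least one recorded attempt from "check answer" — should feed the Recent report. A rough sketch of that filter follows; the model and field names are assumptions about Kolibri's logger models:

```python
from django.db.models import Q

from kolibri.logger.models import ContentSummaryLog, MasteryLog

# Hypothetical filter: treat an exercise as "engaged with" only if its summary
# log shows progress or has at least one attempt logged against it.
attempted_masteries = MasteryLog.objects.filter(attemptlogs__isnull=False)
engaged_summaries = ContentSummaryLog.objects.filter(
    Q(progress__gt=0) | Q(masterylogs__in=attempted_masteries)
)
```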
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/plugins/coach/api.py`
Content:
```
1 import datetime
2 from dateutil.parser import parse
3
4 from django.utils import timezone
5
6 from kolibri.auth.constants import role_kinds
7 from kolibri.auth.models import Collection, FacilityUser
8 from kolibri.content.models import ContentNode
9 from kolibri.logger.models import ContentSummaryLog
10 from rest_framework import pagination, permissions, viewsets
11
12 from .serializers import ContentReportSerializer, ContentSummarySerializer, UserReportSerializer
13 from .utils.return_users import get_members_or_user
14
15
16 class OptionalPageNumberPagination(pagination.PageNumberPagination):
17 """
18 Pagination class that allows for page number-style pagination, when requested.
19 To activate, the `page_size` argument must be set. For example, to request the first 20 records:
20 `?page_size=20&page=1`
21 """
22 page_size = None
23 page_size_query_param = "page_size"
24
25
26 class KolibriReportPermissions(permissions.BasePermission):
27
28 # check if requesting user has permission for collection or user
29 def has_permission(self, request, view):
30 collection_kind = view.kwargs.get('collection_kind', 'user')
31 collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))
32
33 allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]
34 if 'user' == collection_kind:
35 return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))
36 else:
37 return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))
38
39
40 class UserReportViewSet(viewsets.ModelViewSet):
41
42 permission_classes = (KolibriReportPermissions,)
43 pagination_class = OptionalPageNumberPagination
44 serializer_class = UserReportSerializer
45
46 def get_queryset(self):
47 assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint'
48 return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])
49
50
51 class ContentReportViewSet(viewsets.ModelViewSet):
52
53 permission_classes = (KolibriReportPermissions,)
54 pagination_class = OptionalPageNumberPagination
55 serializer_class = ContentReportSerializer
56
57 def get_queryset(self):
58 content_node_id = self.kwargs['content_node_id']
59 return ContentNode.objects.filter(parent=content_node_id)
60
61
62 class ContentSummaryViewSet(viewsets.ModelViewSet):
63
64 permission_classes = (KolibriReportPermissions,)
65 serializer_class = ContentSummarySerializer
66
67 def get_queryset(self):
68 return ContentNode.objects.all()
69
70
71 class UserSummaryViewSet(viewsets.ModelViewSet):
72
73 permission_classes = (KolibriReportPermissions,)
74 serializer_class = UserReportSerializer
75
76 def get_queryset(self):
77 return FacilityUser.objects.all()
78
79
80 class RecentReportViewSet(viewsets.ModelViewSet):
81
82 permission_classes = (KolibriReportPermissions,)
83 pagination_class = OptionalPageNumberPagination
84 serializer_class = ContentReportSerializer
85
86 def get_queryset(self):
87 query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])
88 if self.request.query_params.get('last_active_time'):
89 # Last active time specified
90 datetime_cutoff = parse(self.request.query_params.get('last_active_time'))
91 else:
92 datetime_cutoff = timezone.now() - datetime.timedelta(7)
93 # Set on the kwargs to pass into the serializer
94 self.kwargs['last_active_time'] = datetime_cutoff.isoformat()
95 recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(
96 user__in=get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']),
97 end_timestamp__gte=datetime_cutoff).values_list('content_id')
98 return ContentNode.objects.filter(content_id__in=recent_content_items)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/plugins/coach/api.py b/kolibri/plugins/coach/api.py
--- a/kolibri/plugins/coach/api.py
+++ b/kolibri/plugins/coach/api.py
@@ -1,12 +1,13 @@
import datetime
from dateutil.parser import parse
+from django.db.models import Q
from django.utils import timezone
from kolibri.auth.constants import role_kinds
from kolibri.auth.models import Collection, FacilityUser
from kolibri.content.models import ContentNode
-from kolibri.logger.models import ContentSummaryLog
+from kolibri.logger.models import ContentSummaryLog, MasteryLog
from rest_framework import pagination, permissions, viewsets
from .serializers import ContentReportSerializer, ContentSummarySerializer, UserReportSerializer
@@ -84,6 +85,7 @@
serializer_class = ContentReportSerializer
def get_queryset(self):
+ attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)
query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])
if self.request.query_params.get('last_active_time'):
# Last active time specified
@@ -93,6 +95,7 @@
# Set on the kwargs to pass into the serializer
self.kwargs['last_active_time'] = datetime_cutoff.isoformat()
recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(
+ Q(progress__gt=0) | Q(masterylogs__in=attempted_mastery_logs),
user__in=get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']),
end_timestamp__gte=datetime_cutoff).values_list('content_id')
return ContentNode.objects.filter(content_id__in=recent_content_items)
| {"golden_diff": "diff --git a/kolibri/plugins/coach/api.py b/kolibri/plugins/coach/api.py\n--- a/kolibri/plugins/coach/api.py\n+++ b/kolibri/plugins/coach/api.py\n@@ -1,12 +1,13 @@\n import datetime\n from dateutil.parser import parse\n \n+from django.db.models import Q\n from django.utils import timezone\n \n from kolibri.auth.constants import role_kinds\n from kolibri.auth.models import Collection, FacilityUser\n from kolibri.content.models import ContentNode\n-from kolibri.logger.models import ContentSummaryLog\n+from kolibri.logger.models import ContentSummaryLog, MasteryLog\n from rest_framework import pagination, permissions, viewsets\n \n from .serializers import ContentReportSerializer, ContentSummarySerializer, UserReportSerializer\n@@ -84,6 +85,7 @@\n serializer_class = ContentReportSerializer\n \n def get_queryset(self):\n+ attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)\n query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])\n if self.request.query_params.get('last_active_time'):\n # Last active time specified\n@@ -93,6 +95,7 @@\n # Set on the kwargs to pass into the serializer\n self.kwargs['last_active_time'] = datetime_cutoff.isoformat()\n recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(\n+ Q(progress__gt=0) | Q(masterylogs__in=attempted_mastery_logs),\n user__in=get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']),\n end_timestamp__gte=datetime_cutoff).values_list('content_id')\n return ContentNode.objects.filter(content_id__in=recent_content_items)\n", "issue": "Coach Reports - change when progress starts being measured in coach reports\n## Summary\r\n\r\nImplementations team members were confused as to why some exercises were showing up as having progress in Recent but there ended up being no data for it when they clicked into it.\r\n\r\n**Suggestion:** \r\nOn coach reports, log that an exercise has been engaged with only when the learner clicked \"check answer\". \r\n\r\nThat way, we avoid having exercises display on the coach's end because of learners who happen to click into an exercise and leave the page without interacting with the exercise.\r\n\r\nA potential problem area in the future is overloading the Recent tab with content that has 0 data because of learners who like to click around & explore. 
\r\n\r\n## System information\r\n\r\n - Version: Kolibri 0.4.0alpha6 http://192.237.248.135:8088/\r\n - Operating system: Mac\r\n - Browser: Chrome\r\n\r\n## Screenshots\r\n\r\n\r\n\nExercises should log as \"in progress\" as soon as there's an interaction with it\n## Summary\r\n\r\nMarking incorrect answers and getting hints does not log it as \"in progress\"\r\nThe first correct answer on an exercise logs it as \"in progress\"\r\n\r\n## System information\r\n\r\n - Version: Kolibri 0.4.0alpha1.dev915\r\n - Operating system: MacOS Sierra\r\n - Browser: Chrome\r\n\r\n\r\n\n", "before_files": [{"content": "import datetime\nfrom dateutil.parser import parse\n\nfrom django.utils import timezone\n\nfrom kolibri.auth.constants import role_kinds\nfrom kolibri.auth.models import Collection, FacilityUser\nfrom kolibri.content.models import ContentNode\nfrom kolibri.logger.models import ContentSummaryLog\nfrom rest_framework import pagination, permissions, viewsets\n\nfrom .serializers import ContentReportSerializer, ContentSummarySerializer, UserReportSerializer\nfrom .utils.return_users import get_members_or_user\n\n\nclass OptionalPageNumberPagination(pagination.PageNumberPagination):\n \"\"\"\n Pagination class that allows for page number-style pagination, when requested.\n To activate, the `page_size` argument must be set. For example, to request the first 20 records:\n `?page_size=20&page=1`\n \"\"\"\n page_size = None\n page_size_query_param = \"page_size\"\n\n\nclass KolibriReportPermissions(permissions.BasePermission):\n\n # check if requesting user has permission for collection or user\n def has_permission(self, request, view):\n collection_kind = view.kwargs.get('collection_kind', 'user')\n collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))\n\n allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]\n if 'user' == collection_kind:\n return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))\n else:\n return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))\n\n\nclass UserReportViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n pagination_class = OptionalPageNumberPagination\n serializer_class = UserReportSerializer\n\n def get_queryset(self):\n assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint'\n return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])\n\n\nclass ContentReportViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n content_node_id = self.kwargs['content_node_id']\n return ContentNode.objects.filter(parent=content_node_id)\n\n\nclass ContentSummaryViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n serializer_class = ContentSummarySerializer\n\n def get_queryset(self):\n return ContentNode.objects.all()\n\n\nclass UserSummaryViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n serializer_class = UserReportSerializer\n\n def get_queryset(self):\n return FacilityUser.objects.all()\n\n\nclass RecentReportViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n query_node = 
ContentNode.objects.get(pk=self.kwargs['content_node_id'])\n if self.request.query_params.get('last_active_time'):\n # Last active time specified\n datetime_cutoff = parse(self.request.query_params.get('last_active_time'))\n else:\n datetime_cutoff = timezone.now() - datetime.timedelta(7)\n # Set on the kwargs to pass into the serializer\n self.kwargs['last_active_time'] = datetime_cutoff.isoformat()\n recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(\n user__in=get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']),\n end_timestamp__gte=datetime_cutoff).values_list('content_id')\n return ContentNode.objects.filter(content_id__in=recent_content_items)\n", "path": "kolibri/plugins/coach/api.py"}], "after_files": [{"content": "import datetime\nfrom dateutil.parser import parse\n\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nfrom kolibri.auth.constants import role_kinds\nfrom kolibri.auth.models import Collection, FacilityUser\nfrom kolibri.content.models import ContentNode\nfrom kolibri.logger.models import ContentSummaryLog, MasteryLog\nfrom rest_framework import pagination, permissions, viewsets\n\nfrom .serializers import ContentReportSerializer, ContentSummarySerializer, UserReportSerializer\nfrom .utils.return_users import get_members_or_user\n\n\nclass OptionalPageNumberPagination(pagination.PageNumberPagination):\n \"\"\"\n Pagination class that allows for page number-style pagination, when requested.\n To activate, the `page_size` argument must be set. For example, to request the first 20 records:\n `?page_size=20&page=1`\n \"\"\"\n page_size = None\n page_size_query_param = \"page_size\"\n\n\nclass KolibriReportPermissions(permissions.BasePermission):\n\n # check if requesting user has permission for collection or user\n def has_permission(self, request, view):\n collection_kind = view.kwargs.get('collection_kind', 'user')\n collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))\n\n allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]\n if 'user' == collection_kind:\n return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))\n else:\n return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))\n\n\nclass UserReportViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n pagination_class = OptionalPageNumberPagination\n serializer_class = UserReportSerializer\n\n def get_queryset(self):\n assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint'\n return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])\n\n\nclass ContentReportViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n content_node_id = self.kwargs['content_node_id']\n return ContentNode.objects.filter(parent=content_node_id)\n\n\nclass ContentSummaryViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n serializer_class = ContentSummarySerializer\n\n def get_queryset(self):\n return ContentNode.objects.all()\n\n\nclass UserSummaryViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n serializer_class = UserReportSerializer\n\n def get_queryset(self):\n return FacilityUser.objects.all()\n\n\nclass 
RecentReportViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)\n query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])\n if self.request.query_params.get('last_active_time'):\n # Last active time specified\n datetime_cutoff = parse(self.request.query_params.get('last_active_time'))\n else:\n datetime_cutoff = timezone.now() - datetime.timedelta(7)\n # Set on the kwargs to pass into the serializer\n self.kwargs['last_active_time'] = datetime_cutoff.isoformat()\n recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(\n Q(progress__gt=0) | Q(masterylogs__in=attempted_mastery_logs),\n user__in=get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']),\n end_timestamp__gte=datetime_cutoff).values_list('content_id')\n return ContentNode.objects.filter(content_id__in=recent_content_items)\n", "path": "kolibri/plugins/coach/api.py"}]} | 1,677 | 369 |
gh_patches_debug_5072 | rasdani/github-patches | git_diff | pypa__cibuildwheel-1217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Should arm64 runners build both universal2 and arm64 wheels by default?
Continuing the discussion from https://github.com/pypa/cibuildwheel/pull/1191#discussion_r928749039
The current situation is that arm64 runners build universal2 and arm64 by default. This discussion is about whether we change that to just arm64.
> @joerick: The question of whether to default building both universal2 and arm64 on the arm runner is still open, in my mind. I'm still mostly of the opinion that building both is good, because universal2 packages are preferred in some contexts (e.g. distributable GUI apps), and that most packages are small and the extra build time isn't much of a hit. Of course, people building big libraries would be wise to change this to just build arm64.
> @henryiii: Personally, I think the default should be native only. Eventually no one will care about Intel / universal and will only produce native binaries again. Also, there are still workarounds related to cross compiling for universal (like requiring all dependencies to also be universal). And we don’t support it yet, but there are two ways to make a universal binary, the default way and merging two native binaries. And finally, universal vs. native is a choice, and if you pick universal, you don’t need native. You can always add tags for the native archs to a universal binary. And pip won’t ever download universal if both native wheels exist.
>
> So I’d recommend native being default, and only producing native binaries with it.
Let's get a decision on this before we release CirrusCI support, which would be our first officially supported arm64 CI.
--- END ISSUE ---
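A minimal sketch of the trade-off under discussion, assuming an Apple Silicon host where `platform.machine()` returns `"arm64"`, and using the `Architecture` helper from the file below; the sets in the comments are what that code computes today versus under a native-only default:
```python
# Sketch only: current default vs. a native-only default on an arm64 macOS host.
from cibuildwheel.architecture import Architecture

current = Architecture.auto_archs(platform="macos")
# today on Apple Silicon: {Architecture.arm64, Architecture.universal2}

native_only = Architecture.parse_config("native", platform="macos")
# {Architecture.arm64}

# Users who still want both can always opt in explicitly, e.g. via
# CIBW_ARCHS_MACOS="arm64 universal2", which is parsed the same way:
explicit = Architecture.parse_config("arm64 universal2", platform="macos")
# {Architecture.arm64, Architecture.universal2}
```
Either default still lets projects request whichever combination they want; the question is only what `auto` should mean on this runner.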
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cibuildwheel/architecture.py`
Content:
```
1 from __future__ import annotations
2
3 import functools
4 import platform as platform_module
5 import re
6 from enum import Enum
7
8 from .typing import Final, Literal, PlatformName, assert_never
9
10 PRETTY_NAMES: Final = {"linux": "Linux", "macos": "macOS", "windows": "Windows"}
11
12
13 @functools.total_ordering
14 class Architecture(Enum):
15 value: str
16
17 # mac/linux archs
18 x86_64 = "x86_64"
19
20 # linux archs
21 i686 = "i686"
22 aarch64 = "aarch64"
23 ppc64le = "ppc64le"
24 s390x = "s390x"
25
26 # mac archs
27 universal2 = "universal2"
28 arm64 = "arm64"
29
30 # windows archs
31 x86 = "x86"
32 AMD64 = "AMD64"
33 ARM64 = "ARM64"
34
35 # Allow this to be sorted
36 def __lt__(self, other: Architecture) -> bool:
37 return self.value < other.value
38
39 @staticmethod
40 def parse_config(config: str, platform: PlatformName) -> set[Architecture]:
41 result = set()
42 for arch_str in re.split(r"[\s,]+", config):
43 if arch_str == "auto":
44 result |= Architecture.auto_archs(platform=platform)
45 elif arch_str == "native":
46 result.add(Architecture(platform_module.machine()))
47 elif arch_str == "all":
48 result |= Architecture.all_archs(platform=platform)
49 elif arch_str == "auto64":
50 result |= Architecture.bitness_archs(platform=platform, bitness="64")
51 elif arch_str == "auto32":
52 result |= Architecture.bitness_archs(platform=platform, bitness="32")
53 else:
54 result.add(Architecture(arch_str))
55 return result
56
57 @staticmethod
58 def auto_archs(platform: PlatformName) -> set[Architecture]:
59 native_architecture = Architecture(platform_module.machine())
60 result = {native_architecture}
61
62 if platform == "linux" and native_architecture == Architecture.x86_64:
63 # x86_64 machines can run i686 containers
64 result.add(Architecture.i686)
65
66 if platform == "windows" and native_architecture == Architecture.AMD64:
67 result.add(Architecture.x86)
68
69 if platform == "macos" and native_architecture == Architecture.arm64:
70 # arm64 can build and test both archs of a universal2 wheel.
71 result.add(Architecture.universal2)
72
73 return result
74
75 @staticmethod
76 def all_archs(platform: PlatformName) -> set[Architecture]:
77 all_archs_map = {
78 "linux": {
79 Architecture.x86_64,
80 Architecture.i686,
81 Architecture.aarch64,
82 Architecture.ppc64le,
83 Architecture.s390x,
84 },
85 "macos": {Architecture.x86_64, Architecture.arm64, Architecture.universal2},
86 "windows": {Architecture.x86, Architecture.AMD64, Architecture.ARM64},
87 }
88 return all_archs_map[platform]
89
90 @staticmethod
91 def bitness_archs(platform: PlatformName, bitness: Literal["64", "32"]) -> set[Architecture]:
92 archs_32 = {Architecture.i686, Architecture.x86}
93 auto_archs = Architecture.auto_archs(platform)
94
95 if bitness == "64":
96 return auto_archs - archs_32
97 elif bitness == "32":
98 return auto_archs & archs_32
99 else:
100 assert_never(bitness)
101
102
103 def allowed_architectures_check(
104 platform: PlatformName,
105 architectures: set[Architecture],
106 ) -> None:
107
108 allowed_architectures = Architecture.all_archs(platform)
109
110 msg = f"{PRETTY_NAMES[platform]} only supports {sorted(allowed_architectures)} at the moment."
111
112 if platform != "linux":
113 msg += " If you want to set emulation architectures on Linux, use CIBW_ARCHS_LINUX instead."
114
115 if not architectures <= allowed_architectures:
116 msg = f"Invalid archs option {architectures}. " + msg
117 raise ValueError(msg)
118
119 if not architectures:
120 msg = "Empty archs option set. " + msg
121 raise ValueError(msg)
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cibuildwheel/architecture.py b/cibuildwheel/architecture.py
--- a/cibuildwheel/architecture.py
+++ b/cibuildwheel/architecture.py
@@ -66,10 +66,6 @@
if platform == "windows" and native_architecture == Architecture.AMD64:
result.add(Architecture.x86)
- if platform == "macos" and native_architecture == Architecture.arm64:
- # arm64 can build and test both archs of a universal2 wheel.
- result.add(Architecture.universal2)
-
return result
@staticmethod
| {"golden_diff": "diff --git a/cibuildwheel/architecture.py b/cibuildwheel/architecture.py\n--- a/cibuildwheel/architecture.py\n+++ b/cibuildwheel/architecture.py\n@@ -66,10 +66,6 @@\n if platform == \"windows\" and native_architecture == Architecture.AMD64:\n result.add(Architecture.x86)\n \n- if platform == \"macos\" and native_architecture == Architecture.arm64:\n- # arm64 can build and test both archs of a universal2 wheel.\n- result.add(Architecture.universal2)\n-\n return result\n \n @staticmethod\n", "issue": "Should arm64 runners build both universal2 and arm64 wheels by default?\nContinuing the discussion from https://github.com/pypa/cibuildwheel/pull/1191#discussion_r928749039\r\n\r\nThe current situation is that arm64 runners builds universal2 and arm64 by default. This discussion is whether we change that to just arm64.\r\n\r\n> @joerick: The question of whether to default building both universal2 and arm64 on the arm runner is still open, in my mind. I'm still mostly of the opinion that building both is good, because universal2 packages are preferred in some contexts (e.g. distributable GUI apps), and that most packages are small and the extra build time isn't much of a hit. Of course, people building big libraries would be wise to change this to just build arm64.\r\n\r\n> @henryiii: Personally, I think the default should be native only. Eventually no one will care about Intel / universal and will only produce native binaries again. Also, there are still workarounds related to cross compiling for universal (like requiring all dependencies to also be universal). And we don\u2019t support it yet, but there are two ways to make a universal binary, the default way and merging two native binaries. And finally universal vs. native is a choice, and I\u2019d you pick universal, you don\u2019t need native. You can always add tags for the native arch\u2019s to a universal binary. 
And pip won\u2019t even ever download universal if both native wheels exist.\r\n> \r\n> So I\u2019d recommend native being default, and only producing native binaries with it.\r\n\r\nLet's get a decision on this before we release CirrusCI support, which would be our first officially supported arm64 CI.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport platform as platform_module\nimport re\nfrom enum import Enum\n\nfrom .typing import Final, Literal, PlatformName, assert_never\n\nPRETTY_NAMES: Final = {\"linux\": \"Linux\", \"macos\": \"macOS\", \"windows\": \"Windows\"}\n\n\[email protected]_ordering\nclass Architecture(Enum):\n value: str\n\n # mac/linux archs\n x86_64 = \"x86_64\"\n\n # linux archs\n i686 = \"i686\"\n aarch64 = \"aarch64\"\n ppc64le = \"ppc64le\"\n s390x = \"s390x\"\n\n # mac archs\n universal2 = \"universal2\"\n arm64 = \"arm64\"\n\n # windows archs\n x86 = \"x86\"\n AMD64 = \"AMD64\"\n ARM64 = \"ARM64\"\n\n # Allow this to be sorted\n def __lt__(self, other: Architecture) -> bool:\n return self.value < other.value\n\n @staticmethod\n def parse_config(config: str, platform: PlatformName) -> set[Architecture]:\n result = set()\n for arch_str in re.split(r\"[\\s,]+\", config):\n if arch_str == \"auto\":\n result |= Architecture.auto_archs(platform=platform)\n elif arch_str == \"native\":\n result.add(Architecture(platform_module.machine()))\n elif arch_str == \"all\":\n result |= Architecture.all_archs(platform=platform)\n elif arch_str == \"auto64\":\n result |= Architecture.bitness_archs(platform=platform, bitness=\"64\")\n elif arch_str == \"auto32\":\n result |= Architecture.bitness_archs(platform=platform, bitness=\"32\")\n else:\n result.add(Architecture(arch_str))\n return result\n\n @staticmethod\n def auto_archs(platform: PlatformName) -> set[Architecture]:\n native_architecture = Architecture(platform_module.machine())\n result = {native_architecture}\n\n if platform == \"linux\" and native_architecture == Architecture.x86_64:\n # x86_64 machines can run i686 containers\n result.add(Architecture.i686)\n\n if platform == \"windows\" and native_architecture == Architecture.AMD64:\n result.add(Architecture.x86)\n\n if platform == \"macos\" and native_architecture == Architecture.arm64:\n # arm64 can build and test both archs of a universal2 wheel.\n result.add(Architecture.universal2)\n\n return result\n\n @staticmethod\n def all_archs(platform: PlatformName) -> set[Architecture]:\n all_archs_map = {\n \"linux\": {\n Architecture.x86_64,\n Architecture.i686,\n Architecture.aarch64,\n Architecture.ppc64le,\n Architecture.s390x,\n },\n \"macos\": {Architecture.x86_64, Architecture.arm64, Architecture.universal2},\n \"windows\": {Architecture.x86, Architecture.AMD64, Architecture.ARM64},\n }\n return all_archs_map[platform]\n\n @staticmethod\n def bitness_archs(platform: PlatformName, bitness: Literal[\"64\", \"32\"]) -> set[Architecture]:\n archs_32 = {Architecture.i686, Architecture.x86}\n auto_archs = Architecture.auto_archs(platform)\n\n if bitness == \"64\":\n return auto_archs - archs_32\n elif bitness == \"32\":\n return auto_archs & archs_32\n else:\n assert_never(bitness)\n\n\ndef allowed_architectures_check(\n platform: PlatformName,\n architectures: set[Architecture],\n) -> None:\n\n allowed_architectures = Architecture.all_archs(platform)\n\n msg = f\"{PRETTY_NAMES[platform]} only supports {sorted(allowed_architectures)} at the moment.\"\n\n if platform != \"linux\":\n msg += \" If you want to set 
emulation architectures on Linux, use CIBW_ARCHS_LINUX instead.\"\n\n if not architectures <= allowed_architectures:\n msg = f\"Invalid archs option {architectures}. \" + msg\n raise ValueError(msg)\n\n if not architectures:\n msg = \"Empty archs option set. \" + msg\n raise ValueError(msg)\n", "path": "cibuildwheel/architecture.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport platform as platform_module\nimport re\nfrom enum import Enum\n\nfrom .typing import Final, Literal, PlatformName, assert_never\n\nPRETTY_NAMES: Final = {\"linux\": \"Linux\", \"macos\": \"macOS\", \"windows\": \"Windows\"}\n\n\[email protected]_ordering\nclass Architecture(Enum):\n value: str\n\n # mac/linux archs\n x86_64 = \"x86_64\"\n\n # linux archs\n i686 = \"i686\"\n aarch64 = \"aarch64\"\n ppc64le = \"ppc64le\"\n s390x = \"s390x\"\n\n # mac archs\n universal2 = \"universal2\"\n arm64 = \"arm64\"\n\n # windows archs\n x86 = \"x86\"\n AMD64 = \"AMD64\"\n ARM64 = \"ARM64\"\n\n # Allow this to be sorted\n def __lt__(self, other: Architecture) -> bool:\n return self.value < other.value\n\n @staticmethod\n def parse_config(config: str, platform: PlatformName) -> set[Architecture]:\n result = set()\n for arch_str in re.split(r\"[\\s,]+\", config):\n if arch_str == \"auto\":\n result |= Architecture.auto_archs(platform=platform)\n elif arch_str == \"native\":\n result.add(Architecture(platform_module.machine()))\n elif arch_str == \"all\":\n result |= Architecture.all_archs(platform=platform)\n elif arch_str == \"auto64\":\n result |= Architecture.bitness_archs(platform=platform, bitness=\"64\")\n elif arch_str == \"auto32\":\n result |= Architecture.bitness_archs(platform=platform, bitness=\"32\")\n else:\n result.add(Architecture(arch_str))\n return result\n\n @staticmethod\n def auto_archs(platform: PlatformName) -> set[Architecture]:\n native_architecture = Architecture(platform_module.machine())\n result = {native_architecture}\n\n if platform == \"linux\" and native_architecture == Architecture.x86_64:\n # x86_64 machines can run i686 containers\n result.add(Architecture.i686)\n\n if platform == \"windows\" and native_architecture == Architecture.AMD64:\n result.add(Architecture.x86)\n\n return result\n\n @staticmethod\n def all_archs(platform: PlatformName) -> set[Architecture]:\n all_archs_map = {\n \"linux\": {\n Architecture.x86_64,\n Architecture.i686,\n Architecture.aarch64,\n Architecture.ppc64le,\n Architecture.s390x,\n },\n \"macos\": {Architecture.x86_64, Architecture.arm64, Architecture.universal2},\n \"windows\": {Architecture.x86, Architecture.AMD64, Architecture.ARM64},\n }\n return all_archs_map[platform]\n\n @staticmethod\n def bitness_archs(platform: PlatformName, bitness: Literal[\"64\", \"32\"]) -> set[Architecture]:\n archs_32 = {Architecture.i686, Architecture.x86}\n auto_archs = Architecture.auto_archs(platform)\n\n if bitness == \"64\":\n return auto_archs - archs_32\n elif bitness == \"32\":\n return auto_archs & archs_32\n else:\n assert_never(bitness)\n\n\ndef allowed_architectures_check(\n platform: PlatformName,\n architectures: set[Architecture],\n) -> None:\n\n allowed_architectures = Architecture.all_archs(platform)\n\n msg = f\"{PRETTY_NAMES[platform]} only supports {sorted(allowed_architectures)} at the moment.\"\n\n if platform != \"linux\":\n msg += \" If you want to set emulation architectures on Linux, use CIBW_ARCHS_LINUX instead.\"\n\n if not architectures <= allowed_architectures:\n msg = f\"Invalid archs option 
{architectures}. \" + msg\n raise ValueError(msg)\n\n if not architectures:\n msg = \"Empty archs option set. \" + msg\n raise ValueError(msg)\n", "path": "cibuildwheel/architecture.py"}]} | 1,895 | 140 |
gh_patches_debug_54055 | rasdani/github-patches | git_diff | docker__docker-py-1669 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issue with port option in 2.4.0 version
Hi,
I updated to 2.4 today and I got an issue with docker-compose when I tried to add the following line to my configuration file (docker-compose.yml):
`ports:
- "127.0.0.1:9292:9090"`
I got the following error:
`
ERROR: for ContainerName expected string or buffer
Traceback (most recent call last):
File "/usr/local/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 68, in main
command()
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 118, in perform_command
handler(command, command_options)
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 926, in up
scale_override=parse_scale_args(options['--scale']),
File "/usr/local/lib/python2.7/dist-packages/compose/project.py", line 424, in up
get_deps
File "/usr/local/lib/python2.7/dist-packages/compose/parallel.py", line 69, in parallel_execute
raise error_to_reraise
TypeError: expected string or buffer
`
I have no issue when I downgrade back to the 2.3 version of the package.
To reproduce the issue, I use the following configuration (it doesn't seem to depend on the image):
```
version: '2'
services :
ContainerName:
image: bae2d441e03a
ports:
- "127.0.0.1:9292:9090"
```
I run on Ubuntu 14.04.5 LTS with the following packages:
```
docker==2.4.0
docker-compose==1.14.0
docker-pycreds==0.2.1
dockerpty==0.4.1
Python 2.7.6
Client:
Version: 17.05.0-ce
API version: 1.29
Go version: go1.7.5
Git commit: 89658be
Built: Thu May 4 22:06:06 2017
OS/Arch: linux/amd64
Server:
Version: 17.05.0-ce
API version: 1.29 (minimum version 1.12)
Go version: go1.7.5
Git commit: 89658be
Built: Thu May 4 22:06:06 2017
OS/Arch: linux/amd64
Experimental: false
```
--- END ISSUE ---
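A minimal sketch of the failure mode, assuming docker-compose ends up handing this port mapping to `split_port()` as a non-string value; the results in the comments follow the `split_port` implementation shown below:
```python
# Sketch only: split_port() handles strings, but PORT_SPEC.match() raises
# TypeError when given anything else (e.g. an int).
from docker.utils.ports import split_port

split_port("127.0.0.1:9292:9090")
# -> (['9090'], [('127.0.0.1', '9292')])

split_port(9090)
# -> TypeError: expected string or buffer   (Python 2.7 wording, as in the traceback)

split_port(str(9090))
# -> (['9090'], None) -- coercing to str first avoids the error
```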
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/ports.py`
Content:
```
1 import re
2
3 PORT_SPEC = re.compile(
4 "^" # Match full string
5 "(" # External part
6 "((?P<host>[a-fA-F\d.:]+):)?" # Address
7 "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
8 ")?"
9 "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
10 "(?P<proto>/(udp|tcp))?" # Protocol
11 "$" # Match full string
12 )
13
14
15 def add_port_mapping(port_bindings, internal_port, external):
16 if internal_port in port_bindings:
17 port_bindings[internal_port].append(external)
18 else:
19 port_bindings[internal_port] = [external]
20
21
22 def add_port(port_bindings, internal_port_range, external_range):
23 if external_range is None:
24 for internal_port in internal_port_range:
25 add_port_mapping(port_bindings, internal_port, None)
26 else:
27 ports = zip(internal_port_range, external_range)
28 for internal_port, external_port in ports:
29 add_port_mapping(port_bindings, internal_port, external_port)
30
31
32 def build_port_bindings(ports):
33 port_bindings = {}
34 for port in ports:
35 internal_port_range, external_range = split_port(port)
36 add_port(port_bindings, internal_port_range, external_range)
37 return port_bindings
38
39
40 def _raise_invalid_port(port):
41 raise ValueError('Invalid port "%s", should be '
42 '[[remote_ip:]remote_port[-remote_port]:]'
43 'port[/protocol]' % port)
44
45
46 def port_range(start, end, proto, randomly_available_port=False):
47 if not start:
48 return start
49 if not end:
50 return [start + proto]
51 if randomly_available_port:
52 return ['{}-{}'.format(start, end) + proto]
53 return [str(port) + proto for port in range(int(start), int(end) + 1)]
54
55
56 def split_port(port):
57 match = PORT_SPEC.match(port)
58 if match is None:
59 _raise_invalid_port(port)
60 parts = match.groupdict()
61
62 host = parts['host']
63 proto = parts['proto'] or ''
64 internal = port_range(parts['int'], parts['int_end'], proto)
65 external = port_range(
66 parts['ext'], parts['ext_end'], '', len(internal) == 1)
67
68 if host is None:
69 if external is not None and len(internal) != len(external):
70 raise ValueError('Port ranges don\'t match in length')
71 return internal, external
72 else:
73 if not external:
74 external = [None] * len(internal)
75 elif len(internal) != len(external):
76 raise ValueError('Port ranges don\'t match in length')
77 return internal, [(host, ext_port) for ext_port in external]
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/utils/ports.py b/docker/utils/ports.py
--- a/docker/utils/ports.py
+++ b/docker/utils/ports.py
@@ -54,6 +54,7 @@
def split_port(port):
+ port = str(port)
match = PORT_SPEC.match(port)
if match is None:
_raise_invalid_port(port)
| {"golden_diff": "diff --git a/docker/utils/ports.py b/docker/utils/ports.py\n--- a/docker/utils/ports.py\n+++ b/docker/utils/ports.py\n@@ -54,6 +54,7 @@\n \n \n def split_port(port):\n+ port = str(port)\n match = PORT_SPEC.match(port)\n if match is None:\n _raise_invalid_port(port)\n", "issue": "Issue with port option in 2.4.0 version\nHi,\r\nI update to the 2.4 today and i got issue with docker-compose when i try to add the following line to my configuration file (docker-compose.yml) : \r\n`ports:\r\n - \"127.0.0.1:9292:9090\"`\r\n\r\nI got the following error:\r\n\r\n`\r\nERROR: for ContainerName expected string or buffer\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/docker-compose\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 68, in main\r\n command()\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 118, in perform_command\r\n handler(command, command_options)\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 926, in up\r\n scale_override=parse_scale_args(options['--scale']),\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/project.py\", line 424, in up\r\n get_deps\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/parallel.py\", line 69, in parallel_execute\r\n raise error_to_reraise\r\nTypeError: expected string or buffer\r\n`\r\n\r\nI have no issue when i downgrade again to the 2.3 version of the package\r\n\r\nTo reproduce the issue, i use the following configuration ( it doesn't seem to depend on the image):\r\n```\r\nversion: '2'\r\n\r\nservices :\r\n ContainerName:\r\n image: bae2d441e03a\r\n ports:\r\n - \"127.0.0.1:9292:9090\"\r\n```\r\n\r\nI run on Ubuntu 14.04.5 LTS with the following package:\r\n```\r\ndocker==2.4.0\r\ndocker-compose==1.14.0\r\ndocker-pycreds==0.2.1\r\ndockerpty==0.4.1\r\nPython 2.7.6\r\nClient:\r\n Version: 17.05.0-ce\r\n API version: 1.29\r\n Go version: go1.7.5\r\n Git commit: 89658be\r\n Built: Thu May 4 22:06:06 2017\r\n OS/Arch: linux/amd64\r\n\r\nServer:\r\n Version: 17.05.0-ce\r\n API version: 1.29 (minimum version 1.12)\r\n Go version: go1.7.5\r\n Git commit: 89658be\r\n Built: Thu May 4 22:06:06 2017\r\n OS/Arch: linux/amd64\r\n Experimental: false\r\n```\n", "before_files": [{"content": "import re\n\nPORT_SPEC = re.compile(\n \"^\" # Match full string\n \"(\" # External part\n \"((?P<host>[a-fA-F\\d.:]+):)?\" # Address\n \"(?P<ext>[\\d]*)(-(?P<ext_end>[\\d]+))?:\" # External range\n \")?\"\n \"(?P<int>[\\d]+)(-(?P<int_end>[\\d]+))?\" # Internal range\n \"(?P<proto>/(udp|tcp))?\" # Protocol\n \"$\" # Match full string\n)\n\n\ndef add_port_mapping(port_bindings, internal_port, external):\n if internal_port in port_bindings:\n port_bindings[internal_port].append(external)\n else:\n port_bindings[internal_port] = [external]\n\n\ndef add_port(port_bindings, internal_port_range, external_range):\n if external_range is None:\n for internal_port in internal_port_range:\n add_port_mapping(port_bindings, internal_port, None)\n else:\n ports = zip(internal_port_range, external_range)\n for internal_port, external_port in ports:\n add_port_mapping(port_bindings, internal_port, external_port)\n\n\ndef build_port_bindings(ports):\n port_bindings = {}\n for port in ports:\n internal_port_range, external_range = split_port(port)\n add_port(port_bindings, internal_port_range, external_range)\n return port_bindings\n\n\ndef _raise_invalid_port(port):\n raise ValueError('Invalid port \"%s\", should 
be '\n '[[remote_ip:]remote_port[-remote_port]:]'\n 'port[/protocol]' % port)\n\n\ndef port_range(start, end, proto, randomly_available_port=False):\n if not start:\n return start\n if not end:\n return [start + proto]\n if randomly_available_port:\n return ['{}-{}'.format(start, end) + proto]\n return [str(port) + proto for port in range(int(start), int(end) + 1)]\n\n\ndef split_port(port):\n match = PORT_SPEC.match(port)\n if match is None:\n _raise_invalid_port(port)\n parts = match.groupdict()\n\n host = parts['host']\n proto = parts['proto'] or ''\n internal = port_range(parts['int'], parts['int_end'], proto)\n external = port_range(\n parts['ext'], parts['ext_end'], '', len(internal) == 1)\n\n if host is None:\n if external is not None and len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, external\n else:\n if not external:\n external = [None] * len(internal)\n elif len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, [(host, ext_port) for ext_port in external]\n", "path": "docker/utils/ports.py"}], "after_files": [{"content": "import re\n\nPORT_SPEC = re.compile(\n \"^\" # Match full string\n \"(\" # External part\n \"((?P<host>[a-fA-F\\d.:]+):)?\" # Address\n \"(?P<ext>[\\d]*)(-(?P<ext_end>[\\d]+))?:\" # External range\n \")?\"\n \"(?P<int>[\\d]+)(-(?P<int_end>[\\d]+))?\" # Internal range\n \"(?P<proto>/(udp|tcp))?\" # Protocol\n \"$\" # Match full string\n)\n\n\ndef add_port_mapping(port_bindings, internal_port, external):\n if internal_port in port_bindings:\n port_bindings[internal_port].append(external)\n else:\n port_bindings[internal_port] = [external]\n\n\ndef add_port(port_bindings, internal_port_range, external_range):\n if external_range is None:\n for internal_port in internal_port_range:\n add_port_mapping(port_bindings, internal_port, None)\n else:\n ports = zip(internal_port_range, external_range)\n for internal_port, external_port in ports:\n add_port_mapping(port_bindings, internal_port, external_port)\n\n\ndef build_port_bindings(ports):\n port_bindings = {}\n for port in ports:\n internal_port_range, external_range = split_port(port)\n add_port(port_bindings, internal_port_range, external_range)\n return port_bindings\n\n\ndef _raise_invalid_port(port):\n raise ValueError('Invalid port \"%s\", should be '\n '[[remote_ip:]remote_port[-remote_port]:]'\n 'port[/protocol]' % port)\n\n\ndef port_range(start, end, proto, randomly_available_port=False):\n if not start:\n return start\n if not end:\n return [start + proto]\n if randomly_available_port:\n return ['{}-{}'.format(start, end) + proto]\n return [str(port) + proto for port in range(int(start), int(end) + 1)]\n\n\ndef split_port(port):\n port = str(port)\n match = PORT_SPEC.match(port)\n if match is None:\n _raise_invalid_port(port)\n parts = match.groupdict()\n\n host = parts['host']\n proto = parts['proto'] or ''\n internal = port_range(parts['int'], parts['int_end'], proto)\n external = port_range(\n parts['ext'], parts['ext_end'], '', len(internal) == 1)\n\n if host is None:\n if external is not None and len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, external\n else:\n if not external:\n external = [None] * len(internal)\n elif len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, [(host, ext_port) for ext_port in external]\n", "path": "docker/utils/ports.py"}]} | 1,672 | 78 |
gh_patches_debug_32765 | rasdani/github-patches | git_diff | openai__evals-1180 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Registry path CLI option for oaievalset
### Describe the feature or improvement you're requesting
It should take a `--registry_path` option like `oaieval` does.
### Additional context
Useful when using as a library, to run eval sets stored in my own project outside this repo.
--- END ISSUE ---
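A minimal sketch of the requested command-line wiring, assuming the new flag mirrors `oaieval`'s `--registry_path`; the model name, eval set name, and path are placeholders:
```python
import argparse

# Sketch only: hypothetical --registry_path option for oaievalset.
parser = argparse.ArgumentParser(description="Run eval sets through the API")
parser.add_argument("model", type=str)
parser.add_argument("eval_set", type=str)
parser.add_argument(
    "--registry_path",
    type=str,
    action="append",
    default=None,
    help="Extra registry directory; may be given more than once.",
)

args = parser.parse_args(["gpt-4", "my-suite", "--registry_path", "./my_project/registry"])
# args.registry_path == ["./my_project/registry"]; these paths would be registered
# with the Registry and forwarded to each spawned `oaieval` command.
```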
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evals/cli/oaievalset.py`
Content:
```
1 """
2 This file defines the `oaievalset` CLI for running eval sets.
3 """
4 import argparse
5 import json
6 import logging
7 import subprocess
8 from pathlib import Path
9 from typing import Optional, cast
10
11 from evals.registry import Registry
12
13 Task = list[str]
14 logger = logging.getLogger(__name__)
15
16
17 class Progress:
18 def __init__(self, file: str) -> None:
19 self.file = Path(file)
20 self.completed: list[Task] = []
21
22 def load(self) -> bool:
23 if not self.file.exists():
24 return False
25
26 with self.file.open() as f:
27 for line in f:
28 self.completed.append(json.loads(line))
29 return len(self.completed) > 0
30
31 def add(self, item: Task) -> None:
32 self.completed.append(item)
33 self.save()
34
35 def save(self) -> None:
36 self.file.parent.mkdir(parents=True, exist_ok=True)
37 with self.file.open("w") as f:
38 for item in self.completed:
39 f.write(json.dumps(item) + "\n")
40 print(highlight(f"Saved progress to {self.file}"))
41
42
43 def highlight(str: str) -> str:
44 return f"\033[1;32m>>> {str}\033[0m"
45
46
47 def get_parser() -> argparse.ArgumentParser:
48 parser = argparse.ArgumentParser(description="Run eval sets through the API")
49 parser.add_argument("model", type=str, help="Name of a completion model.")
50 parser.add_argument("eval_set", type=str, help="Name of eval set. See registry.")
51 parser.add_argument(
52 "--resume",
53 action=argparse.BooleanOptionalAction,
54 default=True,
55 help="Resume from last checkpoint.",
56 )
57 parser.add_argument(
58 "--exit-on-error",
59 action=argparse.BooleanOptionalAction,
60 default=True,
61 help="Exit if any oaieval command fails.",
62 )
63 return parser
64
65
66 class OaiEvalSetArguments(argparse.Namespace):
67 model: str
68 eval_set: str
69 resume: bool
70 exit_on_error: bool
71
72
73 def run(
74 args: OaiEvalSetArguments,
75 unknown_args: list[str],
76 registry: Optional[Registry] = None,
77 run_command: str = "oaieval",
78 ) -> None:
79 registry = registry or Registry()
80 commands: list[Task] = []
81 eval_set = registry.get_eval_set(args.eval_set) if args.eval_set else None
82 if eval_set:
83 for index, eval in enumerate(registry.get_evals(eval_set.evals)):
84 if not eval or not eval.key:
85 logger.debug("The eval #%d in eval_set is not valid", index)
86
87 command = [run_command, args.model, eval.key] + unknown_args
88 if command in commands:
89 continue
90 commands.append(command)
91 else:
92 logger.warning("No eval set found for %s", args.eval_set)
93
94 num_evals = len(commands)
95
96 progress = Progress(f"/tmp/oaievalset/{args.model}.{args.eval_set}.progress.txt")
97 if args.resume and progress.load():
98 print(f"Loaded progress from {progress.file}")
99 print(f"{len(progress.completed)}/{len(commands)} evals already completed:")
100 for item in progress.completed:
101 print(" " + " ".join(item))
102
103 commands = [c for c in commands if c not in progress.completed]
104 command_strs = [" ".join(cmd) for cmd in commands]
105 print("Going to run the following commands:")
106 for command_str in command_strs:
107 print(" " + command_str)
108
109 num_already_completed = num_evals - len(commands)
110 for idx, command in enumerate(commands):
111 real_idx = idx + num_already_completed
112 print(highlight("Running command: " + " ".join(command) + f" ({real_idx+1}/{num_evals})"))
113 subprocess.run(command, stdout=subprocess.PIPE, check=args.exit_on_error)
114 progress.add(command)
115
116 print(highlight("All done!"))
117
118
119 def main() -> None:
120 parser = get_parser()
121 args, unknown_args = parser.parse_known_args()
122 run(cast(OaiEvalSetArguments, args), unknown_args)
123
124
125 if __name__ == "__main__":
126 main()
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evals/cli/oaievalset.py b/evals/cli/oaievalset.py
--- a/evals/cli/oaievalset.py
+++ b/evals/cli/oaievalset.py
@@ -48,6 +48,13 @@
parser = argparse.ArgumentParser(description="Run eval sets through the API")
parser.add_argument("model", type=str, help="Name of a completion model.")
parser.add_argument("eval_set", type=str, help="Name of eval set. See registry.")
+ parser.add_argument(
+ "--registry_path",
+ type=str,
+ default=None,
+ action="append",
+ help="Path to the registry",
+ )
parser.add_argument(
"--resume",
action=argparse.BooleanOptionalAction,
@@ -66,6 +73,7 @@
class OaiEvalSetArguments(argparse.Namespace):
model: str
eval_set: str
+ registry_path: Optional[str]
resume: bool
exit_on_error: bool
@@ -77,6 +85,9 @@
run_command: str = "oaieval",
) -> None:
registry = registry or Registry()
+ if args.registry_path:
+ registry.add_registry_paths(args.registry_path)
+
commands: list[Task] = []
eval_set = registry.get_eval_set(args.eval_set) if args.eval_set else None
if eval_set:
@@ -85,6 +96,9 @@
logger.debug("The eval #%d in eval_set is not valid", index)
command = [run_command, args.model, eval.key] + unknown_args
+ if args.registry_path:
+ command.append("--registry_path")
+ command = command + args.registry_path
if command in commands:
continue
commands.append(command)
| {"golden_diff": "diff --git a/evals/cli/oaievalset.py b/evals/cli/oaievalset.py\n--- a/evals/cli/oaievalset.py\n+++ b/evals/cli/oaievalset.py\n@@ -48,6 +48,13 @@\n parser = argparse.ArgumentParser(description=\"Run eval sets through the API\")\n parser.add_argument(\"model\", type=str, help=\"Name of a completion model.\")\n parser.add_argument(\"eval_set\", type=str, help=\"Name of eval set. See registry.\")\n+ parser.add_argument(\n+ \"--registry_path\",\n+ type=str,\n+ default=None,\n+ action=\"append\",\n+ help=\"Path to the registry\",\n+ )\n parser.add_argument(\n \"--resume\",\n action=argparse.BooleanOptionalAction,\n@@ -66,6 +73,7 @@\n class OaiEvalSetArguments(argparse.Namespace):\n model: str\n eval_set: str\n+ registry_path: Optional[str]\n resume: bool\n exit_on_error: bool\n \n@@ -77,6 +85,9 @@\n run_command: str = \"oaieval\",\n ) -> None:\n registry = registry or Registry()\n+ if args.registry_path:\n+ registry.add_registry_paths(args.registry_path)\n+\n commands: list[Task] = []\n eval_set = registry.get_eval_set(args.eval_set) if args.eval_set else None\n if eval_set:\n@@ -85,6 +96,9 @@\n logger.debug(\"The eval #%d in eval_set is not valid\", index)\n \n command = [run_command, args.model, eval.key] + unknown_args\n+ if args.registry_path:\n+ command.append(\"--registry_path\")\n+ command = command + args.registry_path\n if command in commands:\n continue\n commands.append(command)\n", "issue": "Registry path CLI option for oaievalset\n### Describe the feature or improvement you're requesting\n\nIt should take an `--registry_path` like `oaieval` does\n\n### Additional context\n\nUseful when using as a library, to run eval sets stored in my own project outside this repo.\n", "before_files": [{"content": "\"\"\"\nThis file defines the `oaievalset` CLI for running eval sets.\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport subprocess\nfrom pathlib import Path\nfrom typing import Optional, cast\n\nfrom evals.registry import Registry\n\nTask = list[str]\nlogger = logging.getLogger(__name__)\n\n\nclass Progress:\n def __init__(self, file: str) -> None:\n self.file = Path(file)\n self.completed: list[Task] = []\n\n def load(self) -> bool:\n if not self.file.exists():\n return False\n\n with self.file.open() as f:\n for line in f:\n self.completed.append(json.loads(line))\n return len(self.completed) > 0\n\n def add(self, item: Task) -> None:\n self.completed.append(item)\n self.save()\n\n def save(self) -> None:\n self.file.parent.mkdir(parents=True, exist_ok=True)\n with self.file.open(\"w\") as f:\n for item in self.completed:\n f.write(json.dumps(item) + \"\\n\")\n print(highlight(f\"Saved progress to {self.file}\"))\n\n\ndef highlight(str: str) -> str:\n return f\"\\033[1;32m>>> {str}\\033[0m\"\n\n\ndef get_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(description=\"Run eval sets through the API\")\n parser.add_argument(\"model\", type=str, help=\"Name of a completion model.\")\n parser.add_argument(\"eval_set\", type=str, help=\"Name of eval set. 
See registry.\")\n parser.add_argument(\n \"--resume\",\n action=argparse.BooleanOptionalAction,\n default=True,\n help=\"Resume from last checkpoint.\",\n )\n parser.add_argument(\n \"--exit-on-error\",\n action=argparse.BooleanOptionalAction,\n default=True,\n help=\"Exit if any oaieval command fails.\",\n )\n return parser\n\n\nclass OaiEvalSetArguments(argparse.Namespace):\n model: str\n eval_set: str\n resume: bool\n exit_on_error: bool\n\n\ndef run(\n args: OaiEvalSetArguments,\n unknown_args: list[str],\n registry: Optional[Registry] = None,\n run_command: str = \"oaieval\",\n) -> None:\n registry = registry or Registry()\n commands: list[Task] = []\n eval_set = registry.get_eval_set(args.eval_set) if args.eval_set else None\n if eval_set:\n for index, eval in enumerate(registry.get_evals(eval_set.evals)):\n if not eval or not eval.key:\n logger.debug(\"The eval #%d in eval_set is not valid\", index)\n\n command = [run_command, args.model, eval.key] + unknown_args\n if command in commands:\n continue\n commands.append(command)\n else:\n logger.warning(\"No eval set found for %s\", args.eval_set)\n\n num_evals = len(commands)\n\n progress = Progress(f\"/tmp/oaievalset/{args.model}.{args.eval_set}.progress.txt\")\n if args.resume and progress.load():\n print(f\"Loaded progress from {progress.file}\")\n print(f\"{len(progress.completed)}/{len(commands)} evals already completed:\")\n for item in progress.completed:\n print(\" \" + \" \".join(item))\n\n commands = [c for c in commands if c not in progress.completed]\n command_strs = [\" \".join(cmd) for cmd in commands]\n print(\"Going to run the following commands:\")\n for command_str in command_strs:\n print(\" \" + command_str)\n\n num_already_completed = num_evals - len(commands)\n for idx, command in enumerate(commands):\n real_idx = idx + num_already_completed\n print(highlight(\"Running command: \" + \" \".join(command) + f\" ({real_idx+1}/{num_evals})\"))\n subprocess.run(command, stdout=subprocess.PIPE, check=args.exit_on_error)\n progress.add(command)\n\n print(highlight(\"All done!\"))\n\n\ndef main() -> None:\n parser = get_parser()\n args, unknown_args = parser.parse_known_args()\n run(cast(OaiEvalSetArguments, args), unknown_args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "evals/cli/oaievalset.py"}], "after_files": [{"content": "\"\"\"\nThis file defines the `oaievalset` CLI for running eval sets.\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport subprocess\nfrom pathlib import Path\nfrom typing import Optional, cast\n\nfrom evals.registry import Registry\n\nTask = list[str]\nlogger = logging.getLogger(__name__)\n\n\nclass Progress:\n def __init__(self, file: str) -> None:\n self.file = Path(file)\n self.completed: list[Task] = []\n\n def load(self) -> bool:\n if not self.file.exists():\n return False\n\n with self.file.open() as f:\n for line in f:\n self.completed.append(json.loads(line))\n return len(self.completed) > 0\n\n def add(self, item: Task) -> None:\n self.completed.append(item)\n self.save()\n\n def save(self) -> None:\n self.file.parent.mkdir(parents=True, exist_ok=True)\n with self.file.open(\"w\") as f:\n for item in self.completed:\n f.write(json.dumps(item) + \"\\n\")\n print(highlight(f\"Saved progress to {self.file}\"))\n\n\ndef highlight(str: str) -> str:\n return f\"\\033[1;32m>>> {str}\\033[0m\"\n\n\ndef get_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(description=\"Run eval sets through the API\")\n parser.add_argument(\"model\", type=str, 
help=\"Name of a completion model.\")\n parser.add_argument(\"eval_set\", type=str, help=\"Name of eval set. See registry.\")\n parser.add_argument(\n \"--registry_path\",\n type=str,\n default=None,\n action=\"append\",\n help=\"Path to the registry\",\n )\n parser.add_argument(\n \"--resume\",\n action=argparse.BooleanOptionalAction,\n default=True,\n help=\"Resume from last checkpoint.\",\n )\n parser.add_argument(\n \"--exit-on-error\",\n action=argparse.BooleanOptionalAction,\n default=True,\n help=\"Exit if any oaieval command fails.\",\n )\n return parser\n\n\nclass OaiEvalSetArguments(argparse.Namespace):\n model: str\n eval_set: str\n registry_path: Optional[str]\n resume: bool\n exit_on_error: bool\n\n\ndef run(\n args: OaiEvalSetArguments,\n unknown_args: list[str],\n registry: Optional[Registry] = None,\n run_command: str = \"oaieval\",\n) -> None:\n registry = registry or Registry()\n if args.registry_path:\n registry.add_registry_paths(args.registry_path)\n\n commands: list[Task] = []\n eval_set = registry.get_eval_set(args.eval_set) if args.eval_set else None\n if eval_set:\n for index, eval in enumerate(registry.get_evals(eval_set.evals)):\n if not eval or not eval.key:\n logger.debug(\"The eval #%d in eval_set is not valid\", index)\n\n command = [run_command, args.model, eval.key] + unknown_args\n if args.registry_path:\n command.append(\"--registry_path\")\n command = command + args.registry_path\n if command in commands:\n continue\n commands.append(command)\n else:\n logger.warning(\"No eval set found for %s\", args.eval_set)\n\n num_evals = len(commands)\n\n progress = Progress(f\"/tmp/oaievalset/{args.model}.{args.eval_set}.progress.txt\")\n if args.resume and progress.load():\n print(f\"Loaded progress from {progress.file}\")\n print(f\"{len(progress.completed)}/{len(commands)} evals already completed:\")\n for item in progress.completed:\n print(\" \" + \" \".join(item))\n\n commands = [c for c in commands if c not in progress.completed]\n command_strs = [\" \".join(cmd) for cmd in commands]\n print(\"Going to run the following commands:\")\n for command_str in command_strs:\n print(\" \" + command_str)\n\n num_already_completed = num_evals - len(commands)\n for idx, command in enumerate(commands):\n real_idx = idx + num_already_completed\n print(highlight(\"Running command: \" + \" \".join(command) + f\" ({real_idx+1}/{num_evals})\"))\n subprocess.run(command, stdout=subprocess.PIPE, check=args.exit_on_error)\n progress.add(command)\n\n print(highlight(\"All done!\"))\n\n\ndef main() -> None:\n parser = get_parser()\n args, unknown_args = parser.parse_known_args()\n run(cast(OaiEvalSetArguments, args), unknown_args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "evals/cli/oaievalset.py"}]} | 1,510 | 395 |
gh_patches_debug_22706 | rasdani/github-patches | git_diff | arviz-devs__arviz-426 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incompatibility with emcee3 pre-release
Trying to use the `from_emcee()` function raises the following error for `emcee3`:
> AttributeError: 'EnsembleSampler' object has no attribute 'args'
This is due to line 29 in `io_emcee.py`:
`num_args = len(sampler.args)`
In version 2, the EnsembleSampler class stored this information, but only to pass it to the `_function_wrapper` class, which also stored the same information. This wrapper was for the posterior probability, so `sampler.args` is equivalent to `sampler.lnprobfn.args`.
In version 3, this has been slightly modified: now only the `_FunctionWrapper` class stores this information, and the attribute holding it has been renamed, so in this case it should be retrieved with `sampler.log_prob_fn.args`.
--- END ISSUE ---
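A minimal compatibility sketch, assuming only the two attribute layouts described above (`sampler.args` in emcee 2, `sampler.log_prob_fn.args` in the emcee 3 pre-release); the helper name is illustrative and not part of either library:
```python
# Sketch only: read the args handed to the log-probability function under
# either emcee attribute layout.
def _get_sampler_args(sampler):
    if hasattr(sampler, "args"):        # emcee 2.x keeps them on the sampler
        return sampler.args
    return sampler.log_prob_fn.args     # emcee 3.x keeps them on the wrapper

# io_emcee.py could then compute, e.g.:
# num_args = len(_get_sampler_args(sampler))
```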
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/data/io_emcee.py`
Content:
```
1 """emcee-specific conversion code."""
2 from .inference_data import InferenceData
3 from .base import dict_to_dataset
4
5
6 def _verify_names(sampler, var_names, arg_names):
7 """Make sure var_names and arg_names are assigned reasonably.
8
9 This is meant to run before loading emcee objects into InferenceData.
10 In case var_names or arg_names is None, will provide defaults. If they are
11 not None, it verifies there are the right number of them.
12
13 Throws a ValueError in case validation fails.
14
15 Parameters
16 ----------
17 sampler : emcee.EnsembleSampler
18 Fitted emcee sampler
19 var_names : list[str] or None
20 Names for the emcee parameters
21 arg_names : list[str] or None
22 Names for the args/observations provided to emcee
23
24 Returns
25 -------
26 list[str], list[str]
27 Defaults for var_names and arg_names
28 """
29 num_vars = sampler.chain.shape[-1]
30 num_args = len(sampler.args)
31
32 if var_names is None:
33 var_names = ["var_{}".format(idx) for idx in range(num_vars)]
34 if arg_names is None:
35 arg_names = ["arg_{}".format(idx) for idx in range(num_args)]
36
37 if len(var_names) != num_vars:
38 raise ValueError(
39 "The sampler has {} variables, but only {} var_names were provided!".format(
40 num_vars, len(var_names)
41 )
42 )
43
44 if len(arg_names) != num_args:
45 raise ValueError(
46 "The sampler has {} args, but only {} arg_names were provided!".format(
47 num_args, len(arg_names)
48 )
49 )
50 return var_names, arg_names
51
52
53 class EmceeConverter:
54 """Encapsulate emcee specific logic."""
55
56 def __init__(self, sampler, *_, var_names=None, arg_names=None, coords=None, dims=None):
57 var_names, arg_names = _verify_names(sampler, var_names, arg_names)
58 self.sampler = sampler
59 self.var_names = var_names
60 self.arg_names = arg_names
61 self.coords = coords
62 self.dims = dims
63 import emcee
64
65 self.emcee = emcee
66
67 def posterior_to_xarray(self):
68 """Convert the posterior to an xarray dataset."""
69 data = {}
70 for idx, var_name in enumerate(self.var_names):
71 data[var_name] = self.sampler.chain[(..., idx)]
72 return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)
73
74 def observed_data_to_xarray(self):
75 """Convert observed data to xarray."""
76 data = {}
77 for idx, var_name in enumerate(self.arg_names):
78 data[var_name] = self.sampler.args[idx]
79 return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)
80
81 def to_inference_data(self):
82 """Convert all available data to an InferenceData object."""
83 return InferenceData(
84 **{
85 "posterior": self.posterior_to_xarray(),
86 "observed_data": self.observed_data_to_xarray(),
87 }
88 )
89
90
91 def from_emcee(sampler, *, var_names=None, arg_names=None, coords=None, dims=None):
92 """Convert emcee data into an InferenceData object.
93
94 Parameters
95 ----------
96 sampler : emcee.EnsembleSampler
97 Fitted sampler from emcee.
98 var_names : list[str] (Optional)
99 A list of names for variables in the sampler
100 arg_names : list[str] (Optional)
101 A list of names for args in the sampler
102 coords : dict[str] -> list[str]
103 Map of dimensions to coordinates
104 dims : dict[str] -> list[str]
105 Map variable names to their coordinates
106 """
107 return EmceeConverter(
108 sampler, var_names=var_names, arg_names=arg_names, coords=coords, dims=dims
109 ).to_inference_data()
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/data/io_emcee.py b/arviz/data/io_emcee.py
--- a/arviz/data/io_emcee.py
+++ b/arviz/data/io_emcee.py
@@ -27,7 +27,8 @@
Defaults for var_names and arg_names
"""
num_vars = sampler.chain.shape[-1]
- num_args = len(sampler.args)
+ # Get emcee version 2 sampler args, else get emcee version 3
+ num_args = len(sampler.args) if hasattr(sampler, "args") else len(sampler.log_prob_fn.args)
if var_names is None:
var_names = ["var_{}".format(idx) for idx in range(num_vars)]
@@ -75,7 +76,12 @@
"""Convert observed data to xarray."""
data = {}
for idx, var_name in enumerate(self.arg_names):
- data[var_name] = self.sampler.args[idx]
+ # Get emcee version 2 sampler args, else get emcee version 3
+ data[var_name] = (
+ self.sampler.args[idx]
+ if hasattr(self.sampler, "args")
+ else self.sampler.log_prob_fn.args[idx]
+ )
return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)
def to_inference_data(self):
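With this change the converter accepts samplers fitted with either major version of emcee, and the calling code stays the same (editor's usage note; the variable and argument names are the illustrative defaults generated by `_verify_names`):

```python
# from_emcee as defined in arviz/data/io_emcee.py above
idata = from_emcee(sampler, var_names=["var_0", "var_1"], arg_names=["arg_0"])
```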
| {"golden_diff": "diff --git a/arviz/data/io_emcee.py b/arviz/data/io_emcee.py\n--- a/arviz/data/io_emcee.py\n+++ b/arviz/data/io_emcee.py\n@@ -27,7 +27,8 @@\n Defaults for var_names and arg_names\n \"\"\"\n num_vars = sampler.chain.shape[-1]\n- num_args = len(sampler.args)\n+ # Get emcee version 2 sampler args, else get emcee version 3\n+ num_args = len(sampler.args) if hasattr(sampler, \"args\") else len(sampler.log_prob_fn.args)\n \n if var_names is None:\n var_names = [\"var_{}\".format(idx) for idx in range(num_vars)]\n@@ -75,7 +76,12 @@\n \"\"\"Convert observed data to xarray.\"\"\"\n data = {}\n for idx, var_name in enumerate(self.arg_names):\n- data[var_name] = self.sampler.args[idx]\n+ # Get emcee version 2 sampler args, else get emcee version 3\n+ data[var_name] = (\n+ self.sampler.args[idx]\n+ if hasattr(self.sampler, \"args\")\n+ else self.sampler.log_prob_fn.args[idx]\n+ )\n return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)\n \n def to_inference_data(self):\n", "issue": "Incompatibility with emcee3 pre-release\nTrying to use the `from_emcee()` function raises the following error for `emcee3`:\r\n\r\n> AttributeError: 'EnsembleSampler' object has no attribute 'args'\r\n\r\nThis is due to line 29 in `io_emcee.py`:\r\n\r\n num_args = len(sampler.args)\r\n\r\nIn version 2, the EnsembleSampler class stored this information, but only to pass this to `_function_wrapper` class, which also stored the same information. This wrapper was for the posterior probability, so, `sampler.args` is equivalent to `sampler.lnprobfn.args`.\r\n\r\nIn version 3, this has been slightly modified, and now only the `_FunctionWrapper` class stores this information, however, its name has also been modified, so in this case it should be retrieved with `sampler.log_prob_fn.args`. \n", "before_files": [{"content": "\"\"\"emcee-specific conversion code.\"\"\"\nfrom .inference_data import InferenceData\nfrom .base import dict_to_dataset\n\n\ndef _verify_names(sampler, var_names, arg_names):\n \"\"\"Make sure var_names and arg_names are assigned reasonably.\n\n This is meant to run before loading emcee objects into InferenceData.\n In case var_names or arg_names is None, will provide defaults. 
If they are\n not None, it verifies there are the right number of them.\n\n Throws a ValueError in case validation fails.\n\n Parameters\n ----------\n sampler : emcee.EnsembleSampler\n Fitted emcee sampler\n var_names : list[str] or None\n Names for the emcee parameters\n arg_names : list[str] or None\n Names for the args/observations provided to emcee\n\n Returns\n -------\n list[str], list[str]\n Defaults for var_names and arg_names\n \"\"\"\n num_vars = sampler.chain.shape[-1]\n num_args = len(sampler.args)\n\n if var_names is None:\n var_names = [\"var_{}\".format(idx) for idx in range(num_vars)]\n if arg_names is None:\n arg_names = [\"arg_{}\".format(idx) for idx in range(num_args)]\n\n if len(var_names) != num_vars:\n raise ValueError(\n \"The sampler has {} variables, but only {} var_names were provided!\".format(\n num_vars, len(var_names)\n )\n )\n\n if len(arg_names) != num_args:\n raise ValueError(\n \"The sampler has {} args, but only {} arg_names were provided!\".format(\n num_args, len(arg_names)\n )\n )\n return var_names, arg_names\n\n\nclass EmceeConverter:\n \"\"\"Encapsulate emcee specific logic.\"\"\"\n\n def __init__(self, sampler, *_, var_names=None, arg_names=None, coords=None, dims=None):\n var_names, arg_names = _verify_names(sampler, var_names, arg_names)\n self.sampler = sampler\n self.var_names = var_names\n self.arg_names = arg_names\n self.coords = coords\n self.dims = dims\n import emcee\n\n self.emcee = emcee\n\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n data = {}\n for idx, var_name in enumerate(self.var_names):\n data[var_name] = self.sampler.chain[(..., idx)]\n return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)\n\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n data = {}\n for idx, var_name in enumerate(self.arg_names):\n data[var_name] = self.sampler.args[idx]\n return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\"\"\"\n return InferenceData(\n **{\n \"posterior\": self.posterior_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n }\n )\n\n\ndef from_emcee(sampler, *, var_names=None, arg_names=None, coords=None, dims=None):\n \"\"\"Convert emcee data into an InferenceData object.\n\n Parameters\n ----------\n sampler : emcee.EnsembleSampler\n Fitted sampler from emcee.\n var_names : list[str] (Optional)\n A list of names for variables in the sampler\n arg_names : list[str] (Optional)\n A list of names for args in the sampler\n coords : dict[str] -> list[str]\n Map of dimensions to coordinates\n dims : dict[str] -> list[str]\n Map variable names to their coordinates\n \"\"\"\n return EmceeConverter(\n sampler, var_names=var_names, arg_names=arg_names, coords=coords, dims=dims\n ).to_inference_data()\n", "path": "arviz/data/io_emcee.py"}], "after_files": [{"content": "\"\"\"emcee-specific conversion code.\"\"\"\nfrom .inference_data import InferenceData\nfrom .base import dict_to_dataset\n\n\ndef _verify_names(sampler, var_names, arg_names):\n \"\"\"Make sure var_names and arg_names are assigned reasonably.\n\n This is meant to run before loading emcee objects into InferenceData.\n In case var_names or arg_names is None, will provide defaults. 
If they are\n not None, it verifies there are the right number of them.\n\n Throws a ValueError in case validation fails.\n\n Parameters\n ----------\n sampler : emcee.EnsembleSampler\n Fitted emcee sampler\n var_names : list[str] or None\n Names for the emcee parameters\n arg_names : list[str] or None\n Names for the args/observations provided to emcee\n\n Returns\n -------\n list[str], list[str]\n Defaults for var_names and arg_names\n \"\"\"\n num_vars = sampler.chain.shape[-1]\n # Get emcee version 2 sampler args, else get emcee version 3\n num_args = len(sampler.args) if hasattr(sampler, \"args\") else len(sampler.log_prob_fn.args)\n\n if var_names is None:\n var_names = [\"var_{}\".format(idx) for idx in range(num_vars)]\n if arg_names is None:\n arg_names = [\"arg_{}\".format(idx) for idx in range(num_args)]\n\n if len(var_names) != num_vars:\n raise ValueError(\n \"The sampler has {} variables, but only {} var_names were provided!\".format(\n num_vars, len(var_names)\n )\n )\n\n if len(arg_names) != num_args:\n raise ValueError(\n \"The sampler has {} args, but only {} arg_names were provided!\".format(\n num_args, len(arg_names)\n )\n )\n return var_names, arg_names\n\n\nclass EmceeConverter:\n \"\"\"Encapsulate emcee specific logic.\"\"\"\n\n def __init__(self, sampler, *_, var_names=None, arg_names=None, coords=None, dims=None):\n var_names, arg_names = _verify_names(sampler, var_names, arg_names)\n self.sampler = sampler\n self.var_names = var_names\n self.arg_names = arg_names\n self.coords = coords\n self.dims = dims\n import emcee\n\n self.emcee = emcee\n\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n data = {}\n for idx, var_name in enumerate(self.var_names):\n data[var_name] = self.sampler.chain[(..., idx)]\n return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)\n\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n data = {}\n for idx, var_name in enumerate(self.arg_names):\n # Get emcee version 2 sampler args, else get emcee version 3\n data[var_name] = (\n self.sampler.args[idx]\n if hasattr(self.sampler, \"args\")\n else self.sampler.log_prob_fn.args[idx]\n )\n return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\"\"\"\n return InferenceData(\n **{\n \"posterior\": self.posterior_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n }\n )\n\n\ndef from_emcee(sampler, *, var_names=None, arg_names=None, coords=None, dims=None):\n \"\"\"Convert emcee data into an InferenceData object.\n\n Parameters\n ----------\n sampler : emcee.EnsembleSampler\n Fitted sampler from emcee.\n var_names : list[str] (Optional)\n A list of names for variables in the sampler\n arg_names : list[str] (Optional)\n A list of names for args in the sampler\n coords : dict[str] -> list[str]\n Map of dimensions to coordinates\n dims : dict[str] -> list[str]\n Map variable names to their coordinates\n \"\"\"\n return EmceeConverter(\n sampler, var_names=var_names, arg_names=arg_names, coords=coords, dims=dims\n ).to_inference_data()\n", "path": "arviz/data/io_emcee.py"}]} | 1,519 | 299 |
gh_patches_debug_3463 | rasdani/github-patches | git_diff | ipython__ipython-10046 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Autocompletion with Init-Signatures of C extension classes
I can't quite get autocompletion to work with C extension classes. These classes have a signature (at least since Python 3.4, via `__text_signature__`), but it seems IPython cannot autocomplete the parameters in `__text_signature__`.
For example:
```
>>> import pickle
>>> import inspect
>>> inspect.Signature.from_callable(pickle.Pickler)
<Signature (file, protocol=None, fix_imports=True)>
```
Note that the signature is saved as the `__text_signature__` attribute on `pickle.Pickler`.
I would expect that after typing `pickle.Pickler(` and then hitting TAB it would show:
```
file=
protocol=
fix_imports=
# other stuff
```
but it doesn't.
Classes seem to be special in this regard: IPython is definitely able to get the parameters for functions and methods! It just doesn't work for classes.
--- END ISSUE ---
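As a quick check of the behaviour described above, the raw text signature can be inspected directly (editor's sketch; the exact signature string varies between Python versions):

```python
import inspect
import pickle

print(pickle.Pickler.__text_signature__)  # e.g. '(file, protocol=None, fix_imports=True)'
print(inspect.signature(pickle.Pickler))  # the parameters completion could offer after 'pickle.Pickler('
```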
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/utils/dir2.py`
Content:
```
1 # encoding: utf-8
2 """A fancy version of Python's builtin :func:`dir` function.
3 """
4
5 # Copyright (c) IPython Development Team.
6 # Distributed under the terms of the Modified BSD License.
7
8 import inspect
9 from .py3compat import string_types
10
11
12 def safe_hasattr(obj, attr):
13 """In recent versions of Python, hasattr() only catches AttributeError.
14 This catches all errors.
15 """
16 try:
17 getattr(obj, attr)
18 return True
19 except:
20 return False
21
22
23 def dir2(obj):
24 """dir2(obj) -> list of strings
25
26 Extended version of the Python builtin dir(), which does a few extra
27 checks.
28
29 This version is guaranteed to return only a list of true strings, whereas
30 dir() returns anything that objects inject into themselves, even if they
31 are later not really valid for attribute access (many extension libraries
32 have such bugs).
33 """
34
35 # Start building the attribute list via dir(), and then complete it
36 # with a few extra special-purpose calls.
37
38 try:
39 words = set(dir(obj))
40 except Exception:
41 # TypeError: dir(obj) does not return a list
42 words = set()
43
44 # filter out non-string attributes which may be stuffed by dir() calls
45 # and poor coding in third-party modules
46
47 words = [w for w in words if isinstance(w, string_types)]
48 return sorted(words)
49
50
51 def get_real_method(obj, name):
52 """Like getattr, but with a few extra sanity checks:
53
54 - If obj is a class, ignore its methods
55 - Check if obj is a proxy that claims to have all attributes
56 - Catch attribute access failing with any exception
57 - Check that the attribute is a callable object
58
59 Returns the method or None.
60 """
61 if inspect.isclass(obj):
62 return None
63
64 try:
65 canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None)
66 except Exception:
67 return None
68
69 if canary is not None:
70 # It claimed to have an attribute it should never have
71 return None
72
73 try:
74 m = getattr(obj, name, None)
75 except Exception:
76 return None
77
78 if callable(m):
79 return m
80
81 return None
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/utils/dir2.py b/IPython/utils/dir2.py
--- a/IPython/utils/dir2.py
+++ b/IPython/utils/dir2.py
@@ -41,6 +41,9 @@
# TypeError: dir(obj) does not return a list
words = set()
+ if safe_hasattr(obj, '__class__'):
+ words |= set(dir(obj.__class__))
+
# filter out non-string attributes which may be stuffed by dir() calls
# and poor coding in third-party modules
| {"golden_diff": "diff --git a/IPython/utils/dir2.py b/IPython/utils/dir2.py\n--- a/IPython/utils/dir2.py\n+++ b/IPython/utils/dir2.py\n@@ -41,6 +41,9 @@\n # TypeError: dir(obj) does not return a list\n words = set()\n \n+ if safe_hasattr(obj, '__class__'):\n+ words |= set(dir(obj.__class__))\n+\n # filter out non-string attributes which may be stuffed by dir() calls\n # and poor coding in third-party modules\n", "issue": "Autocompletion with Init-Signatures of C extension classes\nI don't quite get the autocompletion to work with C extension classes. These classes have a Signature (at least since python 3.4 by utilizing `__text_signature__`) but it seems IPython cannot autocomplete the parameters in `__text_signature__`.\r\n\r\nFor example:\r\n\r\n```\r\n>>> import pickle\r\n>>> import inspect\r\n>>> inspect.Signature.from_callable(pickle.Pickler)\r\n<Signature (file, protocol=None, fix_imports=True)>\r\n```\r\n\r\nNote that the signature is saved as `__text_signature__`-attribute on ``pickle.Pickle``.\r\n\r\nI would expect that after printing`pickle.Pickler(` and then hitting TAB it would show:\r\n```\r\nfile=\r\nprotocol=\r\nfix_imports=\r\n# other stuff\r\n```\r\nbut it doesn't. \r\n\r\nClasses seem to be special in this regard - IPython is definitly able to get the parameters for functions and methods! It just doesn't work for classes.\n", "before_files": [{"content": "# encoding: utf-8\n\"\"\"A fancy version of Python's builtin :func:`dir` function.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport inspect\nfrom .py3compat import string_types\n\n\ndef safe_hasattr(obj, attr):\n \"\"\"In recent versions of Python, hasattr() only catches AttributeError.\n This catches all errors.\n \"\"\"\n try:\n getattr(obj, attr)\n return True\n except:\n return False\n\n\ndef dir2(obj):\n \"\"\"dir2(obj) -> list of strings\n\n Extended version of the Python builtin dir(), which does a few extra\n checks.\n\n This version is guaranteed to return only a list of true strings, whereas\n dir() returns anything that objects inject into themselves, even if they\n are later not really valid for attribute access (many extension libraries\n have such bugs).\n \"\"\"\n\n # Start building the attribute list via dir(), and then complete it\n # with a few extra special-purpose calls.\n\n try:\n words = set(dir(obj))\n except Exception:\n # TypeError: dir(obj) does not return a list\n words = set()\n\n # filter out non-string attributes which may be stuffed by dir() calls\n # and poor coding in third-party modules\n\n words = [w for w in words if isinstance(w, string_types)]\n return sorted(words)\n\n\ndef get_real_method(obj, name):\n \"\"\"Like getattr, but with a few extra sanity checks:\n\n - If obj is a class, ignore its methods\n - Check if obj is a proxy that claims to have all attributes\n - Catch attribute access failing with any exception\n - Check that the attribute is a callable object\n\n Returns the method or None.\n \"\"\"\n if inspect.isclass(obj):\n return None\n\n try:\n canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None)\n except Exception:\n return None\n\n if canary is not None:\n # It claimed to have an attribute it should never have\n return None\n\n try:\n m = getattr(obj, name, None)\n except Exception:\n return None\n\n if callable(m):\n return m\n\n return None\n", "path": "IPython/utils/dir2.py"}], "after_files": [{"content": "# encoding: utf-8\n\"\"\"A fancy version of Python's builtin 
:func:`dir` function.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport inspect\nfrom .py3compat import string_types\n\n\ndef safe_hasattr(obj, attr):\n \"\"\"In recent versions of Python, hasattr() only catches AttributeError.\n This catches all errors.\n \"\"\"\n try:\n getattr(obj, attr)\n return True\n except:\n return False\n\n\ndef dir2(obj):\n \"\"\"dir2(obj) -> list of strings\n\n Extended version of the Python builtin dir(), which does a few extra\n checks.\n\n This version is guaranteed to return only a list of true strings, whereas\n dir() returns anything that objects inject into themselves, even if they\n are later not really valid for attribute access (many extension libraries\n have such bugs).\n \"\"\"\n\n # Start building the attribute list via dir(), and then complete it\n # with a few extra special-purpose calls.\n\n try:\n words = set(dir(obj))\n except Exception:\n # TypeError: dir(obj) does not return a list\n words = set()\n\n if safe_hasattr(obj, '__class__'):\n words |= set(dir(obj.__class__))\n\n # filter out non-string attributes which may be stuffed by dir() calls\n # and poor coding in third-party modules\n\n words = [w for w in words if isinstance(w, string_types)]\n return sorted(words)\n\n\ndef get_real_method(obj, name):\n \"\"\"Like getattr, but with a few extra sanity checks:\n\n - If obj is a class, ignore its methods\n - Check if obj is a proxy that claims to have all attributes\n - Catch attribute access failing with any exception\n - Check that the attribute is a callable object\n\n Returns the method or None.\n \"\"\"\n if inspect.isclass(obj):\n return None\n\n try:\n canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None)\n except Exception:\n return None\n\n if canary is not None:\n # It claimed to have an attribute it should never have\n return None\n\n try:\n m = getattr(obj, name, None)\n except Exception:\n return None\n\n if callable(m):\n return m\n\n return None\n", "path": "IPython/utils/dir2.py"}]} | 1,118 | 116 |
gh_patches_debug_9205 | rasdani/github-patches | git_diff | sanic-org__sanic-2622 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Restart workers online (graceful restart) to hot reload in a production environment.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Is your feature request related to a problem? Please describe.
Condition: keep service available any time.
# ---- client code ----
```py
async def run():
while 1:
await post('http://127.0.0.1:8000/')
```
When `app.m.restart("__ALL_PROCESSES__")` is called in a worker, Sanic crashes.
# ---- server code ----
```py
@app.post("/")
async def handler(request):
app.m.restart('__ALL_PROCESSES__')
return response.text('ok')
if __name__ == "__main__":
app.run(debug=True, workers=2)
```
### Describe the solution you'd like
Graceful restarting, and reducing the disruption caused by a restart.
My rough description:
1. Gracefully restart workers; restarting all workers should not crash, and if there is only one worker, blocking for a little while (if the new worker has not started yet) is OK.
2. A way to gracefully restart workers one by one (a fuller handler sketch follows after this issue block), e.g.:
    worker_names = tuple(app.m.workers.keys())
    for worker_name in worker_names:
        ret_val = app.m.restart(worker_name)
        # here, the worker has been gracefully restarted and ret_val is meaningful
3. Possibly combine the above two: when restarting all workers, 50% of the workers restart while the other 50% keep serving.
### Additional context
Simplify the API:
```py
app.m.restart('__ALL_PROCESSES__') => app.m.restart_all()
```
thanks.
--- END ISSUE ---
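A one-by-one (rolling) restart along the lines of point 2 might look like this inside a handler (editor's sketch only; it reuses `app`, `response`, and the `app.m.workers` / `app.m.restart(name)` APIs shown in the issue and in `WorkerMultiplexer` below, and the route path is hypothetical):

```py
@app.post("/rolling-restart")
async def rolling_restart(request):
    # Restart workers one at a time so the remaining workers keep serving traffic.
    for worker_name in tuple(app.m.workers.keys()):
        app.m.restart(worker_name)
    return response.text("ok")
```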
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/worker/multiplexer.py`
Content:
```
1 from multiprocessing.connection import Connection
2 from os import environ, getpid
3 from typing import Any, Dict
4
5 from sanic.worker.process import ProcessState
6 from sanic.worker.state import WorkerState
7
8
9 class WorkerMultiplexer:
10 def __init__(
11 self,
12 monitor_publisher: Connection,
13 worker_state: Dict[str, Any],
14 ):
15 self._monitor_publisher = monitor_publisher
16 self._state = WorkerState(worker_state, self.name)
17
18 def ack(self):
19 self._state._state[self.name] = {
20 **self._state._state[self.name],
21 "state": ProcessState.ACKED.name,
22 }
23
24 def restart(self, name: str = ""):
25 if not name:
26 name = self.name
27 self._monitor_publisher.send(name)
28
29 reload = restart # no cov
30
31 def terminate(self, early: bool = False):
32 message = "__TERMINATE_EARLY__" if early else "__TERMINATE__"
33 self._monitor_publisher.send(message)
34
35 @property
36 def pid(self) -> int:
37 return getpid()
38
39 @property
40 def name(self) -> str:
41 return environ.get("SANIC_WORKER_NAME", "")
42
43 @property
44 def state(self):
45 return self._state
46
47 @property
48 def workers(self) -> Dict[str, Any]:
49 return self.state.full()
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/worker/multiplexer.py b/sanic/worker/multiplexer.py
--- a/sanic/worker/multiplexer.py
+++ b/sanic/worker/multiplexer.py
@@ -21,9 +21,14 @@
"state": ProcessState.ACKED.name,
}
- def restart(self, name: str = ""):
+ def restart(self, name: str = "", all_workers: bool = False):
+ if name and all_workers:
+ raise ValueError(
+ "Ambiguous restart with both a named process and"
+ " all_workers=True"
+ )
if not name:
- name = self.name
+ name = "__ALL_PROCESSES__:" if all_workers else self.name
self._monitor_publisher.send(name)
reload = restart # no cov
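With this patch applied, a handler can ask the monitor to restart every worker without spelling out any internal process name (editor's note illustrating the new keyword; the specific worker name below is hypothetical):

```py
app.m.restart(all_workers=True)      # restart every worker process
app.m.restart("Sanic-Server-0-0")    # or restart a single worker by name
# Passing both a name and all_workers=True raises ValueError, as the diff above shows.
```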
| {"golden_diff": "diff --git a/sanic/worker/multiplexer.py b/sanic/worker/multiplexer.py\n--- a/sanic/worker/multiplexer.py\n+++ b/sanic/worker/multiplexer.py\n@@ -21,9 +21,14 @@\n \"state\": ProcessState.ACKED.name,\n }\n \n- def restart(self, name: str = \"\"):\n+ def restart(self, name: str = \"\", all_workers: bool = False):\n+ if name and all_workers:\n+ raise ValueError(\n+ \"Ambiguous restart with both a named process and\"\n+ \" all_workers=True\"\n+ )\n if not name:\n- name = self.name\n+ name = \"__ALL_PROCESSES__:\" if all_workers else self.name\n self._monitor_publisher.send(name)\n \n reload = restart # no cov\n", "issue": "restart workers online (graceful restart) to hot reload, in production environment.\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Is your feature request related to a problem? Please describe.\r\n\r\nCondition: keep service available any time. \r\n\r\n# ---- client code----\r\n\r\n```py\r\nasync def run(): \r\n while 1:\r\n await post('http://127.0.0.1:8000/') \r\n```\r\n\r\nwhen called app.m.restart(\"__ALL_PROCESSES__\") in a worker, sanic crashed.\r\n\r\n# ---- server code ----\r\n```py\r\[email protected](\"/\")\r\nasync def handler(request):\r\n app.m.restart('__ALL_PROCESSES__')\r\n return response.text('ok')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, workers=2)\r\n```\r\n\r\n### Describe the solution you'd like\r\n\r\ngraceful restarting and reduce the effect when restarting.\r\nmy messy describe:\r\n1. graceful restart workers; restart all workers will not crash, if only 1 worker, block a little while (if worker not started yet) is ok.\r\n2. a way to graceful restart worker one by one, code eg:\r\n woker_names = tuple(app.m.workers.keys())\r\n for woker_name in worker_names:\r\n ret_val = app.m.restart(worker_name)\r\n # here, the worker has been graceful restarted, ret_val is meaningful\r\n3. 
may combine the above 2, when restarting all workers, 50% workers restarting, 50% old workers keep serving\r\n\r\n### Additional context\r\n\r\nsimplify the api,\r\n\r\n```py\r\napp.m.restart('__ALL_PROCESSES__') => app.m.restart_all()\r\n```\r\n\r\nthanks.\n", "before_files": [{"content": "from multiprocessing.connection import Connection\nfrom os import environ, getpid\nfrom typing import Any, Dict\n\nfrom sanic.worker.process import ProcessState\nfrom sanic.worker.state import WorkerState\n\n\nclass WorkerMultiplexer:\n def __init__(\n self,\n monitor_publisher: Connection,\n worker_state: Dict[str, Any],\n ):\n self._monitor_publisher = monitor_publisher\n self._state = WorkerState(worker_state, self.name)\n\n def ack(self):\n self._state._state[self.name] = {\n **self._state._state[self.name],\n \"state\": ProcessState.ACKED.name,\n }\n\n def restart(self, name: str = \"\"):\n if not name:\n name = self.name\n self._monitor_publisher.send(name)\n\n reload = restart # no cov\n\n def terminate(self, early: bool = False):\n message = \"__TERMINATE_EARLY__\" if early else \"__TERMINATE__\"\n self._monitor_publisher.send(message)\n\n @property\n def pid(self) -> int:\n return getpid()\n\n @property\n def name(self) -> str:\n return environ.get(\"SANIC_WORKER_NAME\", \"\")\n\n @property\n def state(self):\n return self._state\n\n @property\n def workers(self) -> Dict[str, Any]:\n return self.state.full()\n", "path": "sanic/worker/multiplexer.py"}], "after_files": [{"content": "from multiprocessing.connection import Connection\nfrom os import environ, getpid\nfrom typing import Any, Dict\n\nfrom sanic.worker.process import ProcessState\nfrom sanic.worker.state import WorkerState\n\n\nclass WorkerMultiplexer:\n def __init__(\n self,\n monitor_publisher: Connection,\n worker_state: Dict[str, Any],\n ):\n self._monitor_publisher = monitor_publisher\n self._state = WorkerState(worker_state, self.name)\n\n def ack(self):\n self._state._state[self.name] = {\n **self._state._state[self.name],\n \"state\": ProcessState.ACKED.name,\n }\n\n def restart(self, name: str = \"\", all_workers: bool = False):\n if name and all_workers:\n raise ValueError(\n \"Ambiguous restart with both a named process and\"\n \" all_workers=True\"\n )\n if not name:\n name = \"__ALL_PROCESSES__:\" if all_workers else self.name\n self._monitor_publisher.send(name)\n\n reload = restart # no cov\n\n def terminate(self, early: bool = False):\n message = \"__TERMINATE_EARLY__\" if early else \"__TERMINATE__\"\n self._monitor_publisher.send(message)\n\n @property\n def pid(self) -> int:\n return getpid()\n\n @property\n def name(self) -> str:\n return environ.get(\"SANIC_WORKER_NAME\", \"\")\n\n @property\n def state(self):\n return self._state\n\n @property\n def workers(self) -> Dict[str, Any]:\n return self.state.full()\n", "path": "sanic/worker/multiplexer.py"}]} | 1,014 | 185 |
gh_patches_debug_15392 | rasdani/github-patches | git_diff | DataBiosphere__toil-1385 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve toil cluster utils CLI
Instead of requiring the -p flag, make it default to 'aws'. It would also be great to make the cluster name optional -- if the user has only one cluster running, it should be used by default; otherwise raise an error asking for an explicit argument.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/toil/utils/__init__.py`
Content:
```
1 from __future__ import absolute_import
2
3 from toil import version
4 import logging
5
6 from toil.provisioners.aws import getCurrentAWSZone
7
8 logger = logging.getLogger( __name__ )
9
10
11 def addBasicProvisionerOptions(parser):
12 parser.add_argument("--version", action='version', version=version)
13 parser.add_argument('-p', "--provisioner", dest='provisioner', choices=['aws'], required=True,
14 help="The provisioner for cluster auto-scaling. Only aws is currently "
15 "supported")
16 currentZone = getCurrentAWSZone()
17 zoneString = currentZone if currentZone else 'No zone could be determined'
18 parser.add_argument('-z', '--zone', dest='zone', required=False, default=currentZone,
19 help="The AWS availability zone of the master. This parameter can also be "
20 "set via the TOIL_AWS_ZONE environment variable, or by the ec2_region_name "
21 "parameter in your .boto file, or derived from the instance metadata if "
22 "using this utility on an existing EC2 instance. "
23 "Currently: %s" % zoneString)
24 parser.add_argument("clusterName", help="The name that the cluster will be identifiable by. "
25 "Must be lowercase and may not contain the '_' "
26 "character.")
27 return parser
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/toil/utils/__init__.py b/src/toil/utils/__init__.py
--- a/src/toil/utils/__init__.py
+++ b/src/toil/utils/__init__.py
@@ -5,12 +5,12 @@
from toil.provisioners.aws import getCurrentAWSZone
-logger = logging.getLogger( __name__ )
+logger = logging.getLogger(__name__)
def addBasicProvisionerOptions(parser):
parser.add_argument("--version", action='version', version=version)
- parser.add_argument('-p', "--provisioner", dest='provisioner', choices=['aws'], required=True,
+ parser.add_argument('-p', "--provisioner", dest='provisioner', choices=['aws'], required=False, default="aws",
help="The provisioner for cluster auto-scaling. Only aws is currently "
"supported")
currentZone = getCurrentAWSZone()
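Note that this patch covers only the first request (defaulting the provisioner to 'aws'); making the cluster name optional is not implemented here. That half could be sketched roughly as follows (editor's illustration only, not part of the applied patch):

```python
parser.add_argument("clusterName", nargs='?', default=None,
                    help="Name of the cluster; may be omitted when exactly one cluster is running.")
```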
| {"golden_diff": "diff --git a/src/toil/utils/__init__.py b/src/toil/utils/__init__.py\n--- a/src/toil/utils/__init__.py\n+++ b/src/toil/utils/__init__.py\n@@ -5,12 +5,12 @@\n \n from toil.provisioners.aws import getCurrentAWSZone\n \n-logger = logging.getLogger( __name__ )\n+logger = logging.getLogger(__name__)\n \n \n def addBasicProvisionerOptions(parser):\n parser.add_argument(\"--version\", action='version', version=version)\n- parser.add_argument('-p', \"--provisioner\", dest='provisioner', choices=['aws'], required=True,\n+ parser.add_argument('-p', \"--provisioner\", dest='provisioner', choices=['aws'], required=False, default=\"aws\",\n help=\"The provisioner for cluster auto-scaling. Only aws is currently \"\n \"supported\")\n currentZone = getCurrentAWSZone()\n", "issue": "Improve toil cluster utils CLI\nInstead of requiring the -p flag make it default to 'aws'. It would also be great to make the cluster name optional -- if the user only has 1 cluster running it should use that by default, else raise an error asking for an explicit argument\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom toil import version\nimport logging\n\nfrom toil.provisioners.aws import getCurrentAWSZone\n\nlogger = logging.getLogger( __name__ )\n\n\ndef addBasicProvisionerOptions(parser):\n parser.add_argument(\"--version\", action='version', version=version)\n parser.add_argument('-p', \"--provisioner\", dest='provisioner', choices=['aws'], required=True,\n help=\"The provisioner for cluster auto-scaling. Only aws is currently \"\n \"supported\")\n currentZone = getCurrentAWSZone()\n zoneString = currentZone if currentZone else 'No zone could be determined'\n parser.add_argument('-z', '--zone', dest='zone', required=False, default=currentZone,\n help=\"The AWS availability zone of the master. This parameter can also be \"\n \"set via the TOIL_AWS_ZONE environment variable, or by the ec2_region_name \"\n \"parameter in your .boto file, or derived from the instance metadata if \"\n \"using this utility on an existing EC2 instance. \"\n \"Currently: %s\" % zoneString)\n parser.add_argument(\"clusterName\", help=\"The name that the cluster will be identifiable by. \"\n \"Must be lowercase and may not contain the '_' \"\n \"character.\")\n return parser\n", "path": "src/toil/utils/__init__.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom toil import version\nimport logging\n\nfrom toil.provisioners.aws import getCurrentAWSZone\n\nlogger = logging.getLogger(__name__)\n\n\ndef addBasicProvisionerOptions(parser):\n parser.add_argument(\"--version\", action='version', version=version)\n parser.add_argument('-p', \"--provisioner\", dest='provisioner', choices=['aws'], required=False, default=\"aws\",\n help=\"The provisioner for cluster auto-scaling. Only aws is currently \"\n \"supported\")\n currentZone = getCurrentAWSZone()\n zoneString = currentZone if currentZone else 'No zone could be determined'\n parser.add_argument('-z', '--zone', dest='zone', required=False, default=currentZone,\n help=\"The AWS availability zone of the master. This parameter can also be \"\n \"set via the TOIL_AWS_ZONE environment variable, or by the ec2_region_name \"\n \"parameter in your .boto file, or derived from the instance metadata if \"\n \"using this utility on an existing EC2 instance. \"\n \"Currently: %s\" % zoneString)\n parser.add_argument(\"clusterName\", help=\"The name that the cluster will be identifiable by. 
\"\n \"Must be lowercase and may not contain the '_' \"\n \"character.\")\n return parser\n", "path": "src/toil/utils/__init__.py"}]} | 649 | 197 |
gh_patches_debug_34630 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2154 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't validate for existing column names if they're being moved
<img width="905" alt="Screen Shot 2022-12-21 at 6 02 20 PM" src="https://user-images.githubusercontent.com/287034/209019919-a8dd4cab-4ce9-4c85-8956-3163409e713b.png">
We're removing the `Publisher` column here, so it's okay that the replacement column will be named `Publisher`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/tables/operations/split.py`
Content:
```
1 from sqlalchemy import exists, func, literal, select
2
3 from db import constants
4 from db.columns.base import MathesarColumn
5 from db.columns.operations.alter import batch_alter_table_drop_columns
6 from db.columns.operations.select import get_column_names_from_attnums
7 from db.links.operations.create import create_foreign_key_link
8 from db.tables.operations.create import create_mathesar_table
9 from db.tables.operations.select import get_oid_from_table, reflect_table, reflect_table_from_oid
10 from db.metadata import get_empty_metadata
11
12
13 def _create_split_tables(extracted_table_name, extracted_columns, remainder_table_name, schema, engine, fk_column_name=None):
14 extracted_table = create_mathesar_table(
15 extracted_table_name,
16 schema,
17 extracted_columns,
18 engine,
19 )
20 fk_column_name = fk_column_name if fk_column_name else f"{extracted_table.name}_{constants.ID}"
21 remainder_table_oid = get_oid_from_table(remainder_table_name, schema, engine)
22 extracted_table_oid = get_oid_from_table(extracted_table_name, schema, engine)
23 create_foreign_key_link(engine, schema, fk_column_name, remainder_table_oid, extracted_table_oid)
24 # TODO reuse metadata
25 remainder_table_with_fk_key = reflect_table(remainder_table_name, schema, engine, metadata=get_empty_metadata())
26 return extracted_table, remainder_table_with_fk_key, fk_column_name
27
28
29 def _create_split_insert_stmt(old_table, extracted_table, extracted_columns, remainder_fk_name):
30 SPLIT_ID = f"{constants.MATHESAR_PREFIX}_split_column_alias"
31 extracted_column_names = [col.name for col in extracted_columns]
32 split_cte = select(
33 [
34 old_table,
35 func.dense_rank().over(order_by=extracted_columns).label(SPLIT_ID)
36 ]
37 ).cte()
38 cte_extraction_columns = (
39 [split_cte.columns[SPLIT_ID]]
40 + [split_cte.columns[n] for n in extracted_column_names]
41 )
42 extract_sel = select(
43 cte_extraction_columns,
44 distinct=True
45 )
46 extract_ins_cte = (
47 extracted_table
48 .insert()
49 .from_select([constants.ID] + extracted_column_names, extract_sel)
50 .returning(literal(1))
51 .cte()
52 )
53 fk_update_dict = {remainder_fk_name: split_cte.c[SPLIT_ID]}
54 split_ins = (
55 old_table
56 .update().values(**fk_update_dict).
57 where(old_table.c[constants.ID] == split_cte.c[constants.ID],
58 exists(extract_ins_cte.select()))
59 )
60 return split_ins
61
62
63 def extract_columns_from_table(old_table_oid, extracted_column_attnums, extracted_table_name, schema, engine, relationship_fk_column_name=None):
64 # TODO reuse metadata
65 old_table = reflect_table_from_oid(old_table_oid, engine, metadata=get_empty_metadata())
66 old_table_name = old_table.name
67 old_columns = (MathesarColumn.from_column(col) for col in old_table.columns)
68 old_non_default_columns = [
69 col for col in old_columns if not col.is_default
70 ]
71 # TODO reuse metadata
72 extracted_column_names = get_column_names_from_attnums(old_table_oid, extracted_column_attnums, engine, metadata=get_empty_metadata())
73 extracted_columns = [
74 col for col in old_non_default_columns if col.name in extracted_column_names
75 ]
76 with engine.begin() as conn:
77 extracted_table, remainder_table_with_fk_column, fk_column_name = _create_split_tables(
78 extracted_table_name,
79 extracted_columns,
80 old_table_name,
81 schema,
82 engine,
83 relationship_fk_column_name
84 )
85 split_ins = _create_split_insert_stmt(
86 remainder_table_with_fk_column,
87 extracted_table,
88 extracted_columns,
89 fk_column_name,
90 )
91 conn.execute(split_ins)
92 update_pk_sequence_to_latest(conn, engine, extracted_table)
93
94 remainder_table_oid = get_oid_from_table(remainder_table_with_fk_column.name, schema, engine)
95 deletion_column_data = [
96 {'attnum': column_attnum, 'delete': True}
97 for column_attnum in extracted_column_attnums
98 ]
99 batch_alter_table_drop_columns(remainder_table_oid, deletion_column_data, conn, engine)
100 return extracted_table, remainder_table_with_fk_column, fk_column_name
101
102
103 def update_pk_sequence_to_latest(conn, engine, extracted_table):
104 _preparer = engine.dialect.identifier_preparer
105 quoted_table_name = _preparer.quote(extracted_table.schema) + "." + _preparer.quote(extracted_table.name)
106 update_pk_sequence_stmt = func.setval(
107 # `pg_get_serial_sequence needs a string of the Table name
108 func.pg_get_serial_sequence(
109 quoted_table_name,
110 extracted_table.c[constants.ID].name
111 ),
112 # If the table can be empty, start from 1 instead of using Null
113 func.coalesce(
114 func.max(extracted_table.c[constants.ID]) + 1,
115 1
116 ),
117 # Set the sequence to use the last value of the sequence
118 # Setting is_called field to false, meaning that the next nextval will not advance the sequence before returning a value.
119 # We need to do it as our default coalesce value is 1 instead of 0
120 # Refer the postgres docs https://www.postgresql.org/docs/current/functions-sequence.html
121 False
122 )
123 conn.execute(
124 select(update_pk_sequence_stmt)
125 )
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/tables/operations/split.py b/db/tables/operations/split.py
--- a/db/tables/operations/split.py
+++ b/db/tables/operations/split.py
@@ -2,8 +2,11 @@
from db import constants
from db.columns.base import MathesarColumn
-from db.columns.operations.alter import batch_alter_table_drop_columns
-from db.columns.operations.select import get_column_names_from_attnums
+from db.columns.operations.alter import batch_alter_table_drop_columns, rename_column
+from db.columns.operations.select import (
+ get_column_attnum_from_name,
+ get_column_names_from_attnums,
+)
from db.links.operations.create import create_foreign_key_link
from db.tables.operations.create import create_mathesar_table
from db.tables.operations.select import get_oid_from_table, reflect_table, reflect_table_from_oid
@@ -18,6 +21,11 @@
engine,
)
fk_column_name = fk_column_name if fk_column_name else f"{extracted_table.name}_{constants.ID}"
+ extracted_column_names = [
+ col.name for col in extracted_columns
+ ]
+ if fk_column_name in extracted_column_names:
+ fk_column_name = f"mathesar_temp_{fk_column_name}"
remainder_table_oid = get_oid_from_table(remainder_table_name, schema, engine)
extracted_table_oid = get_oid_from_table(extracted_table_name, schema, engine)
create_foreign_key_link(engine, schema, fk_column_name, remainder_table_oid, extracted_table_oid)
@@ -97,6 +105,9 @@
for column_attnum in extracted_column_attnums
]
batch_alter_table_drop_columns(remainder_table_oid, deletion_column_data, conn, engine)
+ fk_column_attnum = get_column_attnum_from_name(remainder_table_oid, fk_column_name, engine, get_empty_metadata())
+ if relationship_fk_column_name != fk_column_name:
+ rename_column(remainder_table_oid, fk_column_attnum, engine, conn, relationship_fk_column_name)
return extracted_table, remainder_table_with_fk_column, fk_column_name
| {"golden_diff": "diff --git a/db/tables/operations/split.py b/db/tables/operations/split.py\n--- a/db/tables/operations/split.py\n+++ b/db/tables/operations/split.py\n@@ -2,8 +2,11 @@\n \n from db import constants\n from db.columns.base import MathesarColumn\n-from db.columns.operations.alter import batch_alter_table_drop_columns\n-from db.columns.operations.select import get_column_names_from_attnums\n+from db.columns.operations.alter import batch_alter_table_drop_columns, rename_column\n+from db.columns.operations.select import (\n+ get_column_attnum_from_name,\n+ get_column_names_from_attnums,\n+)\n from db.links.operations.create import create_foreign_key_link\n from db.tables.operations.create import create_mathesar_table\n from db.tables.operations.select import get_oid_from_table, reflect_table, reflect_table_from_oid\n@@ -18,6 +21,11 @@\n engine,\n )\n fk_column_name = fk_column_name if fk_column_name else f\"{extracted_table.name}_{constants.ID}\"\n+ extracted_column_names = [\n+ col.name for col in extracted_columns\n+ ]\n+ if fk_column_name in extracted_column_names:\n+ fk_column_name = f\"mathesar_temp_{fk_column_name}\"\n remainder_table_oid = get_oid_from_table(remainder_table_name, schema, engine)\n extracted_table_oid = get_oid_from_table(extracted_table_name, schema, engine)\n create_foreign_key_link(engine, schema, fk_column_name, remainder_table_oid, extracted_table_oid)\n@@ -97,6 +105,9 @@\n for column_attnum in extracted_column_attnums\n ]\n batch_alter_table_drop_columns(remainder_table_oid, deletion_column_data, conn, engine)\n+ fk_column_attnum = get_column_attnum_from_name(remainder_table_oid, fk_column_name, engine, get_empty_metadata())\n+ if relationship_fk_column_name != fk_column_name:\n+ rename_column(remainder_table_oid, fk_column_attnum, engine, conn, relationship_fk_column_name)\n return extracted_table, remainder_table_with_fk_column, fk_column_name\n", "issue": "Don't validate for existing column names if they're being moved\n<img width=\"905\" alt=\"Screen Shot 2022-12-21 at 6 02 20 PM\" src=\"https://user-images.githubusercontent.com/287034/209019919-a8dd4cab-4ce9-4c85-8956-3163409e713b.png\">\r\n\r\nWe're removing the `Publisher` column here, so it's okay that the replacement column will be named `Publisher`.\n", "before_files": [{"content": "from sqlalchemy import exists, func, literal, select\n\nfrom db import constants\nfrom db.columns.base import MathesarColumn\nfrom db.columns.operations.alter import batch_alter_table_drop_columns\nfrom db.columns.operations.select import get_column_names_from_attnums\nfrom db.links.operations.create import create_foreign_key_link\nfrom db.tables.operations.create import create_mathesar_table\nfrom db.tables.operations.select import get_oid_from_table, reflect_table, reflect_table_from_oid\nfrom db.metadata import get_empty_metadata\n\n\ndef _create_split_tables(extracted_table_name, extracted_columns, remainder_table_name, schema, engine, fk_column_name=None):\n extracted_table = create_mathesar_table(\n extracted_table_name,\n schema,\n extracted_columns,\n engine,\n )\n fk_column_name = fk_column_name if fk_column_name else f\"{extracted_table.name}_{constants.ID}\"\n remainder_table_oid = get_oid_from_table(remainder_table_name, schema, engine)\n extracted_table_oid = get_oid_from_table(extracted_table_name, schema, engine)\n create_foreign_key_link(engine, schema, fk_column_name, remainder_table_oid, extracted_table_oid)\n # TODO reuse metadata\n remainder_table_with_fk_key = 
verification_info: golden_diff, issue, before_files, after_files for db/tables/operations/split.py (escaped JSON payload) | 1,812 | 460 |
gh_patches_debug_30217 | rasdani/github-patches | git_diff | bokeh__bokeh-8373 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DataTable crashes layout if 'field' is key in ColumnDataSource data
Bokeh 0.13.0
Mac OS 10.13.3
python 2.7.10
Safari 11.0.3
If 'field' is a key in the data of ColumnDataSource, bokeh serve cannot generate a layout including a DataTable using this ColumnDataSource. This only fails if the DataTable is in the layout, the DataTable may exist if not in the layout.
The following code will fail. If you replace 'field' in variables with almost anything else, the code works.
```
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import TableColumn, DataTable
from bokeh.io import curdoc
from bokeh.layouts import column
variables = ['x', 'y', 'field']
source = ColumnDataSource(data={v: [i] for i, v in enumerate(variables)})
table = DataTable(source=source, columns=[TableColumn(field=v, title=v) for v in variables])
curdoc().add_root(column(table))
```
--- END ISSUE ---
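A minimal client-side workaround sketch (an assumption, not the project's eventual fix): rename the clashing key before building the table, so the data source never exposes a column literally named `field`. The `field_` placeholder below is an arbitrary choice.

```python
# Hedged workaround sketch: avoid a ColumnDataSource key named "field".
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import TableColumn, DataTable

variables = ['x', 'y', 'field']
data = {v: [i] for i, v in enumerate(variables)}

# Map the clashing name to a placeholder; column titles still show the original name.
safe_names = {v: ('field_' if v == 'field' else v) for v in variables}
source = ColumnDataSource(data={safe_names[v]: data[v] for v in variables})
columns = [TableColumn(field=safe_names[v], title=v) for v in variables]
table = DataTable(source=source, columns=columns)
```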
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/models/file/donut.py`
Content:
```
1 from __future__ import print_function
2
3 import base64
4 from math import pi, sin, cos
5
6 from bokeh.util.browser import view
7 from bokeh.colors.named import skyblue, seagreen, tomato, orchid, firebrick, lightgray
8 from bokeh.document import Document
9 from bokeh.embed import file_html
10 from bokeh.models.glyphs import Wedge, AnnularWedge, ImageURL, Text
11 from bokeh.models import ColumnDataSource, Plot, Range1d
12 from bokeh.resources import INLINE
13 from bokeh.sampledata.browsers import browsers_nov_2013, icons
14
15 df = browsers_nov_2013
16
17 xdr = Range1d(start=-2, end=2)
18 ydr = Range1d(start=-2, end=2)
19
20 plot = Plot(x_range=xdr, y_range=ydr, plot_width=800, plot_height=800)
21 plot.title.text = "Web browser market share (November 2013)"
22 plot.toolbar_location = None
23
24 colors = {"Chrome": seagreen, "Firefox": tomato, "Safari": orchid, "Opera": firebrick, "IE": skyblue, "Other": lightgray}
25
26 aggregated = df.groupby("Browser").agg(sum)
27 selected = aggregated[aggregated.Share >= 1].copy()
28 selected.loc["Other"] = aggregated[aggregated.Share < 1].sum()
29 browsers = selected.index.tolist()
30
31 radians = lambda x: 2*pi*(x/100)
32 angles = selected.Share.map(radians).cumsum()
33
34 end_angles = angles.tolist()
35 start_angles = [0] + end_angles[:-1]
36
37 browsers_source = ColumnDataSource(dict(
38 start = start_angles,
39 end = end_angles,
40 colors = [colors[browser] for browser in browsers ],
41 ))
42
43 glyph = Wedge(x=0, y=0, radius=1, line_color="white",
44 line_width=2, start_angle="start", end_angle="end", fill_color="colors")
45 plot.add_glyph(browsers_source, glyph)
46
47 def polar_to_cartesian(r, start_angles, end_angles):
48 cartesian = lambda r, alpha: (r*cos(alpha), r*sin(alpha))
49 points = []
50
51 for start, end in zip(start_angles, end_angles):
52 points.append(cartesian(r, (end + start)/2))
53
54 return zip(*points)
55
56 first = True
57
58 for browser, start_angle, end_angle in zip(browsers, start_angles, end_angles):
59 versions = df[(df.Browser == browser) & (df.Share >= 0.5)]
60 angles = versions.Share.map(radians).cumsum() + start_angle
61 end = angles.tolist() + [end_angle]
62 start = [start_angle] + end[:-1]
63 base_color = colors[browser]
64 fill = [ base_color.lighten(i*0.05) for i in range(len(versions) + 1) ]
65 # extra empty string accounts for all versions with share < 0.5 together
66 text = [ number if share >= 1 else "" for number, share in zip(versions.VersionNumber, versions.Share) ] + [""]
67 x, y = polar_to_cartesian(1.25, start, end)
68
69 source = ColumnDataSource(dict(start=start, end=end, fill=fill))
70 glyph = AnnularWedge(x=0, y=0,
71 inner_radius=1, outer_radius=1.5, start_angle="start", end_angle="end",
72 line_color="white", line_width=2, fill_color="fill")
73 plot.add_glyph(source, glyph)
74
75
76 text_angle = [(start[i]+end[i])/2 for i in range(len(start))]
77 text_angle = [angle + pi if pi/2 < angle < 3*pi/2 else angle for angle in text_angle]
78
79 if first and text:
80 text.insert(0, '(version)')
81 offset = pi / 48
82 text_angle.insert(0, text_angle[0] - offset)
83 start.insert(0, start[0] - offset)
84 end.insert(0, end[0] - offset)
85 x, y = polar_to_cartesian(1.25, start, end)
86 first = False
87
88
89 text_source = ColumnDataSource(dict(text=text, x=x, y=y, angle=text_angle))
90 glyph = Text(x="x", y="y", text="text", angle="angle",
91 text_align="center", text_baseline="middle", text_font_size="8pt")
92 plot.add_glyph(text_source, glyph)
93
94
95 def to_base64(png):
96 return "data:image/png;base64," + base64.b64encode(png).decode("utf-8")
97
98 urls = [ to_base64(icons.get(browser, b"")) for browser in browsers ]
99 x, y = polar_to_cartesian(1.7, start_angles, end_angles)
100
101 icons_source = ColumnDataSource(dict(urls=urls, x=x, y=y))
102 glyph = ImageURL(url="urls", x="x", y="y", anchor="center")
103 plot.add_glyph(icons_source, glyph)
104
105 text = [ "%.02f%%" % value for value in selected.Share ]
106 x, y = polar_to_cartesian(0.7, start_angles, end_angles)
107
108 text_source = ColumnDataSource(dict(text=text, x=x, y=y))
109 glyph = Text(x="x", y="y", text="text", text_align="center", text_baseline="middle")
110 plot.add_glyph(text_source, glyph)
111
112 doc = Document()
113 doc.add_root(plot)
114 doc.validate()
115
116 filename = "donut.html"
117 with open(filename, "w") as f:
118 f.write(file_html(doc, INLINE, "Donut Chart"))
119 print("Wrote %s" % filename)
120 view(filename)
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/models/file/donut.py b/examples/models/file/donut.py
--- a/examples/models/file/donut.py
+++ b/examples/models/file/donut.py
@@ -61,7 +61,7 @@
end = angles.tolist() + [end_angle]
start = [start_angle] + end[:-1]
base_color = colors[browser]
- fill = [ base_color.lighten(i*0.05) for i in range(len(versions) + 1) ]
+ fill = [ base_color.lighten(i*0.05).to_hex() for i in range(len(versions) + 1) ]
# extra empty string accounts for all versions with share < 0.5 together
text = [ number if share >= 1 else "" for number, share in zip(versions.VersionNumber, versions.Share) ] + [""]
x, y = polar_to_cartesian(1.25, start, end)
@@ -72,20 +72,9 @@
line_color="white", line_width=2, fill_color="fill")
plot.add_glyph(source, glyph)
-
text_angle = [(start[i]+end[i])/2 for i in range(len(start))]
text_angle = [angle + pi if pi/2 < angle < 3*pi/2 else angle for angle in text_angle]
- if first and text:
- text.insert(0, '(version)')
- offset = pi / 48
- text_angle.insert(0, text_angle[0] - offset)
- start.insert(0, start[0] - offset)
- end.insert(0, end[0] - offset)
- x, y = polar_to_cartesian(1.25, start, end)
- first = False
-
-
text_source = ColumnDataSource(dict(text=text, x=x, y=y, angle=text_angle))
glyph = Text(x="x", y="y", text="text", angle="angle",
text_align="center", text_baseline="middle", text_font_size="8pt")
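The patch above converts each lightened wedge color to a plain hex string and drops the `(version)` label insertion; presumably the point is that the `fill` column of the ColumnDataSource then carries serializable strings rather than color objects. A small sketch of that conversion, assuming the same Bokeh color API the example already uses:

```python
# Illustrative only: lighten a named color, then serialize it as a hex string.
from bokeh.colors.named import seagreen

fill = [seagreen.lighten(i * 0.05).to_hex() for i in range(4)]
print(fill)  # hex strings such as '#2e8b57', not color objects
```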
| verification_info: golden_diff, issue, before_files, after_files for examples/models/file/donut.py (escaped JSON payload duplicating the prompt and diff above) | 2,008 | 457 |
gh_patches_debug_19290 | rasdani/github-patches | git_diff | Pylons__pyramid-3458 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pyramid.exceptions.ConfigurationConflictError: <exception str() failed>
**Describe the bug**
While building an app I caused an error whose traceback ended with the following line:
```
pyramid.exceptions.ConfigurationConflictError: <exception str() failed>
```
What caused the error in the first place was that I copied a python package containing my views to a new package called controllers and forgot to delete the original views package. I assume that the configurator failed while trying to commit the config.scan(). Since I couldn't find any information online about the above traceback message I assume that this is probably just some internal problem with Pyramid.
**To Reproduce**
download from: https://github.com/benkawecki/pypi/tree/error
after setting up run
```
pserve development.ini
```
**Expected behavior**
I expect there to be an error message.
**Screenshots**
Screenshot of the error message:
<img width="489" alt="screen shot 2019-01-15 at 10 02 44 pm" src="https://user-images.githubusercontent.com/39999125/51224413-c57eb800-1913-11e9-9e0f-b25878a479f5.png">
Screenshot of installed packages:
<img width="488" alt="screen shot 2019-01-15 at 10 24 42 pm" src="https://user-images.githubusercontent.com/39999125/51224563-8b61e600-1914-11e9-9b04-42936f94d4bd.png">
**Additional context**
I'm looking to help out in open-source more this year so if this is an easy fix I would love to see if I can do it!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyramid/exceptions.py`
Content:
```
1 from pyramid.httpexceptions import (
2 HTTPBadRequest,
3 HTTPNotFound,
4 HTTPForbidden,
5 )
6
7 NotFound = HTTPNotFound # bw compat
8 Forbidden = HTTPForbidden # bw compat
9
10 CR = '\n'
11
12
13 class BadCSRFOrigin(HTTPBadRequest):
14 """
15 This exception indicates the request has failed cross-site request forgery
16 origin validation.
17 """
18 title = "Bad CSRF Origin"
19 explanation = (
20 "Access is denied. This server can not verify that the origin or "
21 "referrer of your request matches the current site. Either your "
22 "browser supplied the wrong Origin or Referrer or it did not supply "
23 "one at all."
24 )
25
26
27 class BadCSRFToken(HTTPBadRequest):
28 """
29 This exception indicates the request has failed cross-site request
30 forgery token validation.
31 """
32 title = 'Bad CSRF Token'
33 explanation = (
34 'Access is denied. This server can not verify that your cross-site '
35 'request forgery token belongs to your login session. Either you '
36 'supplied the wrong cross-site request forgery token or your session '
37 'no longer exists. This may be due to session timeout or because '
38 'browser is not supplying the credentials required, as can happen '
39 'when the browser has cookies turned off.')
40
41 class PredicateMismatch(HTTPNotFound):
42 """
43 This exception is raised by multiviews when no view matches
44 all given predicates.
45
46 This exception subclasses the :class:`HTTPNotFound` exception for a
47 specific reason: if it reaches the main exception handler, it should
48 be treated as :class:`HTTPNotFound`` by any exception view
49 registrations. Thus, typically, this exception will not be seen
50 publicly.
51
52 However, this exception will be raised if the predicates of all
53 views configured to handle another exception context cannot be
54 successfully matched. For instance, if a view is configured to
55 handle a context of ``HTTPForbidden`` and the configured with
56 additional predicates, then :class:`PredicateMismatch` will be
57 raised if:
58
59 * An original view callable has raised :class:`HTTPForbidden` (thus
60 invoking an exception view); and
61 * The given request fails to match all predicates for said
62 exception view associated with :class:`HTTPForbidden`.
63
64 The same applies to any type of exception being handled by an
65 exception view.
66 """
67
68 class URLDecodeError(UnicodeDecodeError):
69 """
70 This exception is raised when :app:`Pyramid` cannot
71 successfully decode a URL or a URL path segment. This exception
72 behaves just like the Python builtin
73 :exc:`UnicodeDecodeError`. It is a subclass of the builtin
74 :exc:`UnicodeDecodeError` exception only for identity purposes,
75 mostly so an exception view can be registered when a URL cannot be
76 decoded.
77 """
78
79 class ConfigurationError(Exception):
80 """ Raised when inappropriate input values are supplied to an API
81 method of a :term:`Configurator`"""
82
83 class ConfigurationConflictError(ConfigurationError):
84 """ Raised when a configuration conflict is detected during action
85 processing"""
86
87 def __init__(self, conflicts):
88 self._conflicts = conflicts
89
90 def __str__(self):
91 r = ["Conflicting configuration actions"]
92 items = sorted(self._conflicts.items())
93 for discriminator, infos in items:
94 r.append(" For: %s" % (discriminator, ))
95 for info in infos:
96 for line in str(info).rstrip().split(CR):
97 r.append(" " + line)
98
99 return CR.join(r)
100
101
102 class ConfigurationExecutionError(ConfigurationError):
103 """An error occurred during execution of a configuration action
104 """
105
106 def __init__(self, etype, evalue, info):
107 self.etype, self.evalue, self.info = etype, evalue, info
108
109 def __str__(self):
110 return "%s: %s\n in:\n %s" % (self.etype, self.evalue, self.info)
111
112
113 class CyclicDependencyError(Exception):
114 """ The exception raised when the Pyramid topological sorter detects a
115 cyclic dependency."""
116 def __init__(self, cycles):
117 self.cycles = cycles
118
119 def __str__(self):
120 L = []
121 cycles = self.cycles
122 for cycle in cycles:
123 dependent = cycle
124 dependees = cycles[cycle]
125 L.append('%r sorts before %r' % (dependent, dependees))
126 msg = 'Implicit ordering cycle:' + '; '.join(L)
127 return msg
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyramid/exceptions.py b/pyramid/exceptions.py
--- a/pyramid/exceptions.py
+++ b/pyramid/exceptions.py
@@ -7,8 +7,6 @@
NotFound = HTTPNotFound # bw compat
Forbidden = HTTPForbidden # bw compat
-CR = '\n'
-
class BadCSRFOrigin(HTTPBadRequest):
"""
@@ -89,14 +87,13 @@
def __str__(self):
r = ["Conflicting configuration actions"]
- items = sorted(self._conflicts.items())
- for discriminator, infos in items:
- r.append(" For: %s" % (discriminator, ))
+ for discriminator, infos in self._conflicts.items():
+ r.append(" For: %s" % (discriminator,))
for info in infos:
- for line in str(info).rstrip().split(CR):
+ for line in str(info).rstrip().split('\n'):
r.append(" " + line)
- return CR.join(r)
+ return '\n'.join(r)
class ConfigurationExecutionError(ConfigurationError):
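A plausible reading of the patch, not confirmed by the issue itself: conflict discriminators can have mixed, non-comparable types under Python 3, so the `sorted()` call inside `__str__` raises `TypeError`, and the interpreter then prints `<exception str() failed>` in place of the message. Dropping the sort sidesteps that. A minimal sketch of the suspected failure mode, with made-up discriminators:

```python
# Hypothetical discriminators of mixed types, as a configuration conflict might produce.
conflicts = {('view', 'home'): ['registered at a.py:10'], None: ['registered at b.py:20']}

try:
    sorted(conflicts.items())  # what the old __str__ effectively did
except TypeError as exc:
    print(exc)  # e.g. "'<' not supported between instances of 'NoneType' and 'tuple'"
```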
| verification_info: golden_diff, issue, before_files, after_files for pyramid/exceptions.py (escaped JSON payload duplicating the prompt and diff above) | 1,936 | 241 |
gh_patches_debug_3211 | rasdani/github-patches | git_diff | xonsh__xonsh-428 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't type literal tab with prompt_toolkit
In bash or zsh I would typically type `Ctl-V TAB` to insert a literal tab; however, when using prompt_toolkit as my shell, typing this simply tries to do tab completion and there doesn't seem to be a way to insert a literal tab.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/prompt_toolkit_key_bindings.py`
Content:
```
1 """Key bindings for prompt_toolkit xonsh shell."""
2 import builtins
3
4 from prompt_toolkit.filters import Filter
5 from prompt_toolkit.keys import Keys
6
7
8 class TabShouldInsertIndentFilter(Filter):
9 """
10 Filter that is intended to check if <Tab> should insert indent instead of
11 starting autocompletion.
12 It basically just checks if there are only whitespaces before the cursor -
13 if so indent should be inserted, otherwise autocompletion.
14 """
15 def __call__(self, cli):
16 before_cursor = cli.current_buffer.document.current_line_before_cursor
17
18 return bool(before_cursor.isspace())
19
20
21 def load_xonsh_bindings(key_bindings_manager):
22 """
23 Load custom key bindings.
24 """
25 handle = key_bindings_manager.registry.add_binding
26 env = builtins.__xonsh_env__
27
28 @handle(Keys.Tab, filter=TabShouldInsertIndentFilter())
29 def _(event):
30 """
31 If there are only whitespaces before current cursor position insert
32 indent instead of autocompleting.
33 """
34 event.cli.current_buffer.insert_text(env.get('INDENT'))
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/prompt_toolkit_key_bindings.py b/xonsh/prompt_toolkit_key_bindings.py
--- a/xonsh/prompt_toolkit_key_bindings.py
+++ b/xonsh/prompt_toolkit_key_bindings.py
@@ -32,3 +32,11 @@
indent instead of autocompleting.
"""
event.cli.current_buffer.insert_text(env.get('INDENT'))
+
+ @handle(Keys.BackTab)
+ def insert_literal_tab(event):
+ """
+ Insert literal tab on Shift+Tab instead of autocompleting
+ """
+ event.cli.current_buffer.insert_text(env.get('INDENT'))
+
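The added `BackTab` handler makes Shift+Tab insert the configured indent without triggering completion. A further sketch — assuming the multi-key binding API of the prompt_toolkit release xonsh targeted at the time — of a readline-style `Ctrl-V Tab` sequence that inserts a real tab character:

```python
# Hedged sketch: bind Ctrl-V followed by Tab to insert a literal "\t".
from prompt_toolkit.keys import Keys


def load_literal_tab_binding(key_bindings_manager):
    handle = key_bindings_manager.registry.add_binding

    @handle(Keys.ControlV, Keys.Tab)  # quoted-insert style; the key choice is an assumption
    def insert_literal_tab(event):
        # Insert an actual tab character instead of expanding to $INDENT.
        event.cli.current_buffer.insert_text('\t')
```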
| verification_info: golden_diff, issue, before_files, after_files for xonsh/prompt_toolkit_key_bindings.py (escaped JSON payload duplicating the prompt and diff above) | 617 | 143 |
gh_patches_debug_689 | rasdani/github-patches | git_diff | great-expectations__great_expectations-1500 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
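A small illustration of the two styles the issue contrasts, assuming Python 2 semantics for the bare `/`:

```python
# With the __future__ import, "/" is true division module-wide,
# so the "1.*x/y" coercion trick becomes unnecessary.
from __future__ import division

x, y = 7, 2
print(x / y)       # 3.5  (true division via the __future__ import)
print(1. * x / y)  # 3.5  (the older coercion idiom the issue wants to retire)
print(x // y)      # 3    (floor division, when truncation is actually intended)
```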
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/cli/cli_messages.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from great_expectations import DataContext
3
4 GREETING = """<cyan>\
5 ___ _ ___ _ _ _
6 / __|_ _ ___ __ _| |_ | __|_ ___ __ ___ __| |_ __ _| |_(_)___ _ _ ___
7 | (_ | '_/ -_) _` | _| | _|\ \ / '_ \/ -_) _| _/ _` | _| / _ \ ' \(_-<
8 \___|_| \___\__,_|\__| |___/_\_\ .__/\___\__|\__\__,_|\__|_\___/_||_/__/
9 |_|
10 ~ Always know what to expect from your data ~
11 </cyan>"""
12
13 LETS_BEGIN_PROMPT = """Let's configure a new Data Context.
14
15 First, Great Expectations will create a new directory:
16
17 great_expectations
18 |-- great_expectations.yml
19 |-- expectations
20 |-- notebooks
21 |-- plugins
22 |-- .gitignore
23 |-- uncommitted
24 |-- config_variables.yml
25 |-- documentation
26 |-- validations
27
28 OK to proceed?"""
29
30 PROJECT_IS_COMPLETE = "This looks like an existing project that <green>appears complete!</green> You are <green>ready to roll.</green>\n"
31
32 RUN_INIT_AGAIN = (
33 "OK. You must run <green>great_expectations init</green> to fix the missing files!"
34 )
35
36 COMPLETE_ONBOARDING_PROMPT = """To run locally, we need some files that are not in source control.
37 - Anything existing will not be modified.
38 - Would you like to fix this automatically?"""
39
40 SLACK_SETUP_INTRO = """
41 <cyan>========== Slack Notifications ==========</cyan>
42 """
43
44 SLACK_SETUP_PROMPT = "Would you like to set up Slack data quality notifications?"
45
46 SLACK_DOC_LINK = """http://docs.greatexpectations.io/en/latest/getting_started/cli_init.html#configuring-slack-notifications
47 """
48
49 SLACK_WEBHOOK_PROMPT = """Please add your Slack webhook below. Getting one is easy!
50 """
51
52 SLACK_LATER = "\nTo setup Slack later please see the the slack section in the CLI init getting started guide."
53
54 SLACK_SETUP_COMPLETE = """
55 OK. <green>Slack is set up.</green> To modify this in the future please see the slack section in the CLI init getting started guide."""
56
57 ONBOARDING_COMPLETE = """
58 Great Expectations added some missing files required to run.
59 - You may see new files in `<yellow>great_expectations/uncommitted</yellow>`.
60 - You may need to add secrets to `<yellow>great_expectations/uncommitted/config_variables.yml</yellow>` to finish onboarding.
61 """
62
63 BUILD_DOCS_PROMPT = "Would you like to build & view this project's Data Docs!?"
64
65 NO_DATASOURCES_FOUND = """<red>Error: No datasources were found.</red> Please add one by:
66 - running `<green>great_expectations datasource new</green>` or
67 - by editing the {} file
68 """.format(
69 DataContext.GE_YML
70 )
71
72 SETUP_SUCCESS = "\n<cyan>Congratulations! Great Expectations is now set up.</cyan>"
73
74 SECTION_SEPARATOR = "\n================================================================================\n"
75
76 DONE = "Done"
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/cli/cli_messages.py b/great_expectations/cli/cli_messages.py
--- a/great_expectations/cli/cli_messages.py
+++ b/great_expectations/cli/cli_messages.py
@@ -17,6 +17,7 @@
great_expectations
|-- great_expectations.yml
|-- expectations
+ |-- checkpoints
|-- notebooks
|-- plugins
|-- .gitignore
| verification_info: golden_diff, issue, before_files, after_files for great_expectations/cli/cli_messages.py (escaped JSON payload duplicating the prompt and diff above) | 1,139 | 91 |
gh_patches_debug_25258 | rasdani/github-patches | git_diff | mindsdb__lightwood-1091 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Apple M1 support
## Your Environment
* Operating system: macOS in an M1 apple computer
## Describe your issue
As reported by @alejandrovillegas, installation currently fails in MacBooks with M1 processors. See attached log for more details.
[m1_issue.txt](https://github.com/mindsdb/lightwood/files/7042557/m1_issue.txt)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightwood/mixer/__init__.py`
Content:
```
1 from lightwood.mixer.base import BaseMixer
2 from lightwood.mixer.unit import Unit
3 from lightwood.mixer.neural import Neural
4 from lightwood.mixer.neural_ts import NeuralTs
5 from lightwood.mixer.xgboost import XGBoostMixer
6 from lightwood.mixer.random_forest import RandomForest
7 from lightwood.mixer.sktime import SkTime
8 from lightwood.mixer.arima import ARIMAMixer
9 from lightwood.mixer.ets import ETSMixer
10 from lightwood.mixer.gluonts import GluonTSMixer
11 from lightwood.mixer.regression import Regression
12
13 try:
14 from lightwood.mixer.qclassic import QClassic
15 except Exception:
16 QClassic = None
17
18 try:
19 from lightwood.mixer.nhits import NHitsMixer
20 except Exception:
21 NHitsMixer = None
22
23 try:
24 from lightwood.mixer.prophet import ProphetMixer
25 except Exception:
26 ProphetMixer = None
27
28 try:
29 from lightwood.mixer.lightgbm import LightGBM
30 from lightwood.mixer.lightgbm_array import LightGBMArray
31 except Exception:
32 LightGBM = None
33 LightGBMArray = None
34
35 __all__ = ['BaseMixer', 'Neural', 'NeuralTs', 'LightGBM', 'RandomForest', 'LightGBMArray', 'Unit', 'Regression',
36 'SkTime', 'QClassic', 'ProphetMixer', 'ETSMixer', 'ARIMAMixer', 'NHitsMixer', 'GluonTSMixer', 'XGBoostMixer']
37
```
Path: `lightwood/helpers/seed.py`
Content:
```
1 import random
2 import torch
3 import numpy as np
4 import mxnet as mx
5
6
7 def seed(seed_nr: int) -> None:
8 torch.manual_seed(seed_nr)
9 torch.backends.cudnn.deterministic = True
10 torch.backends.cudnn.benchmark = False
11 np.random.seed(seed_nr)
12 random.seed(seed_nr)
13 mx.random.seed(seed_nr)
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightwood/helpers/seed.py b/lightwood/helpers/seed.py
--- a/lightwood/helpers/seed.py
+++ b/lightwood/helpers/seed.py
@@ -1,7 +1,11 @@
import random
import torch
import numpy as np
-import mxnet as mx
+
+try:
+ import mxnet as mx
+except Exception:
+ mx = None
def seed(seed_nr: int) -> None:
@@ -10,4 +14,6 @@
torch.backends.cudnn.benchmark = False
np.random.seed(seed_nr)
random.seed(seed_nr)
- mx.random.seed(seed_nr)
+
+ if mx is not None:
+ mx.random.seed(seed_nr)
diff --git a/lightwood/mixer/__init__.py b/lightwood/mixer/__init__.py
--- a/lightwood/mixer/__init__.py
+++ b/lightwood/mixer/__init__.py
@@ -7,7 +7,6 @@
from lightwood.mixer.sktime import SkTime
from lightwood.mixer.arima import ARIMAMixer
from lightwood.mixer.ets import ETSMixer
-from lightwood.mixer.gluonts import GluonTSMixer
from lightwood.mixer.regression import Regression
try:
@@ -25,6 +24,11 @@
except Exception:
ProphetMixer = None
+try:
+ from lightwood.mixer.gluonts import GluonTSMixer
+except Exception:
+ GluonTSMixer = None
+
try:
from lightwood.mixer.lightgbm import LightGBM
from lightwood.mixer.lightgbm_array import LightGBMArray
| {"golden_diff": "diff --git a/lightwood/helpers/seed.py b/lightwood/helpers/seed.py\n--- a/lightwood/helpers/seed.py\n+++ b/lightwood/helpers/seed.py\n@@ -1,7 +1,11 @@\n import random\n import torch\n import numpy as np\n-import mxnet as mx\n+\n+try:\n+ import mxnet as mx\n+except Exception:\n+ mx = None\n \n \n def seed(seed_nr: int) -> None:\n@@ -10,4 +14,6 @@\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed_nr)\n random.seed(seed_nr)\n- mx.random.seed(seed_nr)\n+\n+ if mx is not None:\n+ mx.random.seed(seed_nr)\ndiff --git a/lightwood/mixer/__init__.py b/lightwood/mixer/__init__.py\n--- a/lightwood/mixer/__init__.py\n+++ b/lightwood/mixer/__init__.py\n@@ -7,7 +7,6 @@\n from lightwood.mixer.sktime import SkTime\n from lightwood.mixer.arima import ARIMAMixer\n from lightwood.mixer.ets import ETSMixer\n-from lightwood.mixer.gluonts import GluonTSMixer\n from lightwood.mixer.regression import Regression\n \n try:\n@@ -25,6 +24,11 @@\n except Exception:\n ProphetMixer = None\n \n+try:\n+ from lightwood.mixer.gluonts import GluonTSMixer\n+except Exception:\n+ GluonTSMixer = None\n+\n try:\n from lightwood.mixer.lightgbm import LightGBM\n from lightwood.mixer.lightgbm_array import LightGBMArray\n", "issue": "Apple M1 support\n## Your Environment\r\n* Operating system: macOS in an M1 apple computer\r\n\r\n## Describe your issue\r\nAs reported by @alejandrovillegas, installation currently fails in MacBooks with M1 processors. See attached log for more details.\r\n[m1_issue.txt](https://github.com/mindsdb/lightwood/files/7042557/m1_issue.txt)\r\n\r\n\n", "before_files": [{"content": "from lightwood.mixer.base import BaseMixer\nfrom lightwood.mixer.unit import Unit\nfrom lightwood.mixer.neural import Neural\nfrom lightwood.mixer.neural_ts import NeuralTs\nfrom lightwood.mixer.xgboost import XGBoostMixer\nfrom lightwood.mixer.random_forest import RandomForest\nfrom lightwood.mixer.sktime import SkTime\nfrom lightwood.mixer.arima import ARIMAMixer\nfrom lightwood.mixer.ets import ETSMixer\nfrom lightwood.mixer.gluonts import GluonTSMixer\nfrom lightwood.mixer.regression import Regression\n\ntry:\n from lightwood.mixer.qclassic import QClassic\nexcept Exception:\n QClassic = None\n\ntry:\n from lightwood.mixer.nhits import NHitsMixer\nexcept Exception:\n NHitsMixer = None\n\ntry:\n from lightwood.mixer.prophet import ProphetMixer\nexcept Exception:\n ProphetMixer = None\n\ntry:\n from lightwood.mixer.lightgbm import LightGBM\n from lightwood.mixer.lightgbm_array import LightGBMArray\nexcept Exception:\n LightGBM = None\n LightGBMArray = None\n\n__all__ = ['BaseMixer', 'Neural', 'NeuralTs', 'LightGBM', 'RandomForest', 'LightGBMArray', 'Unit', 'Regression',\n 'SkTime', 'QClassic', 'ProphetMixer', 'ETSMixer', 'ARIMAMixer', 'NHitsMixer', 'GluonTSMixer', 'XGBoostMixer']\n", "path": "lightwood/mixer/__init__.py"}, {"content": "import random\nimport torch\nimport numpy as np\nimport mxnet as mx\n\n\ndef seed(seed_nr: int) -> None:\n torch.manual_seed(seed_nr)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed_nr)\n random.seed(seed_nr)\n mx.random.seed(seed_nr)\n", "path": "lightwood/helpers/seed.py"}], "after_files": [{"content": "from lightwood.mixer.base import BaseMixer\nfrom lightwood.mixer.unit import Unit\nfrom lightwood.mixer.neural import Neural\nfrom lightwood.mixer.neural_ts import NeuralTs\nfrom lightwood.mixer.xgboost import XGBoostMixer\nfrom lightwood.mixer.random_forest import RandomForest\nfrom lightwood.mixer.sktime 
import SkTime\nfrom lightwood.mixer.arima import ARIMAMixer\nfrom lightwood.mixer.ets import ETSMixer\nfrom lightwood.mixer.regression import Regression\n\ntry:\n from lightwood.mixer.qclassic import QClassic\nexcept Exception:\n QClassic = None\n\ntry:\n from lightwood.mixer.nhits import NHitsMixer\nexcept Exception:\n NHitsMixer = None\n\ntry:\n from lightwood.mixer.prophet import ProphetMixer\nexcept Exception:\n ProphetMixer = None\n\ntry:\n from lightwood.mixer.gluonts import GluonTSMixer\nexcept Exception:\n GluonTSMixer = None\n\ntry:\n from lightwood.mixer.lightgbm import LightGBM\n from lightwood.mixer.lightgbm_array import LightGBMArray\nexcept Exception:\n LightGBM = None\n LightGBMArray = None\n\n__all__ = ['BaseMixer', 'Neural', 'NeuralTs', 'LightGBM', 'RandomForest', 'LightGBMArray', 'Unit', 'Regression',\n 'SkTime', 'QClassic', 'ProphetMixer', 'ETSMixer', 'ARIMAMixer', 'NHitsMixer', 'GluonTSMixer', 'XGBoostMixer']\n", "path": "lightwood/mixer/__init__.py"}, {"content": "import random\nimport torch\nimport numpy as np\n\ntry:\n import mxnet as mx\nexcept Exception:\n mx = None\n\n\ndef seed(seed_nr: int) -> None:\n torch.manual_seed(seed_nr)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed_nr)\n random.seed(seed_nr)\n\n if mx is not None:\n mx.random.seed(seed_nr)\n", "path": "lightwood/helpers/seed.py"}]} | 859 | 362 |
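The lightwood patch above fixes the Apple M1 failure by treating `mxnet` (and the GluonTS mixer that needs it) as optional: the import is wrapped in `try/except` and the seed helper skips the backend when it never loaded. Below is a minimal, generic sketch of that guarded-import pattern; `some_optional_backend` is a placeholder name (not a real lightwood module) and numpy is assumed to be installed.

```python
# Minimal sketch of the guarded-import pattern used in the patch above.
# "some_optional_backend" is a placeholder; its random.seed() call simply
# mirrors the mxnet API that lightwood guards.
import random

import numpy as np

try:
    import some_optional_backend as backend
except Exception:
    backend = None  # no wheel for this platform; degrade gracefully


def seed_all(seed_nr: int) -> None:
    """Seed every library that actually imported."""
    random.seed(seed_nr)
    np.random.seed(seed_nr)
    if backend is not None:
        backend.random.seed(seed_nr)
```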
gh_patches_debug_5753 | rasdani/github-patches | git_diff | OpenEnergyPlatform__oeplatform-787 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Meta-schemas are not created properly
It seems that a fresh installation of the most recent version does not create the meta-schemas anymore.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `oedb_datamodels/versions/5c098aa81e2c_create_schemas.py`
Content:
```
1 """create schemas
2
3 Revision ID: 5c098aa81e2c
4 Revises: 46fb02acc3b1
5 Create Date: 2017-11-23 15:53:57.716306
6
7 """
8 import sqlalchemy as sa
9 from alembic import op
10
11 # revision identifiers, used by Alembic.
12 revision = "5c098aa81e2c"
13 down_revision = "048215319c74"
14 branch_labels = None
15 depends_on = None
16
17 schemas = [
18 "demand",
19 "economy",
20 "emission",
21 "environment",
22 "grid",
23 "boundaries",
24 "society",
25 "supply",
26 "scenario",
27 "climate",
28 "model_draft",
29 "openstreetmap",
30 "reference",
31 ]
32
33
34 def upgrade():
35 for s in schemas:
36 op.execute("CREATE SCHEMA " + s)
37
38
39 def downgrade():
40 for s in schemas:
41 op.execute("DROP SCHEMA " + s)
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py b/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py
--- a/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py
+++ b/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py
@@ -30,12 +30,15 @@
"reference",
]
-
def upgrade():
for s in schemas:
op.execute("CREATE SCHEMA " + s)
+ for s in schemas:
+ op.execute("CREATE SCHEMA _" + s)
def downgrade():
for s in schemas:
- op.execute("DROP SCHEMA " + s)
+ op.execute("DROP SCHEMA _" + s + " CASCADE")
+ for s in schemas:
+ op.execute("DROP SCHEMA " + s + " CASCADE")
| {"golden_diff": "diff --git a/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py b/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py\n--- a/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py\n+++ b/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py\n@@ -30,12 +30,15 @@\n \"reference\",\n ]\n \n-\n def upgrade():\n for s in schemas:\n op.execute(\"CREATE SCHEMA \" + s)\n+ for s in schemas:\n+ op.execute(\"CREATE SCHEMA _\" + s)\n \n \n def downgrade():\n for s in schemas:\n- op.execute(\"DROP SCHEMA \" + s)\n+ op.execute(\"DROP SCHEMA _\" + s + \" CASCADE\")\n+ for s in schemas:\n+ op.execute(\"DROP SCHEMA \" + s + \" CASCADE\")\n", "issue": "Meta-schemas are not created properly\nIt seems that a fresh installation of the most recent version does not create the meta-schemas anymore.\n", "before_files": [{"content": "\"\"\"create schemas\n\nRevision ID: 5c098aa81e2c\nRevises: 46fb02acc3b1\nCreate Date: 2017-11-23 15:53:57.716306\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"5c098aa81e2c\"\ndown_revision = \"048215319c74\"\nbranch_labels = None\ndepends_on = None\n\nschemas = [\n \"demand\",\n \"economy\",\n \"emission\",\n \"environment\",\n \"grid\",\n \"boundaries\",\n \"society\",\n \"supply\",\n \"scenario\",\n \"climate\",\n \"model_draft\",\n \"openstreetmap\",\n \"reference\",\n]\n\n\ndef upgrade():\n for s in schemas:\n op.execute(\"CREATE SCHEMA \" + s)\n\n\ndef downgrade():\n for s in schemas:\n op.execute(\"DROP SCHEMA \" + s)\n", "path": "oedb_datamodels/versions/5c098aa81e2c_create_schemas.py"}], "after_files": [{"content": "\"\"\"create schemas\n\nRevision ID: 5c098aa81e2c\nRevises: 46fb02acc3b1\nCreate Date: 2017-11-23 15:53:57.716306\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"5c098aa81e2c\"\ndown_revision = \"048215319c74\"\nbranch_labels = None\ndepends_on = None\n\nschemas = [\n \"demand\",\n \"economy\",\n \"emission\",\n \"environment\",\n \"grid\",\n \"boundaries\",\n \"society\",\n \"supply\",\n \"scenario\",\n \"climate\",\n \"model_draft\",\n \"openstreetmap\",\n \"reference\",\n]\n\ndef upgrade():\n for s in schemas:\n op.execute(\"CREATE SCHEMA \" + s)\n for s in schemas:\n op.execute(\"CREATE SCHEMA _\" + s)\n\n\ndef downgrade():\n for s in schemas:\n op.execute(\"DROP SCHEMA _\" + s + \" CASCADE\")\n for s in schemas:\n op.execute(\"DROP SCHEMA \" + s + \" CASCADE\")\n", "path": "oedb_datamodels/versions/5c098aa81e2c_create_schemas.py"}]} | 622 | 225 |
gh_patches_debug_15539 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Validate lower bounds of dependencies in CI
# Description
To ensure that the lower bound of all dependencies are still sufficient for the API used, add a test that installs from a `lower-bound-requirements.txt` that might look something like
```
# core
scipy==1.4.0
click==7.0.0
tqdm==4.56.0
jsonschema==3.2.0
jsonpatch==1.23.0
pyyaml==5.1.0
# xmlio
uproot3==3.14.0
uproot==4.0.0
# minuit
iminuit==2.1.0
# ...
```
and then run the test suite.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major
7 'tensorflow-probability~=0.10.0',
8 ],
9 'torch': ['torch~=1.8'],
10 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],
11 'xmlio': [
12 'uproot3~=3.14',
13 'uproot~=4.0',
14 ], # uproot3 required until writing to ROOT supported in uproot4
15 'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood
16 }
17 extras_require['backends'] = sorted(
18 set(
19 extras_require['tensorflow']
20 + extras_require['torch']
21 + extras_require['jax']
22 + extras_require['minuit']
23 )
24 )
25 extras_require['contrib'] = sorted({'matplotlib', 'requests'})
26 extras_require['lint'] = sorted({'flake8', 'black'})
27
28 extras_require['test'] = sorted(
29 set(
30 extras_require['backends']
31 + extras_require['xmlio']
32 + extras_require['contrib']
33 + extras_require['shellcomplete']
34 + [
35 'pytest~=6.0',
36 'pytest-cov>=2.5.1',
37 'pytest-mock',
38 'pytest-benchmark[histogram]',
39 'pytest-console-scripts',
40 'pytest-mpl',
41 'pydocstyle',
42 'papermill~=2.0',
43 'nteract-scrapbook~=0.2',
44 'jupyter',
45 'graphviz',
46 ]
47 )
48 )
49 extras_require['docs'] = sorted(
50 set(
51 extras_require['xmlio']
52 + [
53 'sphinx>=3.1.2',
54 'sphinxcontrib-bibtex~=2.1',
55 'sphinx-click',
56 'sphinx_rtd_theme',
57 'nbsphinx',
58 'ipywidgets',
59 'sphinx-issues',
60 'sphinx-copybutton>0.2.9',
61 ]
62 )
63 )
64 extras_require['develop'] = sorted(
65 set(
66 extras_require['docs']
67 + extras_require['lint']
68 + extras_require['test']
69 + [
70 'nbdime',
71 'bump2version',
72 'ipython',
73 'pre-commit',
74 'check-manifest',
75 'codemetapy>=0.3.4',
76 'twine',
77 ]
78 )
79 )
80 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
81
82
83 setup(
84 extras_require=extras_require,
85 use_scm_version=lambda: {'local_scheme': lambda version: ''},
86 )
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,13 +3,13 @@
extras_require = {
'shellcomplete': ['click_completion'],
'tensorflow': [
- 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major
- 'tensorflow-probability~=0.10.0',
+ 'tensorflow~=2.2.1', # TensorFlow minor releases are as volatile as major
+ 'tensorflow-probability~=0.10.1',
],
'torch': ['torch~=1.8'],
- 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],
+ 'jax': ['jax~=0.2.8', 'jaxlib~=0.1.58'],
'xmlio': [
- 'uproot3~=3.14',
+ 'uproot3>=3.14.1',
'uproot~=4.0',
], # uproot3 required until writing to ROOT supported in uproot4
'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,13 +3,13 @@\n extras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n- 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n- 'tensorflow-probability~=0.10.0',\n+ 'tensorflow~=2.2.1', # TensorFlow minor releases are as volatile as major\n+ 'tensorflow-probability~=0.10.1',\n ],\n 'torch': ['torch~=1.8'],\n- 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n+ 'jax': ['jax~=0.2.8', 'jaxlib~=0.1.58'],\n 'xmlio': [\n- 'uproot3~=3.14',\n+ 'uproot3>=3.14.1',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood\n", "issue": "Validate lower bounds of dependencies in CI\n# Description\r\n\r\nTo ensure that the lower bound of all dependencies are still sufficient for the API used, add a test that installs from a `lower-bound-requirements.txt` that might look something like\r\n\r\n```\r\n# core\r\nscipy==1.4.0\r\nclick==7.0.0\r\ntqdm==4.56.0\r\njsonschema==3.2.0\r\njsonpatch==1.23.0\r\npyyaml==5.1.0\r\n# xmlio\r\nuproot3==3.14.0\r\nuproot==4.0.0\r\n# minuit\r\niminuit==2.1.0\r\n# ...\r\n```\r\n\r\nand then run the test suite.\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.8'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': [\n 'uproot3~=3.14',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.1', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.1',\n ],\n 'torch': ['torch~=1.8'],\n 'jax': ['jax~=0.2.8', 'jaxlib~=0.1.58'],\n 
'xmlio': [\n 'uproot3>=3.14.1',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,182 | 280 |
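The pyhf issue above proposes pinning every dependency to its declared lower bound and re-running the test suite. One way to derive such a pin file from requirement specifiers is sketched below; it assumes the `packaging` library is available, and the requirement strings are illustrative rather than pyhf's actual list.

```python
# Rough sketch: turn ">=" / "~=" specifiers into exact "==" lower-bound pins.
from packaging.requirements import Requirement
from packaging.version import Version

requirements = ["scipy>=1.4.0", "click~=7.0", "jsonschema>=3.2.0"]

pins = []
for line in requirements:
    req = Requirement(line)
    floors = [
        Version(spec.version)
        for spec in req.specifier
        if spec.operator in (">=", "~=", "==")
    ]
    if floors:
        pins.append(f"{req.name}=={min(floors)}")

print("\n".join(pins))  # scipy==1.4.0, click==7.0, jsonschema==3.2.0
```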
gh_patches_debug_723 | rasdani/github-patches | git_diff | pypi__warehouse-1177 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Permanent URL (Heroku "No such app" error)
I noticed that https://warehouse.python.org/ produces a `Heroku | No such app` error at the moment. Is this intentional? Are we permanently at https://pypi.io/ now?
If so, we should probably update the URL in a few places: https://github.com/pypa/warehouse/search?utf8=%E2%9C%93&q=%22warehouse.python.org%22
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/__about__.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import os.path
14
15 __all__ = [
16 "__title__", "__summary__", "__uri__", "__version__", "__commit__",
17 "__author__", "__email__", "__license__", "__copyright__",
18 ]
19
20
21 try:
22 base_dir = os.path.dirname(os.path.abspath(__file__))
23 except NameError:
24 base_dir = None
25
26
27 __title__ = "warehouse"
28 __summary__ = "Next Generation Python Package Repository"
29 __uri__ = "https://warehouse.python.org/"
30
31 __version__ = "15.0.dev0"
32
33 if base_dir is not None and os.path.exists(os.path.join(base_dir, ".commit")):
34 with open(os.path.join(base_dir, ".commit")) as fp:
35 __commit__ = fp.read().strip()
36 else:
37 __commit__ = None
38
39 __author__ = "The Python Packaging Authority"
40 __email__ = "[email protected]"
41
42 __license__ = "Apache License, Version 2.0"
43 __copyright__ = "2015 %s" % __author__
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/__about__.py b/warehouse/__about__.py
--- a/warehouse/__about__.py
+++ b/warehouse/__about__.py
@@ -26,7 +26,7 @@
__title__ = "warehouse"
__summary__ = "Next Generation Python Package Repository"
-__uri__ = "https://warehouse.python.org/"
+__uri__ = "https://pypi.io/"
__version__ = "15.0.dev0"
| {"golden_diff": "diff --git a/warehouse/__about__.py b/warehouse/__about__.py\n--- a/warehouse/__about__.py\n+++ b/warehouse/__about__.py\n@@ -26,7 +26,7 @@\n \n __title__ = \"warehouse\"\n __summary__ = \"Next Generation Python Package Repository\"\n-__uri__ = \"https://warehouse.python.org/\"\n+__uri__ = \"https://pypi.io/\"\n \n __version__ = \"15.0.dev0\"\n", "issue": "Permanent URL (Heroku \"No such app\" error)\nI noticed that https://warehouse.python.org/ produces a `Heroku | No such app` error at the moment. Is this intentional? Are we permanently at https://pypi.io/ now?\n\nIf so, we should probably update the URL in a few places: https://github.com/pypa/warehouse/search?utf8=%E2%9C%93&q=%22warehouse.python.org%22\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__commit__\",\n \"__author__\", \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n\ntry:\n base_dir = os.path.dirname(os.path.abspath(__file__))\nexcept NameError:\n base_dir = None\n\n\n__title__ = \"warehouse\"\n__summary__ = \"Next Generation Python Package Repository\"\n__uri__ = \"https://warehouse.python.org/\"\n\n__version__ = \"15.0.dev0\"\n\nif base_dir is not None and os.path.exists(os.path.join(base_dir, \".commit\")):\n with open(os.path.join(base_dir, \".commit\")) as fp:\n __commit__ = fp.read().strip()\nelse:\n __commit__ = None\n\n__author__ = \"The Python Packaging Authority\"\n__email__ = \"[email protected]\"\n\n__license__ = \"Apache License, Version 2.0\"\n__copyright__ = \"2015 %s\" % __author__\n", "path": "warehouse/__about__.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__commit__\",\n \"__author__\", \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n\ntry:\n base_dir = os.path.dirname(os.path.abspath(__file__))\nexcept NameError:\n base_dir = None\n\n\n__title__ = \"warehouse\"\n__summary__ = \"Next Generation Python Package Repository\"\n__uri__ = \"https://pypi.io/\"\n\n__version__ = \"15.0.dev0\"\n\nif base_dir is not None and os.path.exists(os.path.join(base_dir, \".commit\")):\n with open(os.path.join(base_dir, \".commit\")) as fp:\n __commit__ = fp.read().strip()\nelse:\n __commit__ = None\n\n__author__ = \"The Python Packaging Authority\"\n__email__ = \"[email protected]\"\n\n__license__ = \"Apache License, Version 
2.0\"\n__copyright__ = \"2015 %s\" % __author__\n", "path": "warehouse/__about__.py"}]} | 785 | 104 |
gh_patches_debug_27547 | rasdani/github-patches | git_diff | conan-io__conan-300 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[requires} in conanfile.txt gives no error
The typo:
```
[requires}
...
```
in `conanfile.txt` does not give any error indication at all when running `conan install ...`. The way to see that something is wrong is that it doesn't do anything, but finding the cause was not as easy as it could've been.
This is on 0.9.2 on Ubuntu 15.10.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/util/config_parser.py`
Content:
```
1 import re
2 from conans.errors import ConanException
3
4
5 class ConfigParser(object):
6 """ util class to load a file with sections as [section1]
7 checking the values of those sections, and returns each section
8 as parser.section
9 Currently used in ConanInfo and ConanFileTextLoader
10 """
11 def __init__(self, text, allowed_fields=None, parse_lines=False):
12 self._sections = {}
13 self._allowed_fields = allowed_fields or []
14 pattern = re.compile("^\[([a-z_]{2,50})\]")
15 current_lines = []
16 for line in text.splitlines():
17 line = line.strip()
18 if not line or line[0] == '#':
19 continue
20 m = pattern.match(line)
21 if m:
22 group = m.group(1)
23 if self._allowed_fields and group not in self._allowed_fields:
24 raise ConanException("ConfigParser: Unrecognized field '%s'" % group)
25 current_lines = []
26 self._sections[group] = current_lines
27 else:
28 if parse_lines:
29 line = line.split('#')[0]
30 line = line.strip()
31 current_lines.append(line)
32
33 def __getattr__(self, name):
34 if name in self._sections:
35 return "\n".join(self._sections[name])
36 else:
37 if self._allowed_fields and name in self._allowed_fields:
38 return ""
39 else:
40 raise ConanException("ConfigParser: Unrecognized field '%s'" % name)
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/util/config_parser.py b/conans/util/config_parser.py
--- a/conans/util/config_parser.py
+++ b/conans/util/config_parser.py
@@ -12,19 +12,26 @@
self._sections = {}
self._allowed_fields = allowed_fields or []
pattern = re.compile("^\[([a-z_]{2,50})\]")
- current_lines = []
+ current_lines = None
for line in text.splitlines():
line = line.strip()
if not line or line[0] == '#':
continue
- m = pattern.match(line)
- if m:
- group = m.group(1)
- if self._allowed_fields and group not in self._allowed_fields:
- raise ConanException("ConfigParser: Unrecognized field '%s'" % group)
+ field = None
+ if line[0] == '[':
+ m = pattern.match(line)
+ if m:
+ field = m.group(1)
+ else:
+ raise ConanException("ConfigParser: Bad syntax '%s'" % line)
+ if field:
+ if self._allowed_fields and field not in self._allowed_fields:
+ raise ConanException("ConfigParser: Unrecognized field '%s'" % field)
current_lines = []
- self._sections[group] = current_lines
+ self._sections[field] = current_lines
else:
+ if current_lines is None:
+ raise ConanException("ConfigParser: Unexpected line '%s'" % line)
if parse_lines:
line = line.split('#')[0]
line = line.strip()
| {"golden_diff": "diff --git a/conans/util/config_parser.py b/conans/util/config_parser.py\n--- a/conans/util/config_parser.py\n+++ b/conans/util/config_parser.py\n@@ -12,19 +12,26 @@\n self._sections = {}\n self._allowed_fields = allowed_fields or []\n pattern = re.compile(\"^\\[([a-z_]{2,50})\\]\")\n- current_lines = []\n+ current_lines = None\n for line in text.splitlines():\n line = line.strip()\n if not line or line[0] == '#':\n continue\n- m = pattern.match(line)\n- if m:\n- group = m.group(1)\n- if self._allowed_fields and group not in self._allowed_fields:\n- raise ConanException(\"ConfigParser: Unrecognized field '%s'\" % group)\n+ field = None\n+ if line[0] == '[':\n+ m = pattern.match(line)\n+ if m:\n+ field = m.group(1)\n+ else:\n+ raise ConanException(\"ConfigParser: Bad syntax '%s'\" % line)\n+ if field:\n+ if self._allowed_fields and field not in self._allowed_fields:\n+ raise ConanException(\"ConfigParser: Unrecognized field '%s'\" % field)\n current_lines = []\n- self._sections[group] = current_lines\n+ self._sections[field] = current_lines\n else:\n+ if current_lines is None:\n+ raise ConanException(\"ConfigParser: Unexpected line '%s'\" % line)\n if parse_lines:\n line = line.split('#')[0]\n line = line.strip()\n", "issue": "[requires} in conanfile.txt gives no error\nThe typo:\n\n```\n[requires}\n...\n```\n\nin `conanfile.txt` does not give any error indication at all when running `conan install ...`. The way to see that something is wrong is that it doesn't do anything, but finding the cause was not as easy as it could've been.\n\nThis is on 0.9.2 on Ubuntu 15.10.\n\n", "before_files": [{"content": "import re\nfrom conans.errors import ConanException\n\n\nclass ConfigParser(object):\n \"\"\" util class to load a file with sections as [section1]\n checking the values of those sections, and returns each section\n as parser.section\n Currently used in ConanInfo and ConanFileTextLoader\n \"\"\"\n def __init__(self, text, allowed_fields=None, parse_lines=False):\n self._sections = {}\n self._allowed_fields = allowed_fields or []\n pattern = re.compile(\"^\\[([a-z_]{2,50})\\]\")\n current_lines = []\n for line in text.splitlines():\n line = line.strip()\n if not line or line[0] == '#':\n continue\n m = pattern.match(line)\n if m:\n group = m.group(1)\n if self._allowed_fields and group not in self._allowed_fields:\n raise ConanException(\"ConfigParser: Unrecognized field '%s'\" % group)\n current_lines = []\n self._sections[group] = current_lines\n else:\n if parse_lines:\n line = line.split('#')[0]\n line = line.strip()\n current_lines.append(line)\n\n def __getattr__(self, name):\n if name in self._sections:\n return \"\\n\".join(self._sections[name])\n else:\n if self._allowed_fields and name in self._allowed_fields:\n return \"\"\n else:\n raise ConanException(\"ConfigParser: Unrecognized field '%s'\" % name)\n", "path": "conans/util/config_parser.py"}], "after_files": [{"content": "import re\nfrom conans.errors import ConanException\n\n\nclass ConfigParser(object):\n \"\"\" util class to load a file with sections as [section1]\n checking the values of those sections, and returns each section\n as parser.section\n Currently used in ConanInfo and ConanFileTextLoader\n \"\"\"\n def __init__(self, text, allowed_fields=None, parse_lines=False):\n self._sections = {}\n self._allowed_fields = allowed_fields or []\n pattern = re.compile(\"^\\[([a-z_]{2,50})\\]\")\n current_lines = None\n for line in text.splitlines():\n line = line.strip()\n if not line or line[0] == '#':\n 
continue\n field = None\n if line[0] == '[':\n m = pattern.match(line)\n if m:\n field = m.group(1)\n else:\n raise ConanException(\"ConfigParser: Bad syntax '%s'\" % line)\n if field:\n if self._allowed_fields and field not in self._allowed_fields:\n raise ConanException(\"ConfigParser: Unrecognized field '%s'\" % field)\n current_lines = []\n self._sections[field] = current_lines\n else:\n if current_lines is None:\n raise ConanException(\"ConfigParser: Unexpected line '%s'\" % line)\n if parse_lines:\n line = line.split('#')[0]\n line = line.strip()\n current_lines.append(line)\n\n def __getattr__(self, name):\n if name in self._sections:\n return \"\\n\".join(self._sections[name])\n else:\n if self._allowed_fields and name in self._allowed_fields:\n return \"\"\n else:\n raise ConanException(\"ConfigParser: Unrecognized field '%s'\" % name)\n", "path": "conans/util/config_parser.py"}]} | 743 | 357 |
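The conan patch above makes `ConfigParser` raise on a malformed section header such as `[requires}` instead of silently swallowing everything after it. A small regression check against the patched class could look like this (it assumes pytest is installed):

```python
# Regression check for the stricter section-header parsing.
import pytest

from conans.errors import ConanException
from conans.util.config_parser import ConfigParser


def test_bad_section_header_raises():
    bad_text = "[requires}\nsome/package@user/channel\n"
    with pytest.raises(ConanException):
        ConfigParser(bad_text, allowed_fields=["requires"])
```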
gh_patches_debug_30732 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-194 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unexpected popup in server_roles fixture
The server_roles fixture attempts to explicitly navigate to the current server's settings page before clicking any checkboxen, but the call to do that (`settings_pg.click_on_current_server_tree_node()`) occasionally fails because a popup appears. We'll need to make sure the popup can be safely ignored, and conditionally ignore it if it appears.
Until this is fixed, a workaround is to assume that navigation with the `cnf_configuration_pg` fixture will land on the current server settings page:
``` diff
diff --git a/fixtures/server_roles.py b/fixtures/server_roles.py
index dfcb574..2b99836 100644
--- a/fixtures/server_roles.py
+++ b/fixtures/server_roles.py
@@ -94,7 +94,9 @@ def server_roles(fixtureconf, cfme_data, cnf_configuration_pg):
# Nav to the settings tab
settings_pg = cnf_configuration_pg.click_on_settings()
- server_settings_pg = settings_pg.click_on_current_server_tree_node()
+ #server_settings_pg = settings_pg.click_on_current_server_tree_node()
+ from pages.configuration_subpages.settings_subpages.server_settings import ServerSettings
+ server_settings_pg = ServerSettings(settings_pg.testsetup)
# sst is a configuration_subpages.settings_subpages.server_settings_subpages.
# server_settings_tab.ServerSettingsTab
sst = server_settings_pg.click_on_server_tab()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fixtures/server_roles.py`
Content:
```
1 import logging
2
3 import pytest
4 from unittestzero import Assert
5
6 logger = logging.getLogger(__name__)
7
8 default_roles = (
9 'database_operations',
10 'event',
11 'ems_inventory',
12 'ems_operations',
13 'reporting',
14 'scheduler',
15 'smartstate',
16 'user_interface',
17 'web_services',
18 )
19
20 @pytest.fixture
21 def server_roles(fixtureconf, cfme_data, cnf_configuration_pg):
22 """Set the server roles based on a list of roles attached to the test using this fixture
23
24 Usage examples:
25
26 Pass the desired roles in to the "server_roles_set" decorator:
27
28 _roles = ('database_operations', 'event', 'user_interface', 'web_services')
29
30 @pytest.mark.fixtureconf(server_roles=_roles)
31 def test_appliance_roles(server_roles):
32 assert len(server_roles) == 4
33
34 Roles can be pulled from the cfme_data fixture using yaml selectors,
35 which will do a 'set' with the list of roles found at the target path:
36
37 @pytest.mark.fixtureconf(server_roles_cfmedata=('level1', 'sublevel2'))
38 def test_appliance_roles(server_roles):
39 assert len(server_roles) == 3
40
41 Which corresponds to this yaml layout:
42
43 level1:
44 sublevel2:
45 - database_operations
46 - user_interface
47 - web_services
48
49 To ensure the appliance has the default roles:
50
51 from fixtures.server_roles import default_roles
52
53 @pytest.mark.fixtureconf(server_roles=default_roles)
54 def test_appliance_roles(server_roles):
55 assert len(server_roles) == len(default_roles)
56
57 List of server role names currently exposed in the CFME interface:
58
59 - automate
60 - ems_metrics_coordinator
61 - ems_metrics_collector
62 - ems_metrics_processor
63 - database_operations
64 - database_synchronization
65 - event
66 - ems_inventory
67 - ems_operations
68 - notifier
69 - reporting
70 - scheduler
71 - smartproxy
72 - smartstate
73 - user_interface
74 - web_services
75
76 """
77
78 if 'server_roles' in fixtureconf:
79 roles_list = list(fixtureconf['server_roles'])
80 elif 'server_roles_cfmedata' in fixtureconf:
81 roles_list = cfme_data.data
82 # Drills down into cfme_data YAML by selector, expecting a list
83 # of roles at the end. A KeyError here probably means the YAMe
84 # selector is wrong
85 for selector in fixtureconf['server_roles_cfmedata']:
86 roles_list = roles_list[selector]
87 else:
88 raise Exception('server_roles config not found on test callable')
89
90 # Deselecting the user interface role is really un-fun, and is
91 # counterproductive in the middle of user interface testing.
92 if 'user_interface' not in roles_list:
93 raise Exception('Refusing to remove the user_interface role')
94
95 # Nav to the settings tab
96 settings_pg = cnf_configuration_pg.click_on_settings()
97 server_settings_pg = settings_pg.click_on_current_server_tree_node()
98 # sst is a configuration_subpages.settings_subpages.server_settings_subpages.
99 # server_settings_tab.ServerSettingsTab
100 sst = server_settings_pg.click_on_server_tab()
101
102 # Set the roles!
103 if sorted(sst.selected_server_role_names) != sorted(roles_list):
104 sst.set_server_roles(roles_list)
105 sst.save()
106 sst._wait_for_results_refresh()
107 else:
108 logger.info('Server roles already match configured fixture roles, not changing server roles')
109
110 # If this assert fails, check roles names for typos or other minor differences
111 Assert.equal(sorted(sst.selected_server_role_names), sorted(roles_list))
112
113 return sst.selected_server_role_names
114
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fixtures/server_roles.py b/fixtures/server_roles.py
--- a/fixtures/server_roles.py
+++ b/fixtures/server_roles.py
@@ -17,6 +17,7 @@
'web_services',
)
+
@pytest.fixture
def server_roles(fixtureconf, cfme_data, cnf_configuration_pg):
"""Set the server roles based on a list of roles attached to the test using this fixture
@@ -94,7 +95,11 @@
# Nav to the settings tab
settings_pg = cnf_configuration_pg.click_on_settings()
- server_settings_pg = settings_pg.click_on_current_server_tree_node()
+ # Workaround to rudely bypass a popup that sometimes appears for
+ # unknown reasons.
+ # See also: https://github.com/RedHatQE/cfme_tests/issues/168
+ from pages.configuration_subpages.settings_subpages.server_settings import ServerSettings
+ server_settings_pg = ServerSettings(settings_pg.testsetup)
# sst is a configuration_subpages.settings_subpages.server_settings_subpages.
# server_settings_tab.ServerSettingsTab
sst = server_settings_pg.click_on_server_tab()
@@ -105,10 +110,9 @@
sst.save()
sst._wait_for_results_refresh()
else:
- logger.info('Server roles already match configured fixture roles, not changing server roles')
+ logger.info('Server roles match configured fixture roles, not changing server roles')
# If this assert fails, check roles names for typos or other minor differences
Assert.equal(sorted(sst.selected_server_role_names), sorted(roles_list))
return sst.selected_server_role_names
-
| {"golden_diff": "diff --git a/fixtures/server_roles.py b/fixtures/server_roles.py\n--- a/fixtures/server_roles.py\n+++ b/fixtures/server_roles.py\n@@ -17,6 +17,7 @@\n 'web_services',\n )\n \n+\n @pytest.fixture\n def server_roles(fixtureconf, cfme_data, cnf_configuration_pg):\n \"\"\"Set the server roles based on a list of roles attached to the test using this fixture\n@@ -94,7 +95,11 @@\n \n # Nav to the settings tab\n settings_pg = cnf_configuration_pg.click_on_settings()\n- server_settings_pg = settings_pg.click_on_current_server_tree_node()\n+ # Workaround to rudely bypass a popup that sometimes appears for\n+ # unknown reasons.\n+ # See also: https://github.com/RedHatQE/cfme_tests/issues/168\n+ from pages.configuration_subpages.settings_subpages.server_settings import ServerSettings\n+ server_settings_pg = ServerSettings(settings_pg.testsetup)\n # sst is a configuration_subpages.settings_subpages.server_settings_subpages.\n # server_settings_tab.ServerSettingsTab\n sst = server_settings_pg.click_on_server_tab()\n@@ -105,10 +110,9 @@\n sst.save()\n sst._wait_for_results_refresh()\n else:\n- logger.info('Server roles already match configured fixture roles, not changing server roles')\n+ logger.info('Server roles match configured fixture roles, not changing server roles')\n \n # If this assert fails, check roles names for typos or other minor differences\n Assert.equal(sorted(sst.selected_server_role_names), sorted(roles_list))\n \n return sst.selected_server_role_names\n-\n", "issue": "Unexpected popup in server_roles fixture\nThe server_roles fixture attempts to explicitly navigate to the current server's settings page before clicking any checkboxen, but the call to do that (`settings_pg.click_on_current_server_tree_node()`) occasionally fails because a popup appears. 
We'll need to make sure the popup can be safely ignored, and conditionally ignore it if it appears.\n\nUntil this is fixed, a workaround is to assume that navigation with the `cnf_configuration_pg` fixture will land on the current server settings page:\n\n``` diff\ndiff --git a/fixtures/server_roles.py b/fixtures/server_roles.py\nindex dfcb574..2b99836 100644\n--- a/fixtures/server_roles.py\n+++ b/fixtures/server_roles.py\n@@ -94,7 +94,9 @@ def server_roles(fixtureconf, cfme_data, cnf_configuration_pg):\n\n # Nav to the settings tab\n settings_pg = cnf_configuration_pg.click_on_settings()\n- server_settings_pg = settings_pg.click_on_current_server_tree_node()\n+ #server_settings_pg = settings_pg.click_on_current_server_tree_node()\n+ from pages.configuration_subpages.settings_subpages.server_settings import ServerSettings\n+ server_settings_pg = ServerSettings(settings_pg.testsetup)\n # sst is a configuration_subpages.settings_subpages.server_settings_subpages.\n # server_settings_tab.ServerSettingsTab\n sst = server_settings_pg.click_on_server_tab()\n```\n\n", "before_files": [{"content": "import logging\n\nimport pytest\nfrom unittestzero import Assert\n\nlogger = logging.getLogger(__name__)\n\ndefault_roles = (\n 'database_operations',\n 'event',\n 'ems_inventory',\n 'ems_operations',\n 'reporting',\n 'scheduler',\n 'smartstate',\n 'user_interface',\n 'web_services',\n)\n\[email protected]\ndef server_roles(fixtureconf, cfme_data, cnf_configuration_pg):\n \"\"\"Set the server roles based on a list of roles attached to the test using this fixture\n\n Usage examples:\n\n Pass the desired roles in to the \"server_roles_set\" decorator:\n\n _roles = ('database_operations', 'event', 'user_interface', 'web_services')\n\n @pytest.mark.fixtureconf(server_roles=_roles)\n def test_appliance_roles(server_roles):\n assert len(server_roles) == 4\n\n Roles can be pulled from the cfme_data fixture using yaml selectors,\n which will do a 'set' with the list of roles found at the target path:\n\n @pytest.mark.fixtureconf(server_roles_cfmedata=('level1', 'sublevel2'))\n def test_appliance_roles(server_roles):\n assert len(server_roles) == 3\n\n Which corresponds to this yaml layout:\n\n level1:\n sublevel2:\n - database_operations\n - user_interface\n - web_services\n\n To ensure the appliance has the default roles:\n\n from fixtures.server_roles import default_roles\n\n @pytest.mark.fixtureconf(server_roles=default_roles)\n def test_appliance_roles(server_roles):\n assert len(server_roles) == len(default_roles)\n\n List of server role names currently exposed in the CFME interface:\n\n - automate\n - ems_metrics_coordinator\n - ems_metrics_collector\n - ems_metrics_processor\n - database_operations\n - database_synchronization\n - event\n - ems_inventory\n - ems_operations\n - notifier\n - reporting\n - scheduler\n - smartproxy\n - smartstate\n - user_interface\n - web_services\n\n \"\"\"\n\n if 'server_roles' in fixtureconf:\n roles_list = list(fixtureconf['server_roles'])\n elif 'server_roles_cfmedata' in fixtureconf:\n roles_list = cfme_data.data\n # Drills down into cfme_data YAML by selector, expecting a list\n # of roles at the end. 
A KeyError here probably means the YAMe\n # selector is wrong\n for selector in fixtureconf['server_roles_cfmedata']:\n roles_list = roles_list[selector]\n else:\n raise Exception('server_roles config not found on test callable')\n\n # Deselecting the user interface role is really un-fun, and is\n # counterproductive in the middle of user interface testing.\n if 'user_interface' not in roles_list:\n raise Exception('Refusing to remove the user_interface role')\n\n # Nav to the settings tab\n settings_pg = cnf_configuration_pg.click_on_settings()\n server_settings_pg = settings_pg.click_on_current_server_tree_node()\n # sst is a configuration_subpages.settings_subpages.server_settings_subpages.\n # server_settings_tab.ServerSettingsTab\n sst = server_settings_pg.click_on_server_tab()\n\n # Set the roles!\n if sorted(sst.selected_server_role_names) != sorted(roles_list):\n sst.set_server_roles(roles_list)\n sst.save()\n sst._wait_for_results_refresh()\n else:\n logger.info('Server roles already match configured fixture roles, not changing server roles')\n\n # If this assert fails, check roles names for typos or other minor differences\n Assert.equal(sorted(sst.selected_server_role_names), sorted(roles_list))\n\n return sst.selected_server_role_names\n\n", "path": "fixtures/server_roles.py"}], "after_files": [{"content": "import logging\n\nimport pytest\nfrom unittestzero import Assert\n\nlogger = logging.getLogger(__name__)\n\ndefault_roles = (\n 'database_operations',\n 'event',\n 'ems_inventory',\n 'ems_operations',\n 'reporting',\n 'scheduler',\n 'smartstate',\n 'user_interface',\n 'web_services',\n)\n\n\[email protected]\ndef server_roles(fixtureconf, cfme_data, cnf_configuration_pg):\n \"\"\"Set the server roles based on a list of roles attached to the test using this fixture\n\n Usage examples:\n\n Pass the desired roles in to the \"server_roles_set\" decorator:\n\n _roles = ('database_operations', 'event', 'user_interface', 'web_services')\n\n @pytest.mark.fixtureconf(server_roles=_roles)\n def test_appliance_roles(server_roles):\n assert len(server_roles) == 4\n\n Roles can be pulled from the cfme_data fixture using yaml selectors,\n which will do a 'set' with the list of roles found at the target path:\n\n @pytest.mark.fixtureconf(server_roles_cfmedata=('level1', 'sublevel2'))\n def test_appliance_roles(server_roles):\n assert len(server_roles) == 3\n\n Which corresponds to this yaml layout:\n\n level1:\n sublevel2:\n - database_operations\n - user_interface\n - web_services\n\n To ensure the appliance has the default roles:\n\n from fixtures.server_roles import default_roles\n\n @pytest.mark.fixtureconf(server_roles=default_roles)\n def test_appliance_roles(server_roles):\n assert len(server_roles) == len(default_roles)\n\n List of server role names currently exposed in the CFME interface:\n\n - automate\n - ems_metrics_coordinator\n - ems_metrics_collector\n - ems_metrics_processor\n - database_operations\n - database_synchronization\n - event\n - ems_inventory\n - ems_operations\n - notifier\n - reporting\n - scheduler\n - smartproxy\n - smartstate\n - user_interface\n - web_services\n\n \"\"\"\n\n if 'server_roles' in fixtureconf:\n roles_list = list(fixtureconf['server_roles'])\n elif 'server_roles_cfmedata' in fixtureconf:\n roles_list = cfme_data.data\n # Drills down into cfme_data YAML by selector, expecting a list\n # of roles at the end. 
A KeyError here probably means the YAMe\n # selector is wrong\n for selector in fixtureconf['server_roles_cfmedata']:\n roles_list = roles_list[selector]\n else:\n raise Exception('server_roles config not found on test callable')\n\n # Deselecting the user interface role is really un-fun, and is\n # counterproductive in the middle of user interface testing.\n if 'user_interface' not in roles_list:\n raise Exception('Refusing to remove the user_interface role')\n\n # Nav to the settings tab\n settings_pg = cnf_configuration_pg.click_on_settings()\n # Workaround to rudely bypass a popup that sometimes appears for\n # unknown reasons.\n # See also: https://github.com/RedHatQE/cfme_tests/issues/168\n from pages.configuration_subpages.settings_subpages.server_settings import ServerSettings\n server_settings_pg = ServerSettings(settings_pg.testsetup)\n # sst is a configuration_subpages.settings_subpages.server_settings_subpages.\n # server_settings_tab.ServerSettingsTab\n sst = server_settings_pg.click_on_server_tab()\n\n # Set the roles!\n if sorted(sst.selected_server_role_names) != sorted(roles_list):\n sst.set_server_roles(roles_list)\n sst.save()\n sst._wait_for_results_refresh()\n else:\n logger.info('Server roles match configured fixture roles, not changing server roles')\n\n # If this assert fails, check roles names for typos or other minor differences\n Assert.equal(sorted(sst.selected_server_role_names), sorted(roles_list))\n\n return sst.selected_server_role_names\n", "path": "fixtures/server_roles.py"}]} | 1,614 | 363 |
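The merged workaround above simply bypasses the tree-node navigation, while the issue itself asks for the popup to be safely and conditionally ignored if it appears. A generic way to express that conditional dismissal with plain Selenium is sketched below; it is not the project's page-object API, just an illustration of the pattern the eventual fix would need.

```python
# Generic sketch of "dismiss the popup only if it shows up", using plain Selenium.
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


def dismiss_popup_if_present(driver, timeout=2):
    """Dismiss a javascript alert if one appears within `timeout` seconds."""
    try:
        alert = WebDriverWait(driver, timeout).until(EC.alert_is_present())
        alert.dismiss()
        return True
    except TimeoutException:
        return False
```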
gh_patches_debug_18842 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6169 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
broken match histograms gallery example
## Description
This bug was reported here: https://forum.image.sc/t/exposure-match-histograms-broken/61682
Most likely this is due to some unintended change in scaling that was introduced when adding the float32 support, so hopefully it is an easy fix. I can take a look soon.
## Way to reproduce
Run the following demo with v0.19.0 or v0.19.1: https://scikit-image.org/docs/stable/auto_examples/color_exposure/plot_histogram_matching.html#sphx-glr-auto-examples-color-exposure-plot-histogram-matching-py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/exposure/histogram_matching.py`
Content:
```
1 import numpy as np
2
3 from .._shared import utils
4
5
6 def _match_cumulative_cdf(source, template):
7 """
8 Return modified source array so that the cumulative density function of
9 its values matches the cumulative density function of the template.
10 """
11 src_values, src_unique_indices, src_counts = np.unique(source.ravel(),
12 return_inverse=True,
13 return_counts=True)
14 tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True)
15
16 # calculate normalized quantiles for each array
17 src_quantiles = np.cumsum(src_counts) / source.size
18 tmpl_quantiles = np.cumsum(tmpl_counts) / template.size
19
20 interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values)
21 return interp_a_values[src_unique_indices].reshape(source.shape)
22
23
24 @utils.channel_as_last_axis(channel_arg_positions=(0, 1))
25 @utils.deprecate_multichannel_kwarg()
26 def match_histograms(image, reference, *, channel_axis=None,
27 multichannel=False):
28 """Adjust an image so that its cumulative histogram matches that of another.
29
30 The adjustment is applied separately for each channel.
31
32 Parameters
33 ----------
34 image : ndarray
35 Input image. Can be gray-scale or in color.
36 reference : ndarray
37 Image to match histogram of. Must have the same number of channels as
38 image.
39 channel_axis : int or None, optional
40 If None, the image is assumed to be a grayscale (single channel) image.
41 Otherwise, this parameter indicates which axis of the array corresponds
42 to channels.
43 multichannel : bool, optional
44 Apply the matching separately for each channel. This argument is
45 deprecated: specify `channel_axis` instead.
46
47 Returns
48 -------
49 matched : ndarray
50 Transformed input image.
51
52 Raises
53 ------
54 ValueError
55 Thrown when the number of channels in the input image and the reference
56 differ.
57
58 References
59 ----------
60 .. [1] http://paulbourke.net/miscellaneous/equalisation/
61
62 """
63 if image.ndim != reference.ndim:
64 raise ValueError('Image and reference must have the same number '
65 'of channels.')
66 out_dtype = utils._supported_float_type(image.dtype)
67
68 if channel_axis is not None:
69 if image.shape[-1] != reference.shape[-1]:
70 raise ValueError('Number of channels in the input image and '
71 'reference image must match!')
72
73 matched = np.empty(image.shape, dtype=image.dtype)
74 for channel in range(image.shape[-1]):
75 matched_channel = _match_cumulative_cdf(image[..., channel],
76 reference[..., channel])
77 matched[..., channel] = matched_channel
78 else:
79 # _match_cumulative_cdf will always return float64 due to np.interp
80 matched = _match_cumulative_cdf(image, reference)
81
82 return matched.astype(out_dtype, copy=False)
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/exposure/histogram_matching.py b/skimage/exposure/histogram_matching.py
--- a/skimage/exposure/histogram_matching.py
+++ b/skimage/exposure/histogram_matching.py
@@ -63,7 +63,6 @@
if image.ndim != reference.ndim:
raise ValueError('Image and reference must have the same number '
'of channels.')
- out_dtype = utils._supported_float_type(image.dtype)
if channel_axis is not None:
if image.shape[-1] != reference.shape[-1]:
@@ -79,4 +78,8 @@
# _match_cumulative_cdf will always return float64 due to np.interp
matched = _match_cumulative_cdf(image, reference)
- return matched.astype(out_dtype, copy=False)
+ if matched.dtype.kind == 'f':
+ # output a float32 result when the input is float16 or float32
+ out_dtype = utils._supported_float_type(image.dtype)
+ matched = matched.astype(out_dtype, copy=False)
+ return matched
| {"golden_diff": "diff --git a/skimage/exposure/histogram_matching.py b/skimage/exposure/histogram_matching.py\n--- a/skimage/exposure/histogram_matching.py\n+++ b/skimage/exposure/histogram_matching.py\n@@ -63,7 +63,6 @@\n if image.ndim != reference.ndim:\n raise ValueError('Image and reference must have the same number '\n 'of channels.')\n- out_dtype = utils._supported_float_type(image.dtype)\n \n if channel_axis is not None:\n if image.shape[-1] != reference.shape[-1]:\n@@ -79,4 +78,8 @@\n # _match_cumulative_cdf will always return float64 due to np.interp\n matched = _match_cumulative_cdf(image, reference)\n \n- return matched.astype(out_dtype, copy=False)\n+ if matched.dtype.kind == 'f':\n+ # output a float32 result when the input is float16 or float32\n+ out_dtype = utils._supported_float_type(image.dtype)\n+ matched = matched.astype(out_dtype, copy=False)\n+ return matched\n", "issue": "broken match histograms gallery example\n## Description\r\n\r\nThis bug was reported here: https://forum.image.sc/t/exposure-match-histograms-broken/61682\r\n\r\nMost likely this is due to some unintended change in scaling that was introduced when adding the float32 support, so hopefully it is an easy fix. I can take a look soon.\r\n\r\n## Way to reproduce\r\n\r\nRun the following demo with v0.19.0 or v0.19.1: https://scikit-image.org/docs/stable/auto_examples/color_exposure/plot_histogram_matching.html#sphx-glr-auto-examples-color-exposure-plot-histogram-matching-py\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom .._shared import utils\n\n\ndef _match_cumulative_cdf(source, template):\n \"\"\"\n Return modified source array so that the cumulative density function of\n its values matches the cumulative density function of the template.\n \"\"\"\n src_values, src_unique_indices, src_counts = np.unique(source.ravel(),\n return_inverse=True,\n return_counts=True)\n tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True)\n\n # calculate normalized quantiles for each array\n src_quantiles = np.cumsum(src_counts) / source.size\n tmpl_quantiles = np.cumsum(tmpl_counts) / template.size\n\n interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values)\n return interp_a_values[src_unique_indices].reshape(source.shape)\n\n\[email protected]_as_last_axis(channel_arg_positions=(0, 1))\[email protected]_multichannel_kwarg()\ndef match_histograms(image, reference, *, channel_axis=None,\n multichannel=False):\n \"\"\"Adjust an image so that its cumulative histogram matches that of another.\n\n The adjustment is applied separately for each channel.\n\n Parameters\n ----------\n image : ndarray\n Input image. Can be gray-scale or in color.\n reference : ndarray\n Image to match histogram of. Must have the same number of channels as\n image.\n channel_axis : int or None, optional\n If None, the image is assumed to be a grayscale (single channel) image.\n Otherwise, this parameter indicates which axis of the array corresponds\n to channels.\n multichannel : bool, optional\n Apply the matching separately for each channel. This argument is\n deprecated: specify `channel_axis` instead.\n\n Returns\n -------\n matched : ndarray\n Transformed input image.\n\n Raises\n ------\n ValueError\n Thrown when the number of channels in the input image and the reference\n differ.\n\n References\n ----------\n .. 
[1] http://paulbourke.net/miscellaneous/equalisation/\n\n \"\"\"\n if image.ndim != reference.ndim:\n raise ValueError('Image and reference must have the same number '\n 'of channels.')\n out_dtype = utils._supported_float_type(image.dtype)\n\n if channel_axis is not None:\n if image.shape[-1] != reference.shape[-1]:\n raise ValueError('Number of channels in the input image and '\n 'reference image must match!')\n\n matched = np.empty(image.shape, dtype=image.dtype)\n for channel in range(image.shape[-1]):\n matched_channel = _match_cumulative_cdf(image[..., channel],\n reference[..., channel])\n matched[..., channel] = matched_channel\n else:\n # _match_cumulative_cdf will always return float64 due to np.interp\n matched = _match_cumulative_cdf(image, reference)\n\n return matched.astype(out_dtype, copy=False)\n", "path": "skimage/exposure/histogram_matching.py"}], "after_files": [{"content": "import numpy as np\n\nfrom .._shared import utils\n\n\ndef _match_cumulative_cdf(source, template):\n \"\"\"\n Return modified source array so that the cumulative density function of\n its values matches the cumulative density function of the template.\n \"\"\"\n src_values, src_unique_indices, src_counts = np.unique(source.ravel(),\n return_inverse=True,\n return_counts=True)\n tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True)\n\n # calculate normalized quantiles for each array\n src_quantiles = np.cumsum(src_counts) / source.size\n tmpl_quantiles = np.cumsum(tmpl_counts) / template.size\n\n interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values)\n return interp_a_values[src_unique_indices].reshape(source.shape)\n\n\[email protected]_as_last_axis(channel_arg_positions=(0, 1))\[email protected]_multichannel_kwarg()\ndef match_histograms(image, reference, *, channel_axis=None,\n multichannel=False):\n \"\"\"Adjust an image so that its cumulative histogram matches that of another.\n\n The adjustment is applied separately for each channel.\n\n Parameters\n ----------\n image : ndarray\n Input image. Can be gray-scale or in color.\n reference : ndarray\n Image to match histogram of. Must have the same number of channels as\n image.\n channel_axis : int or None, optional\n If None, the image is assumed to be a grayscale (single channel) image.\n Otherwise, this parameter indicates which axis of the array corresponds\n to channels.\n multichannel : bool, optional\n Apply the matching separately for each channel. This argument is\n deprecated: specify `channel_axis` instead.\n\n Returns\n -------\n matched : ndarray\n Transformed input image.\n\n Raises\n ------\n ValueError\n Thrown when the number of channels in the input image and the reference\n differ.\n\n References\n ----------\n .. 
[1] http://paulbourke.net/miscellaneous/equalisation/\n\n \"\"\"\n if image.ndim != reference.ndim:\n raise ValueError('Image and reference must have the same number '\n 'of channels.')\n\n if channel_axis is not None:\n if image.shape[-1] != reference.shape[-1]:\n raise ValueError('Number of channels in the input image and '\n 'reference image must match!')\n\n matched = np.empty(image.shape, dtype=image.dtype)\n for channel in range(image.shape[-1]):\n matched_channel = _match_cumulative_cdf(image[..., channel],\n reference[..., channel])\n matched[..., channel] = matched_channel\n else:\n # _match_cumulative_cdf will always return float64 due to np.interp\n matched = _match_cumulative_cdf(image, reference)\n\n if matched.dtype.kind == 'f':\n # output a float32 result when the input is float16 or float32\n out_dtype = utils._supported_float_type(image.dtype)\n matched = matched.astype(out_dtype, copy=False)\n return matched\n", "path": "skimage/exposure/histogram_matching.py"}]} | 1,179 | 239 |
gh_patches_debug_49612 | rasdani/github-patches | git_diff | StackStorm__st2-5306 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a CI lint task to check st2client's README.md
We need to make sure that the st2client `README.rst` file is acceptable to PyPI, since any syntax errors in it will cause the `push_st2client` task of the `st2cd.st2_finalize_release` workflow to fail.
We can check the syntax using the same renderer that PyPI itself uses:
```bash
# Use the same README renderer that PyPI uses to catch syntax issues in the
# README.rst file # st2client uses README.rst
# https://pypi.org/help/#description-content-type
# https://pypi.org/project/readme-renderer
# https://packaging.python.org/tutorials/packaging-projects/#description
echo "Checking README.rst syntax"
virtualenv venv-st2client-readme-checker
. venv-st2client-readme-checker/bin/activate
pip install --upgrade readme_renderer
python -m readme_renderer README.rst
deactivate
```
It would be nice if we could catch these errors before release, which means that we should create a step in our CI tooling to check it before any bad changes get merged.
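A sketch of such a pre-merge check as a small Python helper is below; the file path, function name, and the exact `readme_renderer` API used are assumptions on our part, not an existing st2 CI target:
```python
# Hypothetical lint step: render README.rst the way PyPI does and fail loudly
# if it does not render (requires `pip install readme_renderer`).
import sys

from readme_renderer.rst import render


def check_readme(path="st2client/README.rst"):
    with open(path, encoding="utf-8") as f:
        source = f.read()
    # render() is assumed to return the generated HTML, or None when the reST
    # is invalid, writing docutils warnings to `stream`.
    if render(source, stream=sys.stderr) is None:
        sys.exit("%s failed to render; PyPI would reject this description" % path)
    print("%s renders cleanly" % path)


if __name__ == "__main__":
    check_readme(sys.argv[1] if len(sys.argv) > 1 else "st2client/README.rst")
```
Wiring a step like this into the existing lint targets would surface the problem at review time instead of during the release workflow.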
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2client/setup.py`
Content:
```
1 #!/usr/bin/env python3
2 # Copyright 2020 The StackStorm Authors.
3 # Copyright 2019 Extreme Networks, Inc.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import os.path
18
19 from setuptools import setup, find_packages
20
21 from dist_utils import check_pip_version
22 from dist_utils import fetch_requirements
23 from dist_utils import apply_vagrant_workaround
24
25 from st2client import __version__
26
27 check_pip_version()
28
29 ST2_COMPONENT = "st2client"
30 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
31 REQUIREMENTS_FILE = os.path.join(BASE_DIR, "requirements.txt")
32 README_FILE = os.path.join(BASE_DIR, "README.rst")
33
34 install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)
35 apply_vagrant_workaround()
36
37 with open(README_FILE) as f:
38 readme = f.read()
39
40 setup(
41 name=ST2_COMPONENT,
42 version=__version__,
43 description=(
44 "Python client library and CLI for the StackStorm (st2) event-driven "
45 "automation platform."
46 ),
47 long_description=readme,
48 author="StackStorm",
49 author_email="[email protected]",
50 url="https://stackstorm.com/",
51 classifiers=[
52 "Development Status :: 5 - Production/Stable",
53 "Intended Audience :: Information Technology",
54 "Intended Audience :: Developers",
55 "Intended Audience :: System Administrators",
56 "License :: OSI Approved :: Apache Software License",
57 "Operating System :: POSIX :: Linux",
58 "Programming Language :: Python",
59 "Programming Language :: Python :: 3",
60 "Programming Language :: Python :: 3.6",
61 ],
62 install_requires=install_reqs,
63 dependency_links=dep_links,
64 test_suite=ST2_COMPONENT,
65 zip_safe=False,
66 include_package_data=True,
67 packages=find_packages(exclude=["setuptools", "tests"]),
68 entry_points={"console_scripts": ["st2 = st2client.shell:main"]},
69 project_urls={
70 "Pack Exchange": "https://exchange.stackstorm.org",
71 "Repository": "https://github.com/StackStorm/st2",
72 "Documentation": "https://docs.stackstorm.com",
73 "Community": "https://stackstorm.com/community-signup",
74 "Questions": "https://forum.stackstorm.com/",
75 "Donate": "https://funding.communitybridge.org/projects/stackstorm",
76 "News/Blog": "https://stackstorm.com/blog",
77 "Security": "https://docs.stackstorm.com/latest/security.html",
78 "Bug Reports": "https://github.com/StackStorm/st2/issues",
79 },
80 )
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/st2client/setup.py b/st2client/setup.py
--- a/st2client/setup.py
+++ b/st2client/setup.py
@@ -45,6 +45,7 @@
"automation platform."
),
long_description=readme,
+ long_description_content_type="text/x-rst",
author="StackStorm",
author_email="[email protected]",
url="https://stackstorm.com/",
| {"golden_diff": "diff --git a/st2client/setup.py b/st2client/setup.py\n--- a/st2client/setup.py\n+++ b/st2client/setup.py\n@@ -45,6 +45,7 @@\n \"automation platform.\"\n ),\n long_description=readme,\n+ long_description_content_type=\"text/x-rst\",\n author=\"StackStorm\",\n author_email=\"[email protected]\",\n url=\"https://stackstorm.com/\",\n", "issue": "Add a CI lint task to check st2client's README.md\nWe need to make sure that the st2client `README.rst` file is acceptable to PyPI, since any syntax errors in it will cause the `push_st2client` task of the `st2cd.st2_finalize_release` workflow to fail.\r\n\r\nWe can check the syntax using the same renderer that PyPI itself uses:\r\n\r\n```bash\r\n# Use the same README renderer that PyPI uses to catch syntax issues in the\r\n# README.rst file # st2client uses README.rst\r\n# https://pypi.org/help/#description-content-type\r\n# https://pypi.org/project/readme-renderer\r\n# https://packaging.python.org/tutorials/packaging-projects/#description\r\necho \"Checking README.rst syntax\"\r\nvirtualenv venv-st2client-readme-checker\r\n. venv-st2client-readme-checker/bin/activate\r\npip install --upgrade readme_renderer\r\npython -m readme_renderer README.rst\r\ndeactivate\r\n```\r\n\r\nIt would be nice if we could catch these errors before release, which means that we should create a step in our CI tooling to check it before any bad changes get merged.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import check_pip_version\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\n\nfrom st2client import __version__\n\ncheck_pip_version()\n\nST2_COMPONENT = \"st2client\"\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, \"requirements.txt\")\nREADME_FILE = os.path.join(BASE_DIR, \"README.rst\")\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\napply_vagrant_workaround()\n\nwith open(README_FILE) as f:\n readme = f.read()\n\nsetup(\n name=ST2_COMPONENT,\n version=__version__,\n description=(\n \"Python client library and CLI for the StackStorm (st2) event-driven \"\n \"automation platform.\"\n ),\n long_description=readme,\n author=\"StackStorm\",\n author_email=\"[email protected]\",\n url=\"https://stackstorm.com/\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n install_requires=install_reqs,\n dependency_links=dep_links,\n 
test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=[\"setuptools\", \"tests\"]),\n entry_points={\"console_scripts\": [\"st2 = st2client.shell:main\"]},\n project_urls={\n \"Pack Exchange\": \"https://exchange.stackstorm.org\",\n \"Repository\": \"https://github.com/StackStorm/st2\",\n \"Documentation\": \"https://docs.stackstorm.com\",\n \"Community\": \"https://stackstorm.com/community-signup\",\n \"Questions\": \"https://forum.stackstorm.com/\",\n \"Donate\": \"https://funding.communitybridge.org/projects/stackstorm\",\n \"News/Blog\": \"https://stackstorm.com/blog\",\n \"Security\": \"https://docs.stackstorm.com/latest/security.html\",\n \"Bug Reports\": \"https://github.com/StackStorm/st2/issues\",\n },\n)\n", "path": "st2client/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import check_pip_version\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\n\nfrom st2client import __version__\n\ncheck_pip_version()\n\nST2_COMPONENT = \"st2client\"\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, \"requirements.txt\")\nREADME_FILE = os.path.join(BASE_DIR, \"README.rst\")\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\napply_vagrant_workaround()\n\nwith open(README_FILE) as f:\n readme = f.read()\n\nsetup(\n name=ST2_COMPONENT,\n version=__version__,\n description=(\n \"Python client library and CLI for the StackStorm (st2) event-driven \"\n \"automation platform.\"\n ),\n long_description=readme,\n long_description_content_type=\"text/x-rst\",\n author=\"StackStorm\",\n author_email=\"[email protected]\",\n url=\"https://stackstorm.com/\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n install_requires=install_reqs,\n dependency_links=dep_links,\n test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=[\"setuptools\", \"tests\"]),\n entry_points={\"console_scripts\": [\"st2 = st2client.shell:main\"]},\n project_urls={\n \"Pack Exchange\": \"https://exchange.stackstorm.org\",\n \"Repository\": \"https://github.com/StackStorm/st2\",\n \"Documentation\": \"https://docs.stackstorm.com\",\n \"Community\": \"https://stackstorm.com/community-signup\",\n \"Questions\": \"https://forum.stackstorm.com/\",\n \"Donate\": 
\"https://funding.communitybridge.org/projects/stackstorm\",\n \"News/Blog\": \"https://stackstorm.com/blog\",\n \"Security\": \"https://docs.stackstorm.com/latest/security.html\",\n \"Bug Reports\": \"https://github.com/StackStorm/st2/issues\",\n },\n)\n", "path": "st2client/setup.py"}]} | 1,325 | 94 |
gh_patches_debug_9236 | rasdani/github-patches | git_diff | beetbox__beets-1435 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
embedart plugin disregards "write: no" and "-W" config opts
Any import action with `embedart` enabled causes a full write of tags and art, even when the config file specifies `write: no` or the command line includes `-W`.
CAUSE: `art.py` does not check config option `import/write` before calling `try_write`.
SUGGESTED FIX: check config option first! :) PR will be attached presently.
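For illustration, a minimal sketch of that guard using beets' global config object (the helper name is ours; the accepted fix applies the same check inside the plugin's `process_album` hook, as the diff further down shows):
```python
# Hypothetical helper: only auto-embed when the importer is actually writing
# tags, i.e. honour `write: no` in the config file and `-W` on the command line.
from beets import config


def should_embed(plugin_config):
    """plugin_config is the embedart plugin's own config view."""
    return plugin_config['auto'].get(bool) and config['import']['write'].get(bool)
```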
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/embedart.py`
Content:
```
1 # This file is part of beets.
2 # Copyright 2015, Adrian Sampson.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """Allows beets to embed album art into file metadata."""
16 from __future__ import (division, absolute_import, print_function,
17 unicode_literals)
18
19 import os.path
20
21 from beets.plugins import BeetsPlugin
22 from beets import ui
23 from beets.ui import decargs
24 from beets.util import syspath, normpath, displayable_path, bytestring_path
25 from beets.util.artresizer import ArtResizer
26 from beets import config
27 from beets import art
28
29
30 class EmbedCoverArtPlugin(BeetsPlugin):
31 """Allows albumart to be embedded into the actual files.
32 """
33 def __init__(self):
34 super(EmbedCoverArtPlugin, self).__init__()
35 self.config.add({
36 'maxwidth': 0,
37 'auto': True,
38 'compare_threshold': 0,
39 'ifempty': False,
40 })
41
42 if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:
43 self.config['maxwidth'] = 0
44 self._log.warning(u"ImageMagick or PIL not found; "
45 u"'maxwidth' option ignored")
46 if self.config['compare_threshold'].get(int) and not \
47 ArtResizer.shared.can_compare:
48 self.config['compare_threshold'] = 0
49 self._log.warning(u"ImageMagick 6.8.7 or higher not installed; "
50 u"'compare_threshold' option ignored")
51
52 self.register_listener('art_set', self.process_album)
53
54 def commands(self):
55 # Embed command.
56 embed_cmd = ui.Subcommand(
57 'embedart', help='embed image files into file metadata'
58 )
59 embed_cmd.parser.add_option(
60 '-f', '--file', metavar='PATH', help='the image file to embed'
61 )
62 maxwidth = self.config['maxwidth'].get(int)
63 compare_threshold = self.config['compare_threshold'].get(int)
64 ifempty = self.config['ifempty'].get(bool)
65
66 def embed_func(lib, opts, args):
67 if opts.file:
68 imagepath = normpath(opts.file)
69 if not os.path.isfile(syspath(imagepath)):
70 raise ui.UserError(u'image file {0} not found'.format(
71 displayable_path(imagepath)
72 ))
73 for item in lib.items(decargs(args)):
74 art.embed_item(self._log, item, imagepath, maxwidth, None,
75 compare_threshold, ifempty)
76 else:
77 for album in lib.albums(decargs(args)):
78 art.embed_album(self._log, album, maxwidth, False,
79 compare_threshold, ifempty)
80
81 embed_cmd.func = embed_func
82
83 # Extract command.
84 extract_cmd = ui.Subcommand('extractart',
85 help='extract an image from file metadata')
86 extract_cmd.parser.add_option('-o', dest='outpath',
87 help='image output file')
88 extract_cmd.parser.add_option('-n', dest='filename',
89 help='image filename to create for all '
90 'matched albums')
91 extract_cmd.parser.add_option('-a', dest='associate',
92 action='store_true',
93 help='associate the extracted images '
94 'with the album')
95
96 def extract_func(lib, opts, args):
97 if opts.outpath:
98 art.extract_first(self._log, normpath(opts.outpath),
99 lib.items(decargs(args)))
100 else:
101 filename = bytestring_path(opts.filename or
102 config['art_filename'].get())
103 if os.path.dirname(filename) != '':
104 self._log.error(u"Only specify a name rather than a path "
105 u"for -n")
106 return
107 for album in lib.albums(decargs(args)):
108 artpath = normpath(os.path.join(album.path, filename))
109 artpath = art.extract_first(self._log, artpath,
110 album.items())
111 if artpath and opts.associate:
112 album.set_art(artpath)
113 album.store()
114 extract_cmd.func = extract_func
115
116 # Clear command.
117 clear_cmd = ui.Subcommand('clearart',
118 help='remove images from file metadata')
119
120 def clear_func(lib, opts, args):
121 art.clear(self._log, lib, decargs(args))
122 clear_cmd.func = clear_func
123
124 return [embed_cmd, extract_cmd, clear_cmd]
125
126 def process_album(self, album):
127 """Automatically embed art after art has been set
128 """
129 if self.config['auto']:
130 max_width = self.config['maxwidth'].get(int)
131 art.embed_album(self._log, album, max_width, True,
132 self.config['compare_threshold'].get(int),
133 self.config['ifempty'].get(bool))
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/embedart.py b/beetsplug/embedart.py
--- a/beetsplug/embedart.py
+++ b/beetsplug/embedart.py
@@ -126,7 +126,7 @@
def process_album(self, album):
"""Automatically embed art after art has been set
"""
- if self.config['auto']:
+ if self.config['auto'] and config['import']['write']:
max_width = self.config['maxwidth'].get(int)
art.embed_album(self._log, album, max_width, True,
self.config['compare_threshold'].get(int),
| {"golden_diff": "diff --git a/beetsplug/embedart.py b/beetsplug/embedart.py\n--- a/beetsplug/embedart.py\n+++ b/beetsplug/embedart.py\n@@ -126,7 +126,7 @@\n def process_album(self, album):\n \"\"\"Automatically embed art after art has been set\n \"\"\"\n- if self.config['auto']:\n+ if self.config['auto'] and config['import']['write']:\n max_width = self.config['maxwidth'].get(int)\n art.embed_album(self._log, album, max_width, True,\n self.config['compare_threshold'].get(int),\n", "issue": "embedart plugin disregards \"write: no\" and \"-W\" config opts\nAny import action with `embedart` enabled causes a full write of tags and art, even when config file specifies `write: no`, or commandline includes `-W`.\n\nCAUSE: `art.py` does not check config option `import/write` before calling `try_write`.\n\nSUGGESTED FIX: check config option first! :) PR will be attached presently.\n\nembedart plugin disregards \"write: no\" and \"-W\" config opts\nAny import action with `embedart` enabled causes a full write of tags and art, even when config file specifies `write: no`, or commandline includes `-W`.\n\nCAUSE: `art.py` does not check config option `import/write` before calling `try_write`.\n\nSUGGESTED FIX: check config option first! :) PR will be attached presently.\n\nembedart plugin disregards \"write: no\" and \"-W\" config opts\nAny import action with `embedart` enabled causes a full write of tags and art, even when config file specifies `write: no`, or commandline includes `-W`.\n\nCAUSE: `art.py` does not check config option `import/write` before calling `try_write`.\n\nSUGGESTED FIX: check config option first! :) PR will be attached presently.\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Allows beets to embed album art into file metadata.\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport os.path\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import ui\nfrom beets.ui import decargs\nfrom beets.util import syspath, normpath, displayable_path, bytestring_path\nfrom beets.util.artresizer import ArtResizer\nfrom beets import config\nfrom beets import art\n\n\nclass EmbedCoverArtPlugin(BeetsPlugin):\n \"\"\"Allows albumart to be embedded into the actual files.\n \"\"\"\n def __init__(self):\n super(EmbedCoverArtPlugin, self).__init__()\n self.config.add({\n 'maxwidth': 0,\n 'auto': True,\n 'compare_threshold': 0,\n 'ifempty': False,\n })\n\n if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:\n self.config['maxwidth'] = 0\n self._log.warning(u\"ImageMagick or PIL not found; \"\n u\"'maxwidth' option ignored\")\n if self.config['compare_threshold'].get(int) and not \\\n ArtResizer.shared.can_compare:\n self.config['compare_threshold'] = 0\n self._log.warning(u\"ImageMagick 6.8.7 or higher not installed; \"\n u\"'compare_threshold' option ignored\")\n\n 
self.register_listener('art_set', self.process_album)\n\n def commands(self):\n # Embed command.\n embed_cmd = ui.Subcommand(\n 'embedart', help='embed image files into file metadata'\n )\n embed_cmd.parser.add_option(\n '-f', '--file', metavar='PATH', help='the image file to embed'\n )\n maxwidth = self.config['maxwidth'].get(int)\n compare_threshold = self.config['compare_threshold'].get(int)\n ifempty = self.config['ifempty'].get(bool)\n\n def embed_func(lib, opts, args):\n if opts.file:\n imagepath = normpath(opts.file)\n if not os.path.isfile(syspath(imagepath)):\n raise ui.UserError(u'image file {0} not found'.format(\n displayable_path(imagepath)\n ))\n for item in lib.items(decargs(args)):\n art.embed_item(self._log, item, imagepath, maxwidth, None,\n compare_threshold, ifempty)\n else:\n for album in lib.albums(decargs(args)):\n art.embed_album(self._log, album, maxwidth, False,\n compare_threshold, ifempty)\n\n embed_cmd.func = embed_func\n\n # Extract command.\n extract_cmd = ui.Subcommand('extractart',\n help='extract an image from file metadata')\n extract_cmd.parser.add_option('-o', dest='outpath',\n help='image output file')\n extract_cmd.parser.add_option('-n', dest='filename',\n help='image filename to create for all '\n 'matched albums')\n extract_cmd.parser.add_option('-a', dest='associate',\n action='store_true',\n help='associate the extracted images '\n 'with the album')\n\n def extract_func(lib, opts, args):\n if opts.outpath:\n art.extract_first(self._log, normpath(opts.outpath),\n lib.items(decargs(args)))\n else:\n filename = bytestring_path(opts.filename or\n config['art_filename'].get())\n if os.path.dirname(filename) != '':\n self._log.error(u\"Only specify a name rather than a path \"\n u\"for -n\")\n return\n for album in lib.albums(decargs(args)):\n artpath = normpath(os.path.join(album.path, filename))\n artpath = art.extract_first(self._log, artpath,\n album.items())\n if artpath and opts.associate:\n album.set_art(artpath)\n album.store()\n extract_cmd.func = extract_func\n\n # Clear command.\n clear_cmd = ui.Subcommand('clearart',\n help='remove images from file metadata')\n\n def clear_func(lib, opts, args):\n art.clear(self._log, lib, decargs(args))\n clear_cmd.func = clear_func\n\n return [embed_cmd, extract_cmd, clear_cmd]\n\n def process_album(self, album):\n \"\"\"Automatically embed art after art has been set\n \"\"\"\n if self.config['auto']:\n max_width = self.config['maxwidth'].get(int)\n art.embed_album(self._log, album, max_width, True,\n self.config['compare_threshold'].get(int),\n self.config['ifempty'].get(bool))\n", "path": "beetsplug/embedart.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Allows beets to embed album art into file metadata.\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport os.path\n\nfrom beets.plugins import 
BeetsPlugin\nfrom beets import ui\nfrom beets.ui import decargs\nfrom beets.util import syspath, normpath, displayable_path, bytestring_path\nfrom beets.util.artresizer import ArtResizer\nfrom beets import config\nfrom beets import art\n\n\nclass EmbedCoverArtPlugin(BeetsPlugin):\n \"\"\"Allows albumart to be embedded into the actual files.\n \"\"\"\n def __init__(self):\n super(EmbedCoverArtPlugin, self).__init__()\n self.config.add({\n 'maxwidth': 0,\n 'auto': True,\n 'compare_threshold': 0,\n 'ifempty': False,\n })\n\n if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:\n self.config['maxwidth'] = 0\n self._log.warning(u\"ImageMagick or PIL not found; \"\n u\"'maxwidth' option ignored\")\n if self.config['compare_threshold'].get(int) and not \\\n ArtResizer.shared.can_compare:\n self.config['compare_threshold'] = 0\n self._log.warning(u\"ImageMagick 6.8.7 or higher not installed; \"\n u\"'compare_threshold' option ignored\")\n\n self.register_listener('art_set', self.process_album)\n\n def commands(self):\n # Embed command.\n embed_cmd = ui.Subcommand(\n 'embedart', help='embed image files into file metadata'\n )\n embed_cmd.parser.add_option(\n '-f', '--file', metavar='PATH', help='the image file to embed'\n )\n maxwidth = self.config['maxwidth'].get(int)\n compare_threshold = self.config['compare_threshold'].get(int)\n ifempty = self.config['ifempty'].get(bool)\n\n def embed_func(lib, opts, args):\n if opts.file:\n imagepath = normpath(opts.file)\n if not os.path.isfile(syspath(imagepath)):\n raise ui.UserError(u'image file {0} not found'.format(\n displayable_path(imagepath)\n ))\n for item in lib.items(decargs(args)):\n art.embed_item(self._log, item, imagepath, maxwidth, None,\n compare_threshold, ifempty)\n else:\n for album in lib.albums(decargs(args)):\n art.embed_album(self._log, album, maxwidth, False,\n compare_threshold, ifempty)\n\n embed_cmd.func = embed_func\n\n # Extract command.\n extract_cmd = ui.Subcommand('extractart',\n help='extract an image from file metadata')\n extract_cmd.parser.add_option('-o', dest='outpath',\n help='image output file')\n extract_cmd.parser.add_option('-n', dest='filename',\n help='image filename to create for all '\n 'matched albums')\n extract_cmd.parser.add_option('-a', dest='associate',\n action='store_true',\n help='associate the extracted images '\n 'with the album')\n\n def extract_func(lib, opts, args):\n if opts.outpath:\n art.extract_first(self._log, normpath(opts.outpath),\n lib.items(decargs(args)))\n else:\n filename = bytestring_path(opts.filename or\n config['art_filename'].get())\n if os.path.dirname(filename) != '':\n self._log.error(u\"Only specify a name rather than a path \"\n u\"for -n\")\n return\n for album in lib.albums(decargs(args)):\n artpath = normpath(os.path.join(album.path, filename))\n artpath = art.extract_first(self._log, artpath,\n album.items())\n if artpath and opts.associate:\n album.set_art(artpath)\n album.store()\n extract_cmd.func = extract_func\n\n # Clear command.\n clear_cmd = ui.Subcommand('clearart',\n help='remove images from file metadata')\n\n def clear_func(lib, opts, args):\n art.clear(self._log, lib, decargs(args))\n clear_cmd.func = clear_func\n\n return [embed_cmd, extract_cmd, clear_cmd]\n\n def process_album(self, album):\n \"\"\"Automatically embed art after art has been set\n \"\"\"\n if self.config['auto'] and config['import']['write']:\n max_width = self.config['maxwidth'].get(int)\n art.embed_album(self._log, album, max_width, True,\n 
self.config['compare_threshold'].get(int),\n self.config['ifempty'].get(bool))\n", "path": "beetsplug/embedart.py"}]} | 1,962 | 133 |
gh_patches_debug_1313 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4853 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Confusing error message to end user
In https://github.com/rtfd/readthedocs.org/issues/4071#issuecomment-405939492 I realized that we are saying there is a problem parsing the YAML file, but the problem is in fact in one of the options set from the web admin dashboard.
Example:

There is no `requirements_file` entry in the YAML file (https://github.com/geopandas/geopandas/blob/master/readthedocs.yml) but it exists under the `Admin -> Advanced Settings` field form.
We need to improve this error to something more user-friendly that expresses the real error. It's not an error on parsing the YAML file. The file was parsed properly, but the problem is with one of the values from one of the fields.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/doc_builder/exceptions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Exceptions raised when building documentation."""
3
4 from __future__ import division, print_function, unicode_literals
5
6 from django.utils.translation import ugettext_noop
7
8
9 class BuildEnvironmentException(Exception):
10 message = None
11 status_code = None
12
13 def __init__(self, message=None, **kwargs):
14 self.status_code = kwargs.pop('status_code', None) or self.status_code or 1
15 message = message or self.get_default_message()
16 super(BuildEnvironmentException, self).__init__(message, **kwargs)
17
18 def get_default_message(self):
19 return self.message
20
21
22 class BuildEnvironmentError(BuildEnvironmentException):
23 GENERIC_WITH_BUILD_ID = ugettext_noop(
24 'There was a problem with Read the Docs while building your documentation. '
25 'Please try again later. '
26 'However, if this problem persists, '
27 'please report this to us with your build id ({build_id}).',
28 )
29
30
31 class BuildEnvironmentCreationFailed(BuildEnvironmentError):
32 message = ugettext_noop('Build environment creation failed')
33
34
35 class VersionLockedError(BuildEnvironmentError):
36 message = ugettext_noop('Version locked, retrying in 5 minutes.')
37 status_code = 423
38
39
40 class ProjectBuildsSkippedError(BuildEnvironmentError):
41 message = ugettext_noop('Builds for this project are temporarily disabled')
42
43
44 class YAMLParseError(BuildEnvironmentError):
45 GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(
46 'Problem parsing YAML configuration. {exception}',
47 )
48
49
50 class BuildTimeoutError(BuildEnvironmentError):
51 message = ugettext_noop('Build exited due to time out')
52
53
54 class BuildEnvironmentWarning(BuildEnvironmentException):
55 pass
56
57
58 class MkDocsYAMLParseError(BuildEnvironmentError):
59 GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(
60 'Problem parsing MkDocs YAML configuration. {exception}',
61 )
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/doc_builder/exceptions.py b/readthedocs/doc_builder/exceptions.py
--- a/readthedocs/doc_builder/exceptions.py
+++ b/readthedocs/doc_builder/exceptions.py
@@ -43,7 +43,7 @@
class YAMLParseError(BuildEnvironmentError):
GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(
- 'Problem parsing YAML configuration. {exception}',
+ 'Problem in your project\'s configuration. {exception}',
)
| {"golden_diff": "diff --git a/readthedocs/doc_builder/exceptions.py b/readthedocs/doc_builder/exceptions.py\n--- a/readthedocs/doc_builder/exceptions.py\n+++ b/readthedocs/doc_builder/exceptions.py\n@@ -43,7 +43,7 @@\n \n class YAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n- 'Problem parsing YAML configuration. {exception}',\n+ 'Problem in your project\\'s configuration. {exception}',\n )\n", "issue": "Confusing error message to end user\nIn https://github.com/rtfd/readthedocs.org/issues/4071#issuecomment-405939492 I realized that we are saying that we have a problem parsing the YAML file but the problem is in fact in one of the options set from the web admin dashboard.\r\n\r\nExample:\r\n\r\n\r\n\r\nThere is no `requirements_file` entry in the YAML file (https://github.com/geopandas/geopandas/blob/master/readthedocs.yml) but it exists under the `Admin -> Advanced Settings` field form.\r\n\r\nWe need to improve this error to something more user-friendly that expresses the real error. It's not an error on parsing the YAML file. The file was parsed properly, but the problem is with one of the values from one of the fields.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Exceptions raised when building documentation.\"\"\"\n\nfrom __future__ import division, print_function, unicode_literals\n\nfrom django.utils.translation import ugettext_noop\n\n\nclass BuildEnvironmentException(Exception):\n message = None\n status_code = None\n\n def __init__(self, message=None, **kwargs):\n self.status_code = kwargs.pop('status_code', None) or self.status_code or 1\n message = message or self.get_default_message()\n super(BuildEnvironmentException, self).__init__(message, **kwargs)\n\n def get_default_message(self):\n return self.message\n\n\nclass BuildEnvironmentError(BuildEnvironmentException):\n GENERIC_WITH_BUILD_ID = ugettext_noop(\n 'There was a problem with Read the Docs while building your documentation. '\n 'Please try again later. '\n 'However, if this problem persists, '\n 'please report this to us with your build id ({build_id}).',\n )\n\n\nclass BuildEnvironmentCreationFailed(BuildEnvironmentError):\n message = ugettext_noop('Build environment creation failed')\n\n\nclass VersionLockedError(BuildEnvironmentError):\n message = ugettext_noop('Version locked, retrying in 5 minutes.')\n status_code = 423\n\n\nclass ProjectBuildsSkippedError(BuildEnvironmentError):\n message = ugettext_noop('Builds for this project are temporarily disabled')\n\n\nclass YAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing YAML configuration. {exception}',\n )\n\n\nclass BuildTimeoutError(BuildEnvironmentError):\n message = ugettext_noop('Build exited due to time out')\n\n\nclass BuildEnvironmentWarning(BuildEnvironmentException):\n pass\n\n\nclass MkDocsYAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing MkDocs YAML configuration. 
{exception}',\n )\n", "path": "readthedocs/doc_builder/exceptions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Exceptions raised when building documentation.\"\"\"\n\nfrom __future__ import division, print_function, unicode_literals\n\nfrom django.utils.translation import ugettext_noop\n\n\nclass BuildEnvironmentException(Exception):\n message = None\n status_code = None\n\n def __init__(self, message=None, **kwargs):\n self.status_code = kwargs.pop('status_code', None) or self.status_code or 1\n message = message or self.get_default_message()\n super(BuildEnvironmentException, self).__init__(message, **kwargs)\n\n def get_default_message(self):\n return self.message\n\n\nclass BuildEnvironmentError(BuildEnvironmentException):\n GENERIC_WITH_BUILD_ID = ugettext_noop(\n 'There was a problem with Read the Docs while building your documentation. '\n 'Please try again later. '\n 'However, if this problem persists, '\n 'please report this to us with your build id ({build_id}).',\n )\n\n\nclass BuildEnvironmentCreationFailed(BuildEnvironmentError):\n message = ugettext_noop('Build environment creation failed')\n\n\nclass VersionLockedError(BuildEnvironmentError):\n message = ugettext_noop('Version locked, retrying in 5 minutes.')\n status_code = 423\n\n\nclass ProjectBuildsSkippedError(BuildEnvironmentError):\n message = ugettext_noop('Builds for this project are temporarily disabled')\n\n\nclass YAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem in your project\\'s configuration. {exception}',\n )\n\n\nclass BuildTimeoutError(BuildEnvironmentError):\n message = ugettext_noop('Build exited due to time out')\n\n\nclass BuildEnvironmentWarning(BuildEnvironmentException):\n pass\n\n\nclass MkDocsYAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing MkDocs YAML configuration. {exception}',\n )\n", "path": "readthedocs/doc_builder/exceptions.py"}]} | 1,031 | 105 |
gh_patches_debug_50216 | rasdani/github-patches | git_diff | pex-tool__pex-991 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.12
On the docket:
- [x] A PEX_EXTRA_SYS_PATH runtime variable #989
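As a reminder of what that variable does, a hedged usage sketch (the PEX file name and extra paths are made up, and it assumes `app.pex` has no entry point so it drops into an interpreter that accepts `-c`):
```python
# PEX_EXTRA_SYS_PATH is a pathsep-separated list of directories the PEX runtime
# appends to sys.path when the PEX boots.
import os
import subprocess

env = dict(os.environ, PEX_EXTRA_SYS_PATH="/opt/site-plugins:/srv/shared-libs")
subprocess.run(
    ["./app.pex", "-c", "import sys; print(sys.path[-2:])"],
    env=env,
    check=True,
)
```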
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.1.11'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.1.11'
+__version__ = '2.1.12'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.11'\n+__version__ = '2.1.12'\n", "issue": "Release 2.1.12\nOn the docket:\r\n- [x] A PEX_EXTRA_SYS_PATH runtime variable #989 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.11'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.12'\n", "path": "pex/version.py"}]} | 338 | 96 |
gh_patches_debug_14012 | rasdani/github-patches | git_diff | streamlink__streamlink-4355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.ard_live: Unable to parse MEDIAINFO
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
On streamlink 3.1.1 Linux:
```
[cli][info] Found matching plugin ard_live for URL https://live.daserste.de/
error: Unable to validate response text: Unable to parse MEDIAINFO: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE HTML>\n<html lang="de" i ...)
```
Streamlink 2.0.0 on Windows works fine. I can't find a working 2.0.0 Linux build to verify.
3.1.1 seems to expect a player URL at `https://live.daserste.de/live-de-102~playerJson.json`, while 2.0.0 expects `https://www.daserste.de/live/live-de-102~playerJson.json`.
Is there a commandline arg to override it?
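A quick way to confirm which endpoint actually serves the player JSON (a throwaway check of ours, not part of the plugin):
```python
# Probe both candidate URLs and report status plus content type; the failing
# 3.1.1 build requests the first one, the working 2.0.0 build the second.
import requests

for url in (
    "https://live.daserste.de/live-de-102~playerJson.json",
    "https://www.daserste.de/live/live-de-102~playerJson.json",
):
    r = requests.get(url, timeout=10)
    print(r.status_code, r.headers.get("content-type"), url)
```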
### Debug log
```text
[cli][debug] OS: Linux-5.15.2-arch1-1-x86_64-with-glibc2.35
[cli][debug] Python: 3.10.2
[cli][debug] Streamlink: 3.1.1
[cli][debug] Requests(2.27.0), Socks(1.7.1), Websocket(1.2.3)
[cli][debug] Arguments:
[cli][debug] url=https://live.daserste.de/
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin ard_live for URL https://live.daserste.de/
[plugins.ard_live][debug] Player URL: 'https://live.daserste.de/live-de-102~playerJson.json'
error: Unable to validate response text: Unable to parse MEDIAINFO: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE HTML>\n<html lang="de" i ...)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/ard_live.py`
Content:
```
1 import logging
2 import re
3 from urllib.parse import urljoin
4
5 from streamlink.plugin import Plugin, PluginError, pluginmatcher
6 from streamlink.plugin.api import validate
7 from streamlink.stream.hls import HLSStream
8 from streamlink.stream.http import HTTPStream
9
10 log = logging.getLogger(__name__)
11
12
13 @pluginmatcher(re.compile(
14 r"https?://((www|live)\.)?daserste\.de/"
15 ))
16 class ARDLive(Plugin):
17 _QUALITY_MAP = {
18 4: "1080p",
19 3: "720p",
20 2: "540p",
21 1: "270p",
22 0: "180p"
23 }
24
25 def _get_streams(self):
26 try:
27 data_url = self.session.http.get(self.url, schema=validate.Schema(
28 validate.parse_html(),
29 validate.xml_find(".//*[@data-ctrl-player]"),
30 validate.get("data-ctrl-player"),
31 validate.transform(lambda s: s.replace("'", "\"")),
32 validate.parse_json(),
33 {"url": str},
34 validate.get("url")
35 ))
36 except PluginError:
37 return
38
39 data_url = urljoin(self.url, data_url)
40 log.debug(f"Player URL: '{data_url}'")
41
42 self.title, media = self.session.http.get(data_url, schema=validate.Schema(
43 validate.parse_json(name="MEDIAINFO"),
44 {"mc": {
45 validate.optional("_title"): str,
46 "_mediaArray": [validate.all(
47 {
48 "_mediaStreamArray": [validate.all(
49 {
50 "_quality": validate.any(str, int),
51 "_stream": [validate.url()],
52 },
53 validate.union_get("_quality", ("_stream", 0))
54 )]
55 },
56 validate.get("_mediaStreamArray"),
57 validate.transform(dict)
58 )]
59 }},
60 validate.get("mc"),
61 validate.union_get("_title", ("_mediaArray", 0))
62 ))
63
64 if media.get("auto"):
65 yield from HLSStream.parse_variant_playlist(self.session, media.get("auto")).items()
66 else:
67 for quality, stream in media.items():
68 yield self._QUALITY_MAP.get(quality, quality), HTTPStream(self.session, stream)
69
70
71 __plugin__ = ARDLive
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/ard_live.py b/src/streamlink/plugins/ard_live.py
--- a/src/streamlink/plugins/ard_live.py
+++ b/src/streamlink/plugins/ard_live.py
@@ -14,6 +14,7 @@
r"https?://((www|live)\.)?daserste\.de/"
))
class ARDLive(Plugin):
+ _URL_DATA_BASE = "https://www.daserste.de/"
_QUALITY_MAP = {
4: "1080p",
3: "720p",
@@ -36,7 +37,7 @@
except PluginError:
return
- data_url = urljoin(self.url, data_url)
+ data_url = urljoin(self._URL_DATA_BASE, data_url)
log.debug(f"Player URL: '{data_url}'")
self.title, media = self.session.http.get(data_url, schema=validate.Schema(
| {"golden_diff": "diff --git a/src/streamlink/plugins/ard_live.py b/src/streamlink/plugins/ard_live.py\n--- a/src/streamlink/plugins/ard_live.py\n+++ b/src/streamlink/plugins/ard_live.py\n@@ -14,6 +14,7 @@\n r\"https?://((www|live)\\.)?daserste\\.de/\"\n ))\n class ARDLive(Plugin):\n+ _URL_DATA_BASE = \"https://www.daserste.de/\"\n _QUALITY_MAP = {\n 4: \"1080p\",\n 3: \"720p\",\n@@ -36,7 +37,7 @@\n except PluginError:\n return\n \n- data_url = urljoin(self.url, data_url)\n+ data_url = urljoin(self._URL_DATA_BASE, data_url)\n log.debug(f\"Player URL: '{data_url}'\")\n \n self.title, media = self.session.http.get(data_url, schema=validate.Schema(\n", "issue": "plugins.ard_live: Unable to parse MEDIAINFO\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nOn streamlink 3.1.1 Linux:\r\n\r\n```\r\n[cli][info] Found matching plugin ard_live for URL https://live.daserste.de/\r\nerror: Unable to validate response text: Unable to parse MEDIAINFO: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE HTML>\\n<html lang=\"de\" i ...)\r\n```\r\n\r\nStreamlink 2.0.0 Windows works fine. Can't find a working 2.0.0 Linux build to verify.\r\n\r\n3.1.1 seems to expect a player url at `https://live.daserste.de/live-de-102~playerJson.json` and 2.0.0 at `https://www.daserste.de/live/live-de-102~playerJson.json`.\r\n\r\nIs there a commandline arg to override it?\n\n### Debug log\n\n```text\n[cli][debug] OS: Linux-5.15.2-arch1-1-x86_64-with-glibc2.35\r\n[cli][debug] Python: 3.10.2\r\n[cli][debug] Streamlink: 3.1.1\r\n[cli][debug] Requests(2.27.0), Socks(1.7.1), Websocket(1.2.3)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://live.daserste.de/\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin ard_live for URL https://live.daserste.de/\r\n[plugins.ard_live][debug] Player URL: 'https://live.daserste.de/live-de-102~playerJson.json'\r\nerror: Unable to validate response text: Unable to parse MEDIAINFO: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE HTML>\\n<html lang=\"de\" i ...)\n```\n\n", "before_files": [{"content": "import logging\nimport re\nfrom urllib.parse import urljoin\n\nfrom streamlink.plugin import Plugin, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.stream.http import HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://((www|live)\\.)?daserste\\.de/\"\n))\nclass ARDLive(Plugin):\n _QUALITY_MAP = {\n 4: \"1080p\",\n 3: \"720p\",\n 2: \"540p\",\n 1: \"270p\",\n 0: \"180p\"\n }\n\n def _get_streams(self):\n try:\n data_url = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//*[@data-ctrl-player]\"),\n validate.get(\"data-ctrl-player\"),\n validate.transform(lambda s: s.replace(\"'\", \"\\\"\")),\n validate.parse_json(),\n {\"url\": str},\n validate.get(\"url\")\n ))\n except PluginError:\n return\n\n data_url = urljoin(self.url, data_url)\n log.debug(f\"Player URL: 
'{data_url}'\")\n\n self.title, media = self.session.http.get(data_url, schema=validate.Schema(\n validate.parse_json(name=\"MEDIAINFO\"),\n {\"mc\": {\n validate.optional(\"_title\"): str,\n \"_mediaArray\": [validate.all(\n {\n \"_mediaStreamArray\": [validate.all(\n {\n \"_quality\": validate.any(str, int),\n \"_stream\": [validate.url()],\n },\n validate.union_get(\"_quality\", (\"_stream\", 0))\n )]\n },\n validate.get(\"_mediaStreamArray\"),\n validate.transform(dict)\n )]\n }},\n validate.get(\"mc\"),\n validate.union_get(\"_title\", (\"_mediaArray\", 0))\n ))\n\n if media.get(\"auto\"):\n yield from HLSStream.parse_variant_playlist(self.session, media.get(\"auto\")).items()\n else:\n for quality, stream in media.items():\n yield self._QUALITY_MAP.get(quality, quality), HTTPStream(self.session, stream)\n\n\n__plugin__ = ARDLive\n", "path": "src/streamlink/plugins/ard_live.py"}], "after_files": [{"content": "import logging\nimport re\nfrom urllib.parse import urljoin\n\nfrom streamlink.plugin import Plugin, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.stream.http import HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://((www|live)\\.)?daserste\\.de/\"\n))\nclass ARDLive(Plugin):\n _URL_DATA_BASE = \"https://www.daserste.de/\"\n _QUALITY_MAP = {\n 4: \"1080p\",\n 3: \"720p\",\n 2: \"540p\",\n 1: \"270p\",\n 0: \"180p\"\n }\n\n def _get_streams(self):\n try:\n data_url = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//*[@data-ctrl-player]\"),\n validate.get(\"data-ctrl-player\"),\n validate.transform(lambda s: s.replace(\"'\", \"\\\"\")),\n validate.parse_json(),\n {\"url\": str},\n validate.get(\"url\")\n ))\n except PluginError:\n return\n\n data_url = urljoin(self._URL_DATA_BASE, data_url)\n log.debug(f\"Player URL: '{data_url}'\")\n\n self.title, media = self.session.http.get(data_url, schema=validate.Schema(\n validate.parse_json(name=\"MEDIAINFO\"),\n {\"mc\": {\n validate.optional(\"_title\"): str,\n \"_mediaArray\": [validate.all(\n {\n \"_mediaStreamArray\": [validate.all(\n {\n \"_quality\": validate.any(str, int),\n \"_stream\": [validate.url()],\n },\n validate.union_get(\"_quality\", (\"_stream\", 0))\n )]\n },\n validate.get(\"_mediaStreamArray\"),\n validate.transform(dict)\n )]\n }},\n validate.get(\"mc\"),\n validate.union_get(\"_title\", (\"_mediaArray\", 0))\n ))\n\n if media.get(\"auto\"):\n yield from HLSStream.parse_variant_playlist(self.session, media.get(\"auto\")).items()\n else:\n for quality, stream in media.items():\n yield self._QUALITY_MAP.get(quality, quality), HTTPStream(self.session, stream)\n\n\n__plugin__ = ARDLive\n", "path": "src/streamlink/plugins/ard_live.py"}]} | 1,444 | 207 |
gh_patches_debug_38602 | rasdani/github-patches | git_diff | streamlink__streamlink-2969 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Plugin App17 cannot fetch any stream
## Plugin Issue
<!-- Replace [ ] with [x] in order to check the box -->
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
`Stream currently unavailable.`
The output of Streamlink is always the same, even if the streamer is streaming online.
### Reproduction steps / Explicit stream URLs to test
<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->
Paste any URL (e.g., `https://17.live/live/{any stream ID}`) and execute the command.
### Log output
<!--
TEXT LOG OUTPUT IS REQUIRED for a plugin issue!
Use the `--loglevel debug` parameter and avoid using parameters which suppress log output.
https://streamlink.github.io/cli.html#cmdoption-l
Make sure to **remove usernames and passwords**
You can copy the output to https://gist.github.com/ or paste it below.
-->
```
C:\>streamlink https://17.live/live/7000362 -l trace
[22:44:25,408][cli][debug] OS: Windows 10
[22:44:25,409][cli][debug] Python: 3.6.6
[22:44:25,410][cli][debug] Streamlink: 1.4.1
[22:44:25,410][cli][debug] Requests(2.23.0), Socks(1.7.1), Websocket(0.57.0)
[22:44:25,410][cli][info] Found matching plugin app17 for URL https://17.live/live/7000362
[22:44:25,735][plugin.app17][info] Stream currently unavailable.
error: No playable streams found on this URL: https://17.live/live/7000362
```
### Additional comments, screenshots, etc.
[screenshot](https://imgur.com/MFn7K0y)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/app17.py`
Content:
```
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.plugin.api import useragents
5 from streamlink.stream import HLSStream, RTMPStream, HTTPStream
6
7 API_URL = "https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo"
8
9 _url_re = re.compile(r"https://17.live/live/(?P<channel>[^/&?]+)")
10 _status_re = re.compile(r'\\"closeBy\\":\\"\\"')
11 _rtmp_re = re.compile(r'\\"url\\"\s*:\s*\\"(.+?)\\"')
12
13
14 class App17(Plugin):
15 @classmethod
16 def can_handle_url(cls, url):
17 return _url_re.match(url)
18
19 def _get_streams(self):
20 match = _url_re.match(self.url)
21 channel = match.group("channel")
22
23 self.session.http.headers.update({'User-Agent': useragents.CHROME, 'Referer': self.url})
24
25 payload = '{"liveStreamID": "%s"}' % (channel)
26 res = self.session.http.post(API_URL, data=payload)
27 status = _status_re.search(res.text)
28 if not status:
29 self.logger.info("Stream currently unavailable.")
30 return
31
32 http_url = _rtmp_re.search(res.text).group(1)
33 https_url = http_url.replace("http:", "https:")
34 yield "live", HTTPStream(self.session, https_url)
35
36 if 'pull-rtmp' in http_url:
37 rtmp_url = http_url.replace("http:", "rtmp:").replace(".flv", "")
38 stream = RTMPStream(self.session, {
39 "rtmp": rtmp_url,
40 "live": True,
41 "pageUrl": self.url,
42 })
43 yield "live", stream
44
45 if 'wansu-' in http_url:
46 hls_url = http_url.replace(".flv", "/playlist.m3u8")
47 else:
48 hls_url = http_url.replace("live-hdl", "live-hls").replace(".flv", ".m3u8")
49
50 s = []
51 for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
52 yield s
53 if not s:
54 yield "live", HLSStream(self.session, hls_url)
55
56
57 __plugin__ = App17
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/app17.py b/src/streamlink/plugins/app17.py
--- a/src/streamlink/plugins/app17.py
+++ b/src/streamlink/plugins/app17.py
@@ -1,35 +1,49 @@
+import logging
import re
from streamlink.plugin import Plugin
-from streamlink.plugin.api import useragents
+from streamlink.plugin.api import useragents, validate
from streamlink.stream import HLSStream, RTMPStream, HTTPStream
-API_URL = "https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo"
-
-_url_re = re.compile(r"https://17.live/live/(?P<channel>[^/&?]+)")
-_status_re = re.compile(r'\\"closeBy\\":\\"\\"')
-_rtmp_re = re.compile(r'\\"url\\"\s*:\s*\\"(.+?)\\"')
+log = logging.getLogger(__name__)
class App17(Plugin):
+ _url_re = re.compile(r"https://17.live/live/(?P<channel>[^/&?]+)")
+ API_URL = "https://api-dsa.17app.co/api/v1/lives/{0}/viewers/alive"
+
+ _api_schema = validate.Schema(
+ {
+ "rtmpUrls": [{
+ validate.optional("provider"): validate.any(int, None),
+ "url": validate.url(),
+ }],
+ },
+ validate.get("rtmpUrls"),
+ )
+
@classmethod
def can_handle_url(cls, url):
- return _url_re.match(url)
+ return cls._url_re.match(url) is not None
def _get_streams(self):
- match = _url_re.match(self.url)
+ match = self._url_re.match(self.url)
channel = match.group("channel")
self.session.http.headers.update({'User-Agent': useragents.CHROME, 'Referer': self.url})
- payload = '{"liveStreamID": "%s"}' % (channel)
- res = self.session.http.post(API_URL, data=payload)
- status = _status_re.search(res.text)
- if not status:
- self.logger.info("Stream currently unavailable.")
+ data = '{"liveStreamID":"%s"}' % (channel)
+
+ try:
+ res = self.session.http.post(self.API_URL.format(channel), data=data)
+ res_json = self.session.http.json(res, schema=self._api_schema)
+ log.trace("{0!r}".format(res_json))
+ http_url = res_json[0]["url"]
+ except Exception as e:
+ log.info("Stream currently unavailable.")
+ log.debug(str(e))
return
- http_url = _rtmp_re.search(res.text).group(1)
https_url = http_url.replace("http:", "https:")
yield "live", HTTPStream(self.session, https_url)
@@ -47,11 +61,16 @@
else:
hls_url = http_url.replace("live-hdl", "live-hls").replace(".flv", ".m3u8")
- s = []
- for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
- yield s
+ s = HLSStream.parse_variant_playlist(self.session, hls_url)
if not s:
yield "live", HLSStream(self.session, hls_url)
+ else:
+ if len(s) == 1:
+ for _n, _s in s.items():
+ yield "live", _s
+ else:
+ for _s in s.items():
+ yield _s
__plugin__ = App17
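For readers following the patch above: the new `_api_schema` both validates the response of the per-channel `viewers/alive` endpoint and plucks out the list of stream descriptors. The snippet below only demonstrates how that schema behaves on a hand-written payload; the payload shape and field values are assumptions for illustration, not a documented API contract.

```python
from streamlink.plugin.api import validate

# Schema copied from the patch: expect a "rtmpUrls" list of {provider?, url}
# dicts and return just that list.
_api_schema = validate.Schema(
    {
        "rtmpUrls": [{
            validate.optional("provider"): validate.any(int, None),
            "url": validate.url(),
        }],
    },
    validate.get("rtmpUrls"),
)

# Hypothetical response payload, shaped the way the plugin assumes.
payload = {
    "rtmpUrls": [
        {"provider": 5, "url": "http://example-cdn.invalid/live-hdl/stream.flv"},
    ],
}

streams = _api_schema.validate(payload)
http_url = streams[0]["url"]
print(http_url)  # the plugin then derives HTTPS, RTMP and HLS variants from this URL
```

If the payload does not match (for example, the channel is offline and `rtmpUrls` is missing), validation raises, which the patched `_get_streams` catches and reports as "Stream currently unavailable."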
| {"golden_diff": "diff --git a/src/streamlink/plugins/app17.py b/src/streamlink/plugins/app17.py\n--- a/src/streamlink/plugins/app17.py\n+++ b/src/streamlink/plugins/app17.py\n@@ -1,35 +1,49 @@\n+import logging\n import re\n \n from streamlink.plugin import Plugin\n-from streamlink.plugin.api import useragents\n+from streamlink.plugin.api import useragents, validate\n from streamlink.stream import HLSStream, RTMPStream, HTTPStream\n \n-API_URL = \"https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo\"\n-\n-_url_re = re.compile(r\"https://17.live/live/(?P<channel>[^/&?]+)\")\n-_status_re = re.compile(r'\\\\\"closeBy\\\\\":\\\\\"\\\\\"')\n-_rtmp_re = re.compile(r'\\\\\"url\\\\\"\\s*:\\s*\\\\\"(.+?)\\\\\"')\n+log = logging.getLogger(__name__)\n \n \n class App17(Plugin):\n+ _url_re = re.compile(r\"https://17.live/live/(?P<channel>[^/&?]+)\")\n+ API_URL = \"https://api-dsa.17app.co/api/v1/lives/{0}/viewers/alive\"\n+\n+ _api_schema = validate.Schema(\n+ {\n+ \"rtmpUrls\": [{\n+ validate.optional(\"provider\"): validate.any(int, None),\n+ \"url\": validate.url(),\n+ }],\n+ },\n+ validate.get(\"rtmpUrls\"),\n+ )\n+\n @classmethod\n def can_handle_url(cls, url):\n- return _url_re.match(url)\n+ return cls._url_re.match(url) is not None\n \n def _get_streams(self):\n- match = _url_re.match(self.url)\n+ match = self._url_re.match(self.url)\n channel = match.group(\"channel\")\n \n self.session.http.headers.update({'User-Agent': useragents.CHROME, 'Referer': self.url})\n \n- payload = '{\"liveStreamID\": \"%s\"}' % (channel)\n- res = self.session.http.post(API_URL, data=payload)\n- status = _status_re.search(res.text)\n- if not status:\n- self.logger.info(\"Stream currently unavailable.\")\n+ data = '{\"liveStreamID\":\"%s\"}' % (channel)\n+\n+ try:\n+ res = self.session.http.post(self.API_URL.format(channel), data=data)\n+ res_json = self.session.http.json(res, schema=self._api_schema)\n+ log.trace(\"{0!r}\".format(res_json))\n+ http_url = res_json[0][\"url\"]\n+ except Exception as e:\n+ log.info(\"Stream currently unavailable.\")\n+ log.debug(str(e))\n return\n \n- http_url = _rtmp_re.search(res.text).group(1)\n https_url = http_url.replace(\"http:\", \"https:\")\n yield \"live\", HTTPStream(self.session, https_url)\n \n@@ -47,11 +61,16 @@\n else:\n hls_url = http_url.replace(\"live-hdl\", \"live-hls\").replace(\".flv\", \".m3u8\")\n \n- s = []\n- for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n- yield s\n+ s = HLSStream.parse_variant_playlist(self.session, hls_url)\n if not s:\n yield \"live\", HLSStream(self.session, hls_url)\n+ else:\n+ if len(s) == 1:\n+ for _n, _s in s.items():\n+ yield \"live\", _s\n+ else:\n+ for _s in s.items():\n+ yield _s\n \n \n __plugin__ = App17\n", "issue": "Plugin App17 cannot fetch any stream\n## Plugin Issue\r\n\r\n<!-- Replace [ ] with [x] in order to check the box -->\r\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\n`Stream currently unavailable.`\r\nThe output of the Streamlink is always the same. Even if the streamer is streaming online.\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. 
-->\r\n\r\nPaste any URL (e.g., `https://17.live/live/{any stream ID}`) and execute the command.\r\n\r\n\r\n### Log output\r\n\r\n<!--\r\nTEXT LOG OUTPUT IS REQUIRED for a plugin issue!\r\nUse the `--loglevel debug` parameter and avoid using parameters which suppress log output.\r\nhttps://streamlink.github.io/cli.html#cmdoption-l\r\n\r\nMake sure to **remove usernames and passwords**\r\nYou can copy the output to https://gist.github.com/ or paste it below.\r\n-->\r\n\r\n```\r\nC:\\>streamlink https://17.live/live/7000362 -l trace\r\n[22:44:25,408][cli][debug] OS: Windows 10\r\n[22:44:25,409][cli][debug] Python: 3.6.6\r\n[22:44:25,410][cli][debug] Streamlink: 1.4.1\r\n[22:44:25,410][cli][debug] Requests(2.23.0), Socks(1.7.1), Websocket(0.57.0)\r\n[22:44:25,410][cli][info] Found matching plugin app17 for URL https://17.live/live/7000362\r\n[22:44:25,735][plugin.app17][info] Stream currently unavailable.\r\nerror: No playable streams found on this URL: https://17.live/live/7000362\r\n```\r\n\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\n[screenshot](https://imgur.com/MFn7K0y)\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents\nfrom streamlink.stream import HLSStream, RTMPStream, HTTPStream\n\nAPI_URL = \"https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo\"\n\n_url_re = re.compile(r\"https://17.live/live/(?P<channel>[^/&?]+)\")\n_status_re = re.compile(r'\\\\\"closeBy\\\\\":\\\\\"\\\\\"')\n_rtmp_re = re.compile(r'\\\\\"url\\\\\"\\s*:\\s*\\\\\"(.+?)\\\\\"')\n\n\nclass App17(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n channel = match.group(\"channel\")\n\n self.session.http.headers.update({'User-Agent': useragents.CHROME, 'Referer': self.url})\n\n payload = '{\"liveStreamID\": \"%s\"}' % (channel)\n res = self.session.http.post(API_URL, data=payload)\n status = _status_re.search(res.text)\n if not status:\n self.logger.info(\"Stream currently unavailable.\")\n return\n\n http_url = _rtmp_re.search(res.text).group(1)\n https_url = http_url.replace(\"http:\", \"https:\")\n yield \"live\", HTTPStream(self.session, https_url)\n\n if 'pull-rtmp' in http_url:\n rtmp_url = http_url.replace(\"http:\", \"rtmp:\").replace(\".flv\", \"\")\n stream = RTMPStream(self.session, {\n \"rtmp\": rtmp_url,\n \"live\": True,\n \"pageUrl\": self.url,\n })\n yield \"live\", stream\n\n if 'wansu-' in http_url:\n hls_url = http_url.replace(\".flv\", \"/playlist.m3u8\")\n else:\n hls_url = http_url.replace(\"live-hdl\", \"live-hls\").replace(\".flv\", \".m3u8\")\n\n s = []\n for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n yield s\n if not s:\n yield \"live\", HLSStream(self.session, hls_url)\n\n\n__plugin__ = App17\n", "path": "src/streamlink/plugins/app17.py"}], "after_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream import HLSStream, RTMPStream, HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\nclass App17(Plugin):\n _url_re = re.compile(r\"https://17.live/live/(?P<channel>[^/&?]+)\")\n API_URL = \"https://api-dsa.17app.co/api/v1/lives/{0}/viewers/alive\"\n\n _api_schema = validate.Schema(\n {\n \"rtmpUrls\": [{\n validate.optional(\"provider\"): validate.any(int, None),\n \"url\": validate.url(),\n }],\n },\n validate.get(\"rtmpUrls\"),\n )\n\n @classmethod\n def 
can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def _get_streams(self):\n match = self._url_re.match(self.url)\n channel = match.group(\"channel\")\n\n self.session.http.headers.update({'User-Agent': useragents.CHROME, 'Referer': self.url})\n\n data = '{\"liveStreamID\":\"%s\"}' % (channel)\n\n try:\n res = self.session.http.post(self.API_URL.format(channel), data=data)\n res_json = self.session.http.json(res, schema=self._api_schema)\n log.trace(\"{0!r}\".format(res_json))\n http_url = res_json[0][\"url\"]\n except Exception as e:\n log.info(\"Stream currently unavailable.\")\n log.debug(str(e))\n return\n\n https_url = http_url.replace(\"http:\", \"https:\")\n yield \"live\", HTTPStream(self.session, https_url)\n\n if 'pull-rtmp' in http_url:\n rtmp_url = http_url.replace(\"http:\", \"rtmp:\").replace(\".flv\", \"\")\n stream = RTMPStream(self.session, {\n \"rtmp\": rtmp_url,\n \"live\": True,\n \"pageUrl\": self.url,\n })\n yield \"live\", stream\n\n if 'wansu-' in http_url:\n hls_url = http_url.replace(\".flv\", \"/playlist.m3u8\")\n else:\n hls_url = http_url.replace(\"live-hdl\", \"live-hls\").replace(\".flv\", \".m3u8\")\n\n s = HLSStream.parse_variant_playlist(self.session, hls_url)\n if not s:\n yield \"live\", HLSStream(self.session, hls_url)\n else:\n if len(s) == 1:\n for _n, _s in s.items():\n yield \"live\", _s\n else:\n for _s in s.items():\n yield _s\n\n\n__plugin__ = App17\n", "path": "src/streamlink/plugins/app17.py"}]} | 1,357 | 808 |
gh_patches_debug_66700 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-10076 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Synthesis failed for recommender
Hello! Autosynth couldn't regenerate recommender. :broken_heart:
Here's the output from running `synth.py`:
```
Cloning into 'working_repo'...
Switched to branch 'autosynth-recommender'
Running synthtool
['/tmpfs/src/git/autosynth/env/bin/python3', '-m', 'synthtool', 'synth.py', '--']
synthtool > Executing /tmpfs/src/git/autosynth/working_repo/recommender/synth.py.
synthtool > Ensuring dependencies.
synthtool > Pulling artman image.
latest: Pulling from googleapis/artman
Digest: sha256:c773192618c608a7a0415dd95282f841f8e6bcdef7dd760a988c93b77a64bd57
Status: Image is up to date for googleapis/artman:latest
synthtool > Cloning googleapis.
Traceback (most recent call last):
File "/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py", line 87, in <module>
main()
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py", line 79, in main
spec.loader.exec_module(synth_module) # type: ignore
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed
File "/tmpfs/src/git/autosynth/working_repo/recommender/synth.py", line 32, in <module>
include_protos=True
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py", line 50, in py_library
return self._generate_code(service, version, "python", **kwargs)
File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py", line 121, in _generate_code
f"Unable to find configuration yaml file: {(googleapis / config_path)}."
FileNotFoundError: Unable to find configuration yaml file: /home/kbuilder/.cache/synthtool/googleapis/google/cloud/recommender/artman_recommender_v1beta1.yaml.
synthtool > Cleaned up 1 temporary directories.
synthtool > Wrote metadata to synth.metadata.
Synthesis failed
```
Google internal developers can see the full log [here](https://sponge/74dee1a3-0367-43bf-9f40-1001ae7ea243).
--- END ISSUE ---
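The failure above comes down to where the generator looks for the artman configuration: with no explicit `config_path`, it appears to try `google/cloud/recommender/artman_recommender_v1beta1.yaml`, while the file in googleapis lives one level deeper, under the versioned directory. A minimal sketch of the corrected call, in the spirit of the patch shown later in this entry, looks like this (illustrative only, not a full synth.py):

```python
import synthtool as s
from synthtool import gcp

gapic = gcp.GAPICGenerator()

# Point artman at the versioned YAML explicitly instead of relying on the
# default, un-versioned location that does not exist in googleapis.
library = gapic.py_library(
    "recommender",
    "v1beta1",
    include_protos=True,
    config_path="/google/cloud/recommender/v1beta1/artman_recommender_v1beta1.yaml",
)
s.move(library, excludes=["nox.py", "docs/index.rst", "README.rst", "setup.py"])
```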
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recommender/synth.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This script is used to synthesize generated parts of this library."""
16 import re
17
18 import synthtool as s
19 from synthtool import gcp
20
21 gapic = gcp.GAPICGenerator()
22 versions = ["v1beta1"]
23 common = gcp.CommonTemplates()
24
25
26 # ----------------------------------------------------------------------------
27 # Generate Cloud Recommender
28 # ----------------------------------------------------------------------------
29 for version in versions:
30 library = gapic.py_library(
31 "recommender", version,
32 include_protos=True
33 )
34 s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])
35
36 # ----------------------------------------------------------------------------
37 # Add templated files
38 # ----------------------------------------------------------------------------
39 templated_files = common.py_library(unit_cov_level=97, cov_level=100)
40 s.move(templated_files, excludes=['noxfile.py'])
41
42 s.shell.run(["nox", "-s", "blacken"], hide_output=False)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recommender/synth.py b/recommender/synth.py
--- a/recommender/synth.py
+++ b/recommender/synth.py
@@ -29,7 +29,8 @@
for version in versions:
library = gapic.py_library(
"recommender", version,
- include_protos=True
+ include_protos=True,
+ config_path="/google/cloud/recommender/v1beta1/artman_recommender_v1beta1.yaml"
)
s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])
| {"golden_diff": "diff --git a/recommender/synth.py b/recommender/synth.py\n--- a/recommender/synth.py\n+++ b/recommender/synth.py\n@@ -29,7 +29,8 @@\n for version in versions:\n library = gapic.py_library(\n \"recommender\", version,\n- include_protos=True\n+ include_protos=True,\n+ config_path=\"/google/cloud/recommender/v1beta1/artman_recommender_v1beta1.yaml\"\n )\n s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])\n", "issue": "Synthesis failed for recommender\nHello! Autosynth couldn't regenerate recommender. :broken_heart:\n\nHere's the output from running `synth.py`:\n\n```\nCloning into 'working_repo'...\nSwitched to branch 'autosynth-recommender'\nRunning synthtool\n['/tmpfs/src/git/autosynth/env/bin/python3', '-m', 'synthtool', 'synth.py', '--']\nsynthtool > Executing /tmpfs/src/git/autosynth/working_repo/recommender/synth.py.\nsynthtool > Ensuring dependencies.\nsynthtool > Pulling artman image.\nlatest: Pulling from googleapis/artman\nDigest: sha256:c773192618c608a7a0415dd95282f841f8e6bcdef7dd760a988c93b77a64bd57\nStatus: Image is up to date for googleapis/artman:latest\nsynthtool > Cloning googleapis.\nTraceback (most recent call last):\n File \"/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py\", line 87, in <module>\n main()\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 764, in __call__\n return self.main(*args, **kwargs)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 717, in main\n rv = self.invoke(ctx)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 956, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 555, in invoke\n return callback(*args, **kwargs)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py\", line 79, in main\n spec.loader.exec_module(synth_module) # type: ignore\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\n File \"<frozen importlib._bootstrap>\", line 205, in _call_with_frames_removed\n File \"/tmpfs/src/git/autosynth/working_repo/recommender/synth.py\", line 32, in <module>\n include_protos=True\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py\", line 50, in py_library\n return self._generate_code(service, version, \"python\", **kwargs)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py\", line 121, in _generate_code\n f\"Unable to find configuration yaml file: {(googleapis / config_path)}.\"\nFileNotFoundError: Unable to find configuration yaml file: /home/kbuilder/.cache/synthtool/googleapis/google/cloud/recommender/artman_recommender_v1beta1.yaml.\nsynthtool > Cleaned up 1 temporary directories.\nsynthtool > Wrote metadata to synth.metadata.\n\nSynthesis failed\n\n```\n\nGoogle internal developers can see the full log [here](https://sponge/74dee1a3-0367-43bf-9f40-1001ae7ea243).\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\nimport re\n\nimport synthtool as s\nfrom synthtool import gcp\n\ngapic = gcp.GAPICGenerator()\nversions = [\"v1beta1\"]\ncommon = gcp.CommonTemplates()\n\n\n# ----------------------------------------------------------------------------\n# Generate Cloud Recommender\n# ----------------------------------------------------------------------------\nfor version in versions:\n library = gapic.py_library(\n \"recommender\", version,\n include_protos=True\n )\n s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(unit_cov_level=97, cov_level=100)\ns.move(templated_files, excludes=['noxfile.py'])\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False) ", "path": "recommender/synth.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\nimport re\n\nimport synthtool as s\nfrom synthtool import gcp\n\ngapic = gcp.GAPICGenerator()\nversions = [\"v1beta1\"]\ncommon = gcp.CommonTemplates()\n\n\n# ----------------------------------------------------------------------------\n# Generate Cloud Recommender\n# ----------------------------------------------------------------------------\nfor version in versions:\n library = gapic.py_library(\n \"recommender\", version,\n include_protos=True,\n config_path=\"/google/cloud/recommender/v1beta1/artman_recommender_v1beta1.yaml\"\n )\n s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(unit_cov_level=97, cov_level=100)\ns.move(templated_files, excludes=['noxfile.py'])\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False) ", "path": "recommender/synth.py"}]} | 1,536 | 135 |
gh_patches_debug_16252 | rasdani/github-patches | git_diff | fedora-infra__bodhi-2097 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bodhi-check-policies only operates on unpushed updates
I noticed today when working on #1514 that the update query that ```bodhi-check-policies``` performs only selects [unpushed updates](https://github.com/fedora-infra/bodhi/blob/3.1.0/bodhi/server/scripts/check_policies.py#L40-L42):
```
updates = models.Update.query.filter(models.Update.pushed == false())\
.filter(models.Update.status.in_(
[models.UpdateStatus.pending, models.UpdateStatus.testing]))
```
I think this is probably not what we want, as [testing updates are marked pushed](https://github.com/fedora-infra/bodhi/blob/3b655f2/bodhi/server/consumers/masher.py#L845-L857). This means we will only consult Greenwave about pending updates.
--- END ISSUE ---
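To make the filter interaction concrete, the toy example below mimics the three relevant states with plain dictionaries (these are simplified stand-ins, not Bodhi's real ORM models). It only illustrates the point made above: because testing updates carry `pushed=True`, the `pushed == false()` condition silently drops them, so only pending updates ever reach Greenwave.

```python
# Simplified stand-ins for Update rows; not the real Bodhi models.
updates = [
    {"title": "FEDORA-1", "status": "pending", "pushed": False},
    {"title": "FEDORA-2", "status": "testing", "pushed": True},   # testing updates are marked pushed
    {"title": "FEDORA-3", "status": "stable",  "pushed": True},
]

# Original query: pushed == False AND status IN (pending, testing)
original = [u["title"] for u in updates
            if not u["pushed"] and u["status"] in ("pending", "testing")]
print(original)  # ['FEDORA-1'] -> only pending updates are checked

# Dropping the pushed filter, as the accepted patch does, keeps both open states:
fixed = [u["title"] for u in updates if u["status"] in ("pending", "testing")]
print(fixed)     # ['FEDORA-1', 'FEDORA-2']
```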
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/scripts/check_policies.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright © 2017 Red Hat, Inc.
3 #
4 # This file is part of Bodhi.
5 #
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19
20 """
21 Check the enforced policies by Greenwave for each open update.
22
23 Ideally, this should be done in a fedmsg consumer but we currently do not have any
24 messages in the message bus yet.
25 """
26 import click
27 from sqlalchemy.sql.expression import false
28
29 from bodhi.server import config, initialize_db, models, Session
30
31
32 @click.command()
33 @click.version_option(message='%(version)s')
34 def check():
35 """Check the enforced policies by Greenwave for each open update."""
36 initialize_db(config.config)
37 session = Session()
38
39 updates = models.Update.query.filter(models.Update.pushed == false())\
40 .filter(models.Update.status.in_(
41 [models.UpdateStatus.pending, models.UpdateStatus.testing]))
42 for update in updates:
43 try:
44 update.update_test_gating_status()
45 session.commit()
46 except Exception as e:
47 # If there is a problem talking to Greenwave server, print the error.
48 click.echo(str(e))
49 session.rollback()
50
51
52 if __name__ == '__main__':
53 check()
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/scripts/check_policies.py b/bodhi/server/scripts/check_policies.py
--- a/bodhi/server/scripts/check_policies.py
+++ b/bodhi/server/scripts/check_policies.py
@@ -24,7 +24,6 @@
messages in the message bus yet.
"""
import click
-from sqlalchemy.sql.expression import false
from bodhi.server import config, initialize_db, models, Session
@@ -36,9 +35,8 @@
initialize_db(config.config)
session = Session()
- updates = models.Update.query.filter(models.Update.pushed == false())\
- .filter(models.Update.status.in_(
- [models.UpdateStatus.pending, models.UpdateStatus.testing]))
+ updates = models.Update.query.filter(models.Update.status.in_(
+ [models.UpdateStatus.pending, models.UpdateStatus.testing]))
for update in updates:
try:
update.update_test_gating_status()
| {"golden_diff": "diff --git a/bodhi/server/scripts/check_policies.py b/bodhi/server/scripts/check_policies.py\n--- a/bodhi/server/scripts/check_policies.py\n+++ b/bodhi/server/scripts/check_policies.py\n@@ -24,7 +24,6 @@\n messages in the message bus yet.\n \"\"\"\n import click\n-from sqlalchemy.sql.expression import false\n \n from bodhi.server import config, initialize_db, models, Session\n \n@@ -36,9 +35,8 @@\n initialize_db(config.config)\n session = Session()\n \n- updates = models.Update.query.filter(models.Update.pushed == false())\\\n- .filter(models.Update.status.in_(\n- [models.UpdateStatus.pending, models.UpdateStatus.testing]))\n+ updates = models.Update.query.filter(models.Update.status.in_(\n+ [models.UpdateStatus.pending, models.UpdateStatus.testing]))\n for update in updates:\n try:\n update.update_test_gating_status()\n", "issue": "bodhi-check-policies only operates on unpushed updates\nI noticed today when working on #1514 that the update query that ```bodhi-check-policies``` performs only selects [unpushed updates](https://github.com/fedora-infra/bodhi/blob/3.1.0/bodhi/server/scripts/check_policies.py#L40-L42):\r\n\r\n```\r\nupdates = models.Update.query.filter(models.Update.pushed == false())\\\r\n .filter(models.Update.status.in_(\r\n [models.UpdateStatus.pending, models.UpdateStatus.testing]))\r\n```\r\n\r\nI think this is probably not what we want, as [testing updates are marked pushed](https://github.com/fedora-infra/bodhi/blob/3b655f2/bodhi/server/consumers/masher.py#L845-L857). This means we will only consult Greenwave about pending updates.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2017 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n\"\"\"\nCheck the enforced policies by Greenwave for each open update.\n\nIdeally, this should be done in a fedmsg consumer but we currently do not have any\nmessages in the message bus yet.\n\"\"\"\nimport click\nfrom sqlalchemy.sql.expression import false\n\nfrom bodhi.server import config, initialize_db, models, Session\n\n\[email protected]()\[email protected]_option(message='%(version)s')\ndef check():\n \"\"\"Check the enforced policies by Greenwave for each open update.\"\"\"\n initialize_db(config.config)\n session = Session()\n\n updates = models.Update.query.filter(models.Update.pushed == false())\\\n .filter(models.Update.status.in_(\n [models.UpdateStatus.pending, models.UpdateStatus.testing]))\n for update in updates:\n try:\n update.update_test_gating_status()\n session.commit()\n except Exception as e:\n # If there is a problem talking to Greenwave server, print the error.\n click.echo(str(e))\n session.rollback()\n\n\nif __name__ == '__main__':\n check()\n", "path": "bodhi/server/scripts/check_policies.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2017 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n\"\"\"\nCheck the enforced policies by Greenwave for each open update.\n\nIdeally, this should be done in a fedmsg consumer but we currently do not have any\nmessages in the message bus yet.\n\"\"\"\nimport click\n\nfrom bodhi.server import config, initialize_db, models, Session\n\n\[email protected]()\[email protected]_option(message='%(version)s')\ndef check():\n \"\"\"Check the enforced policies by Greenwave for each open update.\"\"\"\n initialize_db(config.config)\n session = Session()\n\n updates = models.Update.query.filter(models.Update.status.in_(\n [models.UpdateStatus.pending, models.UpdateStatus.testing]))\n for update in updates:\n try:\n update.update_test_gating_status()\n session.commit()\n except Exception as e:\n # If there is a problem talking to Greenwave server, print the error.\n click.echo(str(e))\n session.rollback()\n\n\nif __name__ == '__main__':\n check()\n", "path": "bodhi/server/scripts/check_policies.py"}]} | 970 | 199 |
gh_patches_debug_26464 | rasdani/github-patches | git_diff | geopandas__geopandas-2418 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
wrong country code ISO_3 for France in naturalearth_lowres dataset
Not sure this is the right place to mention it, but the `iso_a3` code for France should be 'FRA', not -99.
```
import geopandas
path = geopandas.datasets.get_path('naturalearth_lowres')
world = geopandas.read_file(path)
world[world['name']=='France'].iso_a3
```
--- END ISSUE ---
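Until a regenerated `naturalearth_lowres` ships, the missing code can be patched on the loaded GeoDataFrame. The snippet below is a user-side workaround only; the upstream fix (shown later in this entry) instead regenerates the shapefile and copies the `ADM0_A3` admin code wherever `ISO_A3` still holds the `-99` placeholder.

```python
import geopandas

world = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))

# Rows where Natural Earth left the ISO code as the "-99" placeholder.
print(world.loc[world["iso_a3"] == "-99", "name"].tolist())

# Manual workaround for the report above: France's ISO 3166-1 alpha-3 code.
world.loc[world["name"] == "France", "iso_a3"] = "FRA"
print(world.loc[world["name"] == "France", "iso_a3"])
```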
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/datasets/naturalearth_creation.py`
Content:
```
1 """
2 Script that generates the included dataset 'naturalearth_lowres.shp'.
3
4 Raw data: https://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-0-countries/
5 Current version used: version 4.1.0
6 """ # noqa (E501 link is longer than max line length)
7
8 import geopandas as gpd
9
10 # assumes zipfile from naturalearthdata was downloaded to current directory
11 world_raw = gpd.read_file("zip://./ne_110m_admin_0_countries.zip")
12 # subsets columns of interest for geopandas examples
13 world_df = world_raw[
14 ["POP_EST", "CONTINENT", "NAME", "ISO_A3", "GDP_MD_EST", "geometry"]
15 ]
16 world_df.columns = world_df.columns.str.lower()
17 world_df.to_file(
18 driver="ESRI Shapefile", filename="./naturalearth_lowres/naturalearth_lowres.shp"
19 )
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geopandas/datasets/naturalearth_creation.py b/geopandas/datasets/naturalearth_creation.py
--- a/geopandas/datasets/naturalearth_creation.py
+++ b/geopandas/datasets/naturalearth_creation.py
@@ -1,19 +1,29 @@
"""
Script that generates the included dataset 'naturalearth_lowres.shp'.
-Raw data: https://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-0-countries/
-Current version used: version 4.1.0
+Raw data: https://www.naturalearthdata.com/http//www.naturalearthdata.com/download/110m/cultural/ne_110m_admin_0_countries.zip
+Current version used: version 5.0.1
""" # noqa (E501 link is longer than max line length)
import geopandas as gpd
# assumes zipfile from naturalearthdata was downloaded to current directory
world_raw = gpd.read_file("zip://./ne_110m_admin_0_countries.zip")
+
+# not ideal - fix some country codes
+mask = world_raw["ISO_A3"].eq("-99") & world_raw["TYPE"].isin(
+ ["Sovereign country", "Country"]
+)
+world_raw.loc[mask, "ISO_A3"] = world_raw.loc[mask, "ADM0_A3"]
+
# subsets columns of interest for geopandas examples
world_df = world_raw[
- ["POP_EST", "CONTINENT", "NAME", "ISO_A3", "GDP_MD_EST", "geometry"]
-]
+ ["POP_EST", "CONTINENT", "NAME", "ISO_A3", "GDP_MD", "geometry"]
+].rename(
+ columns={"GDP_MD": "GDP_MD_EST"}
+) # column has changed name...
world_df.columns = world_df.columns.str.lower()
+
world_df.to_file(
driver="ESRI Shapefile", filename="./naturalearth_lowres/naturalearth_lowres.shp"
)
| {"golden_diff": "diff --git a/geopandas/datasets/naturalearth_creation.py b/geopandas/datasets/naturalearth_creation.py\n--- a/geopandas/datasets/naturalearth_creation.py\n+++ b/geopandas/datasets/naturalearth_creation.py\n@@ -1,19 +1,29 @@\n \"\"\"\n Script that generates the included dataset 'naturalearth_lowres.shp'.\n \n-Raw data: https://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-0-countries/\n-Current version used: version 4.1.0\n+Raw data: https://www.naturalearthdata.com/http//www.naturalearthdata.com/download/110m/cultural/ne_110m_admin_0_countries.zip\n+Current version used: version 5.0.1\n \"\"\" # noqa (E501 link is longer than max line length)\n \n import geopandas as gpd\n \n # assumes zipfile from naturalearthdata was downloaded to current directory\n world_raw = gpd.read_file(\"zip://./ne_110m_admin_0_countries.zip\")\n+\n+# not ideal - fix some country codes\n+mask = world_raw[\"ISO_A3\"].eq(\"-99\") & world_raw[\"TYPE\"].isin(\n+ [\"Sovereign country\", \"Country\"]\n+)\n+world_raw.loc[mask, \"ISO_A3\"] = world_raw.loc[mask, \"ADM0_A3\"]\n+\n # subsets columns of interest for geopandas examples\n world_df = world_raw[\n- [\"POP_EST\", \"CONTINENT\", \"NAME\", \"ISO_A3\", \"GDP_MD_EST\", \"geometry\"]\n-]\n+ [\"POP_EST\", \"CONTINENT\", \"NAME\", \"ISO_A3\", \"GDP_MD\", \"geometry\"]\n+].rename(\n+ columns={\"GDP_MD\": \"GDP_MD_EST\"}\n+) # column has changed name...\n world_df.columns = world_df.columns.str.lower()\n+\n world_df.to_file(\n driver=\"ESRI Shapefile\", filename=\"./naturalearth_lowres/naturalearth_lowres.shp\"\n )\n", "issue": "wrong country code ISO_3 for France in naturalearth_lowres dataset\nNot sure this is the right place to mention it, but code iso_3 for France should be 'FRA' not -99.\r\n\r\n```\r\n import geopandas\r\n path = geopandas.datasets.get_path('naturalearth_lowres')\r\n world = geopandas.read_file(path)\r\n world[world['name']=='France'].iso_a3\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nScript that generates the included dataset 'naturalearth_lowres.shp'.\n\nRaw data: https://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-0-countries/\nCurrent version used: version 4.1.0\n\"\"\" # noqa (E501 link is longer than max line length)\n\nimport geopandas as gpd\n\n# assumes zipfile from naturalearthdata was downloaded to current directory\nworld_raw = gpd.read_file(\"zip://./ne_110m_admin_0_countries.zip\")\n# subsets columns of interest for geopandas examples\nworld_df = world_raw[\n [\"POP_EST\", \"CONTINENT\", \"NAME\", \"ISO_A3\", \"GDP_MD_EST\", \"geometry\"]\n]\nworld_df.columns = world_df.columns.str.lower()\nworld_df.to_file(\n driver=\"ESRI Shapefile\", filename=\"./naturalearth_lowres/naturalearth_lowres.shp\"\n)\n", "path": "geopandas/datasets/naturalearth_creation.py"}], "after_files": [{"content": "\"\"\"\nScript that generates the included dataset 'naturalearth_lowres.shp'.\n\nRaw data: https://www.naturalearthdata.com/http//www.naturalearthdata.com/download/110m/cultural/ne_110m_admin_0_countries.zip\nCurrent version used: version 5.0.1\n\"\"\" # noqa (E501 link is longer than max line length)\n\nimport geopandas as gpd\n\n# assumes zipfile from naturalearthdata was downloaded to current directory\nworld_raw = gpd.read_file(\"zip://./ne_110m_admin_0_countries.zip\")\n\n# not ideal - fix some country codes\nmask = world_raw[\"ISO_A3\"].eq(\"-99\") & world_raw[\"TYPE\"].isin(\n [\"Sovereign country\", \"Country\"]\n)\nworld_raw.loc[mask, \"ISO_A3\"] = world_raw.loc[mask, 
\"ADM0_A3\"]\n\n# subsets columns of interest for geopandas examples\nworld_df = world_raw[\n [\"POP_EST\", \"CONTINENT\", \"NAME\", \"ISO_A3\", \"GDP_MD\", \"geometry\"]\n].rename(\n columns={\"GDP_MD\": \"GDP_MD_EST\"}\n) # column has changed name...\nworld_df.columns = world_df.columns.str.lower()\n\nworld_df.to_file(\n driver=\"ESRI Shapefile\", filename=\"./naturalearth_lowres/naturalearth_lowres.shp\"\n)\n", "path": "geopandas/datasets/naturalearth_creation.py"}]} | 605 | 461 |
gh_patches_debug_18801 | rasdani/github-patches | git_diff | TheAlgorithms__Python-338 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug report for radix sort
**Description**
If the test case for [`radix_sort.py`](https://github.com/TheAlgorithms/Python/blob/master/sorts/radix_sort.py) is `[104, 203, 308, 401]`, the result would be `[401, 203, 104, 308]`.
It's wrong!
The reason is that if `tmp` is always `0` in one pass, the code exits the loop early. In other words, if the same digit of all the numbers is 0, the result may be wrong. A similar example:
*Input*: `[2018, 33017, 24016]`
*Output*: `[24016, 33017, 2018]`
Wrong again!!
**Suggestion**
Do not use `maxLength` as a loop variable because the value of `maxLength` is related to `tmp`.
I think that by finding the maximum value of the array and assigning it to `max_digit`, and by using another variable `digit` with an initial value of 1 as the loop variable, we can guarantee the correct number of passes: multiply `digit` by 10 on each pass and exit the loop once `digit` is greater than `max_digit`.
The complexity will then be O(nk + n), where n is the size of the input list and k is the digit length of the numbers.
--- END ISSUE ---
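The suggestion above maps directly onto code. The sketch below follows that idea (find the maximum once, then advance a placement value by a factor of 10 per pass); it is an illustration rather than the accepted patch, and like the original code it assumes non-negative integers and a non-empty list (since `max()` is used).

```python
def radixsort_fixed(lst):
    RADIX = 10
    placement = 1
    max_digit = max(lst)  # the largest value determines how many passes are needed

    while placement <= max_digit:  # <= so an exact power of ten still gets its final pass
        buckets = [[] for _ in range(RADIX)]
        for value in lst:  # distribute by the digit currently selected by placement
            buckets[(value // placement) % RADIX].append(value)
        lst[:] = [value for bucket in buckets for value in bucket]  # collect, keeping stability
        placement *= RADIX  # move on to the next digit

    return lst


# The failing cases from the report now come out sorted:
print(radixsort_fixed([104, 203, 308, 401]))   # [104, 203, 308, 401]
print(radixsort_fixed([2018, 33017, 24016]))   # [2018, 24016, 33017]
```

This keeps the overall cost at roughly O(n * k) for n elements and k digits in the largest value, because the number of passes now depends only on `max_digit` rather than on whether a particular digit column happens to be all zeros.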
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sorts/radix_sort.py`
Content:
```
1 def radixsort(lst):
2 RADIX = 10
3 maxLength = False
4 tmp , placement = -1, 1
5
6 while not maxLength:
7 maxLength = True
8 # declare and initialize buckets
9 buckets = [list() for _ in range( RADIX )]
10
11 # split lst between lists
12 for i in lst:
13 tmp = int((i / placement) % RADIX)
14 buckets[tmp].append(i)
15
16 if maxLength and tmp > 0:
17 maxLength = False
18
19 # empty lists into lst array
20 a = 0
21 for b in range( RADIX ):
22 buck = buckets[b]
23 for i in buck:
24 lst[a] = i
25 a += 1
26
27 # move to next
28 placement *= RADIX
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py
--- a/sorts/radix_sort.py
+++ b/sorts/radix_sort.py
@@ -1,28 +1,26 @@
def radixsort(lst):
- RADIX = 10
- maxLength = False
- tmp , placement = -1, 1
+ RADIX = 10
+ placement = 1
- while not maxLength:
- maxLength = True
- # declare and initialize buckets
- buckets = [list() for _ in range( RADIX )]
+ # get the maximum number
+ max_digit = max(lst)
- # split lst between lists
- for i in lst:
- tmp = int((i / placement) % RADIX)
- buckets[tmp].append(i)
+ while placement < max_digit:
+ # declare and initialize buckets
+ buckets = [list() for _ in range( RADIX )]
- if maxLength and tmp > 0:
- maxLength = False
+ # split lst between lists
+ for i in lst:
+ tmp = int((i / placement) % RADIX)
+ buckets[tmp].append(i)
- # empty lists into lst array
- a = 0
- for b in range( RADIX ):
- buck = buckets[b]
- for i in buck:
- lst[a] = i
- a += 1
+ # empty lists into lst array
+ a = 0
+ for b in range( RADIX ):
+ buck = buckets[b]
+ for i in buck:
+ lst[a] = i
+ a += 1
- # move to next
- placement *= RADIX
+ # move to next
+ placement *= RADIX
| {"golden_diff": "diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py\n--- a/sorts/radix_sort.py\n+++ b/sorts/radix_sort.py\n@@ -1,28 +1,26 @@\n def radixsort(lst):\n- RADIX = 10\n- maxLength = False\n- tmp , placement = -1, 1\n+ RADIX = 10\n+ placement = 1\n \n- while not maxLength:\n- maxLength = True\n- # declare and initialize buckets\n- buckets = [list() for _ in range( RADIX )]\n+ # get the maximum number\n+ max_digit = max(lst)\n \n- # split lst between lists\n- for i in lst:\n- tmp = int((i / placement) % RADIX)\n- buckets[tmp].append(i)\n+ while placement < max_digit:\n+ # declare and initialize buckets\n+ buckets = [list() for _ in range( RADIX )]\n \n- if maxLength and tmp > 0:\n- maxLength = False\n+ # split lst between lists\n+ for i in lst:\n+ tmp = int((i / placement) % RADIX)\n+ buckets[tmp].append(i)\n \n- # empty lists into lst array\n- a = 0\n- for b in range( RADIX ):\n- buck = buckets[b]\n- for i in buck:\n- lst[a] = i\n- a += 1\n+ # empty lists into lst array\n+ a = 0\n+ for b in range( RADIX ):\n+ buck = buckets[b]\n+ for i in buck:\n+ lst[a] = i\n+ a += 1\n \n- # move to next\n- placement *= RADIX\n+ # move to next\n+ placement *= RADIX\n", "issue": "Bug report for radix sort\n**Description**\r\n\r\nif the test case for [`radix_sort.py`](https://github.com/TheAlgorithms/Python/blob/master/sorts/radix_sort.py) is `[104, 203, 308, 401]`, the result would be `[401, 203, 104, 308]`\r\n\r\nIt's wrong!\r\n\r\nThe reason is that if the `tmp` is always `0` in one loop, it will exit the loop. In other words, If the same digit of all numbers is 0, then the result may be wrong. The similar example like:\r\n*Input*: `[2018, 33017, 24016]`\r\n*Output*: `[24016, 33017, 2018]`\r\nWrong again!!\r\n\r\n**Suggestion**\r\n\r\nDo not use `maxLength` as a loop variable because the value of `maxLength` is related to `tmp`.\r\n\r\nI think that by finding the maximum value of the array and assigning it to `max_digit`, using another variable `digit` with an initial value of 1 as the loop variable, each loop `digit` is multiplied by 10, and exit the loops when the `digit` greater than `max_digit`, which can guarantee the correct number of loops.\r\n\r\nAnd the complexity will be O(nk + n) . n is the size of input list and k is the digit length of the number.\n", "before_files": [{"content": "def radixsort(lst):\n RADIX = 10\n maxLength = False\n tmp , placement = -1, 1\n\n while not maxLength:\n maxLength = True\n # declare and initialize buckets\n buckets = [list() for _ in range( RADIX )]\n\n # split lst between lists\n for i in lst:\n tmp = int((i / placement) % RADIX)\n buckets[tmp].append(i)\n\n if maxLength and tmp > 0:\n maxLength = False\n\n # empty lists into lst array\n a = 0\n for b in range( RADIX ):\n buck = buckets[b]\n for i in buck:\n lst[a] = i\n a += 1\n\n # move to next\n placement *= RADIX\n", "path": "sorts/radix_sort.py"}], "after_files": [{"content": "def radixsort(lst):\n RADIX = 10\n placement = 1\n\n # get the maximum number\n max_digit = max(lst)\n\n while placement < max_digit:\n # declare and initialize buckets\n buckets = [list() for _ in range( RADIX )]\n\n # split lst between lists\n for i in lst:\n tmp = int((i / placement) % RADIX)\n buckets[tmp].append(i)\n\n # empty lists into lst array\n a = 0\n for b in range( RADIX ):\n buck = buckets[b]\n for i in buck:\n lst[a] = i\n a += 1\n\n # move to next\n placement *= RADIX\n", "path": "sorts/radix_sort.py"}]} | 810 | 413 |
gh_patches_debug_26262 | rasdani/github-patches | git_diff | conan-io__conan-2927 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Version("6") < Version("6.0") == True
Version model has a bug.
Version("6") is considered lower than "6.0". It introduced a bug (fixed in #2885) with the default`std`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/model/version.py`
Content:
```
1 import re
2
3
4 class Version(str):
5 """ This is NOT an implementation of semver, as users may use any pattern in their versions.
6 It is just a helper to parse .-, and compare taking into account integers when possible
7 """
8 version_pattern = re.compile('[.-]')
9
10 def __new__(cls, content):
11 return str.__new__(cls, content.strip())
12
13 @property
14 def as_list(self):
15 if not hasattr(self, "_cached_list"):
16 tokens = self.rsplit('+', 1)
17 self._base = tokens[0]
18 if len(tokens) == 2:
19 self._build = tokens[1]
20 self._cached_list = []
21 tokens = Version.version_pattern.split(tokens[0])
22 for item in tokens:
23 self._cached_list.append(int(item) if item.isdigit() else item)
24 return self._cached_list
25
26 def major(self, fill=True):
27 self_list = self.as_list
28 if not isinstance(self_list[0], int):
29 return self._base
30 v = str(self_list[0]) if self_list else "0"
31 if fill:
32 return Version(".".join([v, 'Y', 'Z']))
33 return Version(v)
34
35 def stable(self):
36 """ same as major, but as semver, 0.Y.Z is not considered
37 stable, so return it as is
38 """
39 if self.as_list[0] == 0:
40 return self
41 return self.major()
42
43 def minor(self, fill=True):
44 self_list = self.as_list
45 if not isinstance(self_list[0], int):
46 return self._base
47 v0 = str(self_list[0]) if len(self_list) > 0 else "0"
48 v1 = str(self_list[1]) if len(self_list) > 1 else "0"
49 if fill:
50 return Version(".".join([v0, v1, 'Z']))
51 return Version(".".join([v0, v1]))
52
53 def patch(self):
54 self_list = self.as_list
55 if not isinstance(self_list[0], int):
56 return self._base
57 v0 = str(self_list[0]) if len(self_list) > 0 else "0"
58 v1 = str(self_list[1]) if len(self_list) > 1 else "0"
59 v2 = str(self_list[2]) if len(self_list) > 2 else "0"
60 return Version(".".join([v0, v1, v2]))
61
62 def pre(self):
63 self_list = self.as_list
64 if not isinstance(self_list[0], int):
65 return self._base
66 v0 = str(self_list[0]) if len(self_list) > 0 else "0"
67 v1 = str(self_list[1]) if len(self_list) > 1 else "0"
68 v2 = str(self_list[2]) if len(self_list) > 2 else "0"
69 v = ".".join([v0, v1, v2])
70 if len(self_list) > 3:
71 v += "-%s" % self_list[3]
72 return Version(v)
73
74 @property
75 def build(self):
76 if hasattr(self, "_build"):
77 return self._build
78 return ""
79
80 @property
81 def base(self):
82 self.as_list
83 return Version(self._base)
84
85 def compatible(self, other):
86 if not isinstance(other, Version):
87 other = Version(other)
88 for v1, v2 in zip(self.as_list, other.as_list):
89 if v1 in ["X", "Y", "Z"] or v2 in ["X", "Y", "Z"]:
90 return True
91 if v1 != v2:
92 return False
93 return True
94
95 def __cmp__(self, other):
96 if other is None:
97 return 1
98 if not isinstance(other, Version):
99 other = Version(other)
100
101 other_list = other.as_list
102 for ind, el in enumerate(self.as_list):
103 if ind + 1 > len(other_list):
104 if isinstance(el, int):
105 return 1
106 return -1
107 if not isinstance(el, int) and isinstance(other_list[ind], int):
108 # Version compare with 1.4.rc2
109 return -1
110 elif not isinstance(other_list[ind], int) and isinstance(el, int):
111 return 1
112 elif el == other_list[ind]:
113 continue
114 elif el > other_list[ind]:
115 return 1
116 else:
117 return -1
118 if len(other_list) > len(self.as_list):
119 return -1
120 else:
121 return 0
122
123 def __gt__(self, other):
124 return self.__cmp__(other) == 1
125
126 def __lt__(self, other):
127 return self.__cmp__(other) == -1
128
129 def __le__(self, other):
130 return self.__cmp__(other) in [0, -1]
131
132 def __ge__(self, other):
133 return self.__cmp__(other) in [0, 1]
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/model/version.py b/conans/model/version.py
--- a/conans/model/version.py
+++ b/conans/model/version.py
@@ -98,6 +98,18 @@
if not isinstance(other, Version):
other = Version(other)
+ # Check equals
+ def get_el(a_list, index):
+ if len(a_list) - 1 < index:
+ return 0 # out of range, 4 == 4.0 == 4.0.0
+ return a_list[index]
+
+ equals = all(get_el(other.as_list, ind) == get_el(self.as_list, ind)
+ for ind in range(0, max(len(other.as_list), len(self.as_list))))
+ if equals:
+ return 0
+
+ # Check greater than or less than
other_list = other.as_list
for ind, el in enumerate(self.as_list):
if ind + 1 > len(other_list):
@@ -117,8 +129,6 @@
return -1
if len(other_list) > len(self.as_list):
return -1
- else:
- return 0
def __gt__(self, other):
return self.__cmp__(other) == 1
@@ -131,3 +141,12 @@
def __ge__(self, other):
return self.__cmp__(other) in [0, 1]
+
+ def __eq__(self, other):
+ return self.__cmp__(other) == 0
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return str.__hash__(self)
| {"golden_diff": "diff --git a/conans/model/version.py b/conans/model/version.py\n--- a/conans/model/version.py\n+++ b/conans/model/version.py\n@@ -98,6 +98,18 @@\n if not isinstance(other, Version):\n other = Version(other)\n \n+ # Check equals\n+ def get_el(a_list, index):\n+ if len(a_list) - 1 < index:\n+ return 0 # out of range, 4 == 4.0 == 4.0.0\n+ return a_list[index]\n+\n+ equals = all(get_el(other.as_list, ind) == get_el(self.as_list, ind)\n+ for ind in range(0, max(len(other.as_list), len(self.as_list))))\n+ if equals:\n+ return 0\n+\n+ # Check greater than or less than\n other_list = other.as_list\n for ind, el in enumerate(self.as_list):\n if ind + 1 > len(other_list):\n@@ -117,8 +129,6 @@\n return -1\n if len(other_list) > len(self.as_list):\n return -1\n- else:\n- return 0\n \n def __gt__(self, other):\n return self.__cmp__(other) == 1\n@@ -131,3 +141,12 @@\n \n def __ge__(self, other):\n return self.__cmp__(other) in [0, 1]\n+\n+ def __eq__(self, other):\n+ return self.__cmp__(other) == 0\n+\n+ def __ne__(self, other):\n+ return not self.__eq__(other)\n+\n+ def __hash__(self):\n+ return str.__hash__(self)\n", "issue": "Version(\"6\") < Version(\"6.0\") == True \nVersion model has a bug.\r\nVersion(\"6\") is considered lower than \"6.0\". It introduced a bug (fixed in #2885) with the default`std`.\r\n\n", "before_files": [{"content": "import re\n\n\nclass Version(str):\n \"\"\" This is NOT an implementation of semver, as users may use any pattern in their versions.\n It is just a helper to parse .-, and compare taking into account integers when possible\n \"\"\"\n version_pattern = re.compile('[.-]')\n\n def __new__(cls, content):\n return str.__new__(cls, content.strip())\n\n @property\n def as_list(self):\n if not hasattr(self, \"_cached_list\"):\n tokens = self.rsplit('+', 1)\n self._base = tokens[0]\n if len(tokens) == 2:\n self._build = tokens[1]\n self._cached_list = []\n tokens = Version.version_pattern.split(tokens[0])\n for item in tokens:\n self._cached_list.append(int(item) if item.isdigit() else item)\n return self._cached_list\n\n def major(self, fill=True):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v = str(self_list[0]) if self_list else \"0\"\n if fill:\n return Version(\".\".join([v, 'Y', 'Z']))\n return Version(v)\n\n def stable(self):\n \"\"\" same as major, but as semver, 0.Y.Z is not considered\n stable, so return it as is\n \"\"\"\n if self.as_list[0] == 0:\n return self\n return self.major()\n\n def minor(self, fill=True):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v0 = str(self_list[0]) if len(self_list) > 0 else \"0\"\n v1 = str(self_list[1]) if len(self_list) > 1 else \"0\"\n if fill:\n return Version(\".\".join([v0, v1, 'Z']))\n return Version(\".\".join([v0, v1]))\n\n def patch(self):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v0 = str(self_list[0]) if len(self_list) > 0 else \"0\"\n v1 = str(self_list[1]) if len(self_list) > 1 else \"0\"\n v2 = str(self_list[2]) if len(self_list) > 2 else \"0\"\n return Version(\".\".join([v0, v1, v2]))\n\n def pre(self):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v0 = str(self_list[0]) if len(self_list) > 0 else \"0\"\n v1 = str(self_list[1]) if len(self_list) > 1 else \"0\"\n v2 = str(self_list[2]) if len(self_list) > 2 else \"0\"\n v = \".\".join([v0, v1, v2])\n if len(self_list) > 3:\n v += \"-%s\" % self_list[3]\n return 
Version(v)\n\n @property\n def build(self):\n if hasattr(self, \"_build\"):\n return self._build\n return \"\"\n\n @property\n def base(self):\n self.as_list\n return Version(self._base)\n\n def compatible(self, other):\n if not isinstance(other, Version):\n other = Version(other)\n for v1, v2 in zip(self.as_list, other.as_list):\n if v1 in [\"X\", \"Y\", \"Z\"] or v2 in [\"X\", \"Y\", \"Z\"]:\n return True\n if v1 != v2:\n return False\n return True\n\n def __cmp__(self, other):\n if other is None:\n return 1\n if not isinstance(other, Version):\n other = Version(other)\n\n other_list = other.as_list\n for ind, el in enumerate(self.as_list):\n if ind + 1 > len(other_list):\n if isinstance(el, int):\n return 1\n return -1\n if not isinstance(el, int) and isinstance(other_list[ind], int):\n # Version compare with 1.4.rc2\n return -1\n elif not isinstance(other_list[ind], int) and isinstance(el, int):\n return 1\n elif el == other_list[ind]:\n continue\n elif el > other_list[ind]:\n return 1\n else:\n return -1\n if len(other_list) > len(self.as_list):\n return -1\n else:\n return 0\n\n def __gt__(self, other):\n return self.__cmp__(other) == 1\n\n def __lt__(self, other):\n return self.__cmp__(other) == -1\n\n def __le__(self, other):\n return self.__cmp__(other) in [0, -1]\n\n def __ge__(self, other):\n return self.__cmp__(other) in [0, 1]\n", "path": "conans/model/version.py"}], "after_files": [{"content": "import re\n\n\nclass Version(str):\n \"\"\" This is NOT an implementation of semver, as users may use any pattern in their versions.\n It is just a helper to parse .-, and compare taking into account integers when possible\n \"\"\"\n version_pattern = re.compile('[.-]')\n\n def __new__(cls, content):\n return str.__new__(cls, content.strip())\n\n @property\n def as_list(self):\n if not hasattr(self, \"_cached_list\"):\n tokens = self.rsplit('+', 1)\n self._base = tokens[0]\n if len(tokens) == 2:\n self._build = tokens[1]\n self._cached_list = []\n tokens = Version.version_pattern.split(tokens[0])\n for item in tokens:\n self._cached_list.append(int(item) if item.isdigit() else item)\n return self._cached_list\n\n def major(self, fill=True):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v = str(self_list[0]) if self_list else \"0\"\n if fill:\n return Version(\".\".join([v, 'Y', 'Z']))\n return Version(v)\n\n def stable(self):\n \"\"\" same as major, but as semver, 0.Y.Z is not considered\n stable, so return it as is\n \"\"\"\n if self.as_list[0] == 0:\n return self\n return self.major()\n\n def minor(self, fill=True):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v0 = str(self_list[0]) if len(self_list) > 0 else \"0\"\n v1 = str(self_list[1]) if len(self_list) > 1 else \"0\"\n if fill:\n return Version(\".\".join([v0, v1, 'Z']))\n return Version(\".\".join([v0, v1]))\n\n def patch(self):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v0 = str(self_list[0]) if len(self_list) > 0 else \"0\"\n v1 = str(self_list[1]) if len(self_list) > 1 else \"0\"\n v2 = str(self_list[2]) if len(self_list) > 2 else \"0\"\n return Version(\".\".join([v0, v1, v2]))\n\n def pre(self):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v0 = str(self_list[0]) if len(self_list) > 0 else \"0\"\n v1 = str(self_list[1]) if len(self_list) > 1 else \"0\"\n v2 = str(self_list[2]) if len(self_list) > 2 else \"0\"\n v = \".\".join([v0, v1, v2])\n if 
len(self_list) > 3:\n v += \"-%s\" % self_list[3]\n return Version(v)\n\n @property\n def build(self):\n if hasattr(self, \"_build\"):\n return self._build\n return \"\"\n\n @property\n def base(self):\n self.as_list\n return Version(self._base)\n\n def compatible(self, other):\n if not isinstance(other, Version):\n other = Version(other)\n for v1, v2 in zip(self.as_list, other.as_list):\n if v1 in [\"X\", \"Y\", \"Z\"] or v2 in [\"X\", \"Y\", \"Z\"]:\n return True\n if v1 != v2:\n return False\n return True\n\n def __cmp__(self, other):\n if other is None:\n return 1\n if not isinstance(other, Version):\n other = Version(other)\n\n # Check equals\n def get_el(a_list, index):\n if len(a_list) - 1 < index:\n return 0 # out of range, 4 == 4.0 == 4.0.0\n return a_list[index]\n\n equals = all(get_el(other.as_list, ind) == get_el(self.as_list, ind)\n for ind in range(0, max(len(other.as_list), len(self.as_list))))\n if equals:\n return 0\n\n # Check greater than or less than\n other_list = other.as_list\n for ind, el in enumerate(self.as_list):\n if ind + 1 > len(other_list):\n if isinstance(el, int):\n return 1\n return -1\n if not isinstance(el, int) and isinstance(other_list[ind], int):\n # Version compare with 1.4.rc2\n return -1\n elif not isinstance(other_list[ind], int) and isinstance(el, int):\n return 1\n elif el == other_list[ind]:\n continue\n elif el > other_list[ind]:\n return 1\n else:\n return -1\n if len(other_list) > len(self.as_list):\n return -1\n\n def __gt__(self, other):\n return self.__cmp__(other) == 1\n\n def __lt__(self, other):\n return self.__cmp__(other) == -1\n\n def __le__(self, other):\n return self.__cmp__(other) in [0, -1]\n\n def __ge__(self, other):\n return self.__cmp__(other) in [0, 1]\n\n def __eq__(self, other):\n return self.__cmp__(other) == 0\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return str.__hash__(self)\n", "path": "conans/model/version.py"}]} | 1,713 | 387 |
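The padding rule that makes this patch work can be isolated into a few lines. The following is only a stand-alone illustration, not Conan's `Version` class: missing trailing components count as zero, so "6" and "6.0" compare equal, while pre-release/string tokens (which the real class still handles separately) are ignored here.

```python
import re


def components(version):
    """Split on '.' and '-' and int-convert numeric tokens (mirrors Version.as_list)."""
    return [int(t) if t.isdigit() else t for t in re.split(r"[.-]", version.strip())]


def compare(a, b):
    """Return -1, 0 or 1, padding missing trailing parts with 0 (so 4 == 4.0 == 4.0.0)."""
    xs, ys = components(a), components(b)

    def get_el(parts, index):
        return parts[index] if index < len(parts) else 0   # out of range -> 0, as in the patch

    for index in range(max(len(xs), len(ys))):
        x, y = get_el(xs, index), get_el(ys, index)
        if x == y:
            continue
        return 1 if x > y else -1
    return 0


assert compare("6", "6.0") == 0      # the pair reported in the issue
assert compare("6", "6.1") == -1
assert compare("6.0.1", "6") == 1
```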
gh_patches_debug_25246 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-2734 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IndexError in _wrap_get_create
### Which version of dd-trace-py are you using? 0.50.3
### Which version of pip are you using? 20.3.3
### Which version of the libraries are you using? `dogpile.cache==0.7.1`
### How can we reproduce your problem?
Install redis if needed (on macOS):
```
brew install redis
brew services start redis
```
Create a dogpile region cache:
```
from dogpile.cache import make_region
region = make_region("foo",
key_mangler=lambda key: '{}:{}:{}'.format("foo", "env", key)) \
.configure('dogpile.cache.redis', arguments=dict(url="redis://localhost:6379")
```
Call `get_or_create` using only kwargs:
```
return token_cache.get_or_create(
key="cache_key", creator=lambda: make_value(foo, bar, buzz))
```
Invoke your script via ddtrace-run:
```
ddtrace-run python3 test_ddtrace_and_dogpile.py
```
### What is the result that you get?
```
[ERROR] IndexError: tuple index out of range
Traceback (most recent call last):
File "/var/lang/lib/python3.8/site-packages/datadog_lambda/wrapper.py", line 54, in __call__
return self.func(*args, **kwargs)
File "/var/lang/lib/python3.8/site-packages/datadog_lambda/wrapper.py", line 124, in __call__
self.response = self.func(event, context, **kwargs)
File "/var/task/handler.py", line 105, in execute_task
executor = executor_getter(event)
File "/var/task/handler.py", line 43, in get_step_executor
executor = ETLStepExecutor(Worker.lambda_instance.value, **get_parameters_for_inference_pipeline(event))
File "/var/lang/lib/python3.8/site-packages/sondes/containers/executors/base.py", line 24, in __init__
client = get_third_party_client(kwargs["third_party_module_name"])(
File "/var/task/caiso/client.py", line 17, in __init__
self.api = get_caiso_api_client(
File "/var/task/caiso/utility/caiso_api_client.py", line 40, in get_caiso_api_client
token = get_login_token(service_url, username, password, worker, environment, cache_host)
File "/var/task/caiso/utility/caiso_api_client.py", line 32, in get_login_token
return token_cache.get_or_create(
File "/var/lang/lib/python3.8/site-packages/ddtrace/contrib/dogpile_cache/region.py", line 14, in _wrap_get_create
key = args[0]
```
### What is the result that you expected?
No error
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/dogpile_cache/region.py`
Content:
```
1 import dogpile
2
3 from ddtrace.ext import SpanTypes
4
5 from ...constants import SPAN_MEASURED_KEY
6 from ...pin import Pin
7
8
9 def _wrap_get_create(func, instance, args, kwargs):
10 pin = Pin.get_from(dogpile.cache)
11 if not pin or not pin.enabled():
12 return func(*args, **kwargs)
13
14 key = args[0]
15 with pin.tracer.trace("dogpile.cache", resource="get_or_create", span_type=SpanTypes.CACHE) as span:
16 span.set_tag(SPAN_MEASURED_KEY)
17 span.set_tag("key", key)
18 span.set_tag("region", instance.name)
19 span.set_tag("backend", instance.actual_backend.__class__.__name__)
20 return func(*args, **kwargs)
21
22
23 def _wrap_get_create_multi(func, instance, args, kwargs):
24 pin = Pin.get_from(dogpile.cache)
25 if not pin or not pin.enabled():
26 return func(*args, **kwargs)
27
28 keys = args[0]
29 with pin.tracer.trace("dogpile.cache", resource="get_or_create_multi", span_type="cache") as span:
30 span.set_tag(SPAN_MEASURED_KEY)
31 span.set_tag("keys", keys)
32 span.set_tag("region", instance.name)
33 span.set_tag("backend", instance.actual_backend.__class__.__name__)
34 return func(*args, **kwargs)
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/dogpile_cache/region.py b/ddtrace/contrib/dogpile_cache/region.py
--- a/ddtrace/contrib/dogpile_cache/region.py
+++ b/ddtrace/contrib/dogpile_cache/region.py
@@ -4,6 +4,7 @@
from ...constants import SPAN_MEASURED_KEY
from ...pin import Pin
+from ...utils import get_argument_value
def _wrap_get_create(func, instance, args, kwargs):
@@ -11,7 +12,7 @@
if not pin or not pin.enabled():
return func(*args, **kwargs)
- key = args[0]
+ key = get_argument_value(args, kwargs, 0, "key")
with pin.tracer.trace("dogpile.cache", resource="get_or_create", span_type=SpanTypes.CACHE) as span:
span.set_tag(SPAN_MEASURED_KEY)
span.set_tag("key", key)
@@ -25,7 +26,7 @@
if not pin or not pin.enabled():
return func(*args, **kwargs)
- keys = args[0]
+ keys = get_argument_value(args, kwargs, 0, "keys")
with pin.tracer.trace("dogpile.cache", resource="get_or_create_multi", span_type="cache") as span:
span.set_tag(SPAN_MEASURED_KEY)
span.set_tag("keys", keys)
| {"golden_diff": "diff --git a/ddtrace/contrib/dogpile_cache/region.py b/ddtrace/contrib/dogpile_cache/region.py\n--- a/ddtrace/contrib/dogpile_cache/region.py\n+++ b/ddtrace/contrib/dogpile_cache/region.py\n@@ -4,6 +4,7 @@\n \n from ...constants import SPAN_MEASURED_KEY\n from ...pin import Pin\n+from ...utils import get_argument_value\n \n \n def _wrap_get_create(func, instance, args, kwargs):\n@@ -11,7 +12,7 @@\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n \n- key = args[0]\n+ key = get_argument_value(args, kwargs, 0, \"key\")\n with pin.tracer.trace(\"dogpile.cache\", resource=\"get_or_create\", span_type=SpanTypes.CACHE) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(\"key\", key)\n@@ -25,7 +26,7 @@\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n \n- keys = args[0]\n+ keys = get_argument_value(args, kwargs, 0, \"keys\")\n with pin.tracer.trace(\"dogpile.cache\", resource=\"get_or_create_multi\", span_type=\"cache\") as span:\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(\"keys\", keys)\n", "issue": "IndexError in _wrap_get_create\n### Which version of dd-trace-py are you using? 0.50.3\r\n\r\n### Which version of pip are you using? 20.3.3\r\n\r\n### Which version of the libraries are you using? `dogpile.cache==0.7.1`\r\n\r\n### How can we reproduce your problem?\r\n\r\nInstall redis if needed (on macOS):\r\n```\r\nbrew install redis\r\nbrew services start redis\r\n```\r\n\r\nCreate a dogpile region cache:\r\n```\r\nfrom dogpile.cache import make_region\r\nregion = make_region(\"foo\",\r\n key_mangler=lambda key: '{}:{}:{}'.format(\"foo\", \"env\", key)) \\\r\n .configure('dogpile.cache.redis', arguments=dict(url=\"redis://localhost:6379\")\r\n```\r\n\r\nCall `get_or_create` using only kwargs:\r\n\r\n```\r\n return token_cache.get_or_create(\r\n key=\"cache_key\", creator=lambda: make_value(foo, bar, buzz))\r\n\r\n```\r\n\r\nInvoke your script via ddtrace-run:\r\n\r\n```\r\nddtrace-run python3 test_ddtrace_and_dogpile.py\r\n```\r\n\r\n### What is the result that you get?\r\n\r\n```\r\n[ERROR] IndexError: tuple index out of range\r\nTraceback (most recent call last):\r\n File \"/var/lang/lib/python3.8/site-packages/datadog_lambda/wrapper.py\", line 54, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"/var/lang/lib/python3.8/site-packages/datadog_lambda/wrapper.py\", line 124, in __call__\r\n self.response = self.func(event, context, **kwargs)\r\n File \"/var/task/handler.py\", line 105, in execute_task\r\n executor = executor_getter(event)\r\n File \"/var/task/handler.py\", line 43, in get_step_executor\r\n executor = ETLStepExecutor(Worker.lambda_instance.value, **get_parameters_for_inference_pipeline(event))\r\n File \"/var/lang/lib/python3.8/site-packages/sondes/containers/executors/base.py\", line 24, in __init__\r\n client = get_third_party_client(kwargs[\"third_party_module_name\"])(\r\n File \"/var/task/caiso/client.py\", line 17, in __init__\r\n self.api = get_caiso_api_client(\r\n File \"/var/task/caiso/utility/caiso_api_client.py\", line 40, in get_caiso_api_client\r\n token = get_login_token(service_url, username, password, worker, environment, cache_host)\r\n File \"/var/task/caiso/utility/caiso_api_client.py\", line 32, in get_login_token\r\n return token_cache.get_or_create(\r\n File \"/var/lang/lib/python3.8/site-packages/ddtrace/contrib/dogpile_cache/region.py\", line 14, in _wrap_get_create\r\n key = args[0]\r\n```\r\n\r\n### What is the result that you expected?\r\nNo error \n", "before_files": 
[{"content": "import dogpile\n\nfrom ddtrace.ext import SpanTypes\n\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...pin import Pin\n\n\ndef _wrap_get_create(func, instance, args, kwargs):\n pin = Pin.get_from(dogpile.cache)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n key = args[0]\n with pin.tracer.trace(\"dogpile.cache\", resource=\"get_or_create\", span_type=SpanTypes.CACHE) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(\"key\", key)\n span.set_tag(\"region\", instance.name)\n span.set_tag(\"backend\", instance.actual_backend.__class__.__name__)\n return func(*args, **kwargs)\n\n\ndef _wrap_get_create_multi(func, instance, args, kwargs):\n pin = Pin.get_from(dogpile.cache)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n keys = args[0]\n with pin.tracer.trace(\"dogpile.cache\", resource=\"get_or_create_multi\", span_type=\"cache\") as span:\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(\"keys\", keys)\n span.set_tag(\"region\", instance.name)\n span.set_tag(\"backend\", instance.actual_backend.__class__.__name__)\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/dogpile_cache/region.py"}], "after_files": [{"content": "import dogpile\n\nfrom ddtrace.ext import SpanTypes\n\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...pin import Pin\nfrom ...utils import get_argument_value\n\n\ndef _wrap_get_create(func, instance, args, kwargs):\n pin = Pin.get_from(dogpile.cache)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n key = get_argument_value(args, kwargs, 0, \"key\")\n with pin.tracer.trace(\"dogpile.cache\", resource=\"get_or_create\", span_type=SpanTypes.CACHE) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(\"key\", key)\n span.set_tag(\"region\", instance.name)\n span.set_tag(\"backend\", instance.actual_backend.__class__.__name__)\n return func(*args, **kwargs)\n\n\ndef _wrap_get_create_multi(func, instance, args, kwargs):\n pin = Pin.get_from(dogpile.cache)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n keys = get_argument_value(args, kwargs, 0, \"keys\")\n with pin.tracer.trace(\"dogpile.cache\", resource=\"get_or_create_multi\", span_type=\"cache\") as span:\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(\"keys\", keys)\n span.set_tag(\"region\", instance.name)\n span.set_tag(\"backend\", instance.actual_backend.__class__.__name__)\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/dogpile_cache/region.py"}]} | 1,250 | 313 |
gh_patches_debug_20132 | rasdani/github-patches | git_diff | ipython__ipython-6827 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/files/ only serving attachments
https://github.com/ipython/ipython/pull/6624 changes the behavior of serving HTML files from '/files/'.
Instead of displaying an HTML page, I get the option to download it as a file.
Before I got as a response:
``` Python
import requests
r = requests.get('http://localhost:8888/files/Untitled10.html')
print(r.headers)
{'Last-Modified': 'Sat, 23 Aug 2014 16:03:02 GMT',
'Set-Cookie': 'username-localhost-8888=; expires=Thu, 31 Oct 2013 22:14:47 GMT; Path=/',
'Content-Type': 'text/html', 'X-Frame-Options': 'SAMEORIGIN',
'Server': 'TornadoServer/4.0.2', 'Date': 'Fri, 31 Oct 2014 22:14:47 GMT',
'Content-Length': '237903', 'Accept-Ranges': 'bytes'}
```
Now I get
``` Python
r = requests.get('http://localhost:8888/files/Untitled10.html')
print(r.headers)
{'Date': 'Fri, 31 Oct 2014 22:17:27 GMT',
'Transfer-Encoding': 'chunked', 'X-Frame-Options': 'SAMEORIGIN',
'Set-Cookie': 'username-localhost-8888=; expires=Thu, 31 Oct 2013 22:17:27 GMT; Path=/',
'Server': 'TornadoServer/4.0.2', 'Content-Type': 'text/html',
'Content-Disposition': 'attachment; filename="Untitled10.html"'}
```
Is this an intended new behavior?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/html/files/handlers.py`
Content:
```
1 """Serve files directly from the ContentsManager."""
2
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 import os
7 import mimetypes
8 import json
9 import base64
10
11 from tornado import web
12
13 from IPython.html.base.handlers import IPythonHandler
14
15 class FilesHandler(IPythonHandler):
16 """serve files via ContentsManager"""
17
18 @web.authenticated
19 def get(self, path):
20 cm = self.settings['contents_manager']
21 if cm.is_hidden(path):
22 self.log.info("Refusing to serve hidden file, via 404 Error")
23 raise web.HTTPError(404)
24
25 path, name = os.path.split(path)
26 model = cm.get_model(name, path)
27
28 if model['type'] == 'notebook':
29 self.set_header('Content-Type', 'application/json')
30 else:
31 cur_mime = mimetypes.guess_type(name)[0]
32 if cur_mime is not None:
33 self.set_header('Content-Type', cur_mime)
34
35 self.set_header('Content-Disposition','attachment; filename="%s"' % name)
36
37 if model['format'] == 'base64':
38 b64_bytes = model['content'].encode('ascii')
39 self.write(base64.decodestring(b64_bytes))
40 elif model['format'] == 'json':
41 self.write(json.dumps(model['content']))
42 else:
43 self.write(model['content'])
44 self.flush()
45
46 default_handlers = [
47 (r"/files/(.*)", FilesHandler),
48 ]
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/html/files/handlers.py b/IPython/html/files/handlers.py
--- a/IPython/html/files/handlers.py
+++ b/IPython/html/files/handlers.py
@@ -24,7 +24,10 @@
path, name = os.path.split(path)
model = cm.get_model(name, path)
-
+
+ if self.get_argument("download", False):
+ self.set_header('Content-Disposition','attachment; filename="%s"' % name)
+
if model['type'] == 'notebook':
self.set_header('Content-Type', 'application/json')
else:
@@ -32,8 +35,6 @@
if cur_mime is not None:
self.set_header('Content-Type', cur_mime)
- self.set_header('Content-Disposition','attachment; filename="%s"' % name)
-
if model['format'] == 'base64':
b64_bytes = model['content'].encode('ascii')
self.write(base64.decodestring(b64_bytes))
| {"golden_diff": "diff --git a/IPython/html/files/handlers.py b/IPython/html/files/handlers.py\n--- a/IPython/html/files/handlers.py\n+++ b/IPython/html/files/handlers.py\n@@ -24,7 +24,10 @@\n \n path, name = os.path.split(path)\n model = cm.get_model(name, path)\n-\n+ \n+ if self.get_argument(\"download\", False):\n+ self.set_header('Content-Disposition','attachment; filename=\"%s\"' % name)\n+ \n if model['type'] == 'notebook':\n self.set_header('Content-Type', 'application/json')\n else:\n@@ -32,8 +35,6 @@\n if cur_mime is not None:\n self.set_header('Content-Type', cur_mime)\n \n- self.set_header('Content-Disposition','attachment; filename=\"%s\"' % name)\n-\n if model['format'] == 'base64':\n b64_bytes = model['content'].encode('ascii')\n self.write(base64.decodestring(b64_bytes))\n", "issue": "/files/ only serving attachments\nhttps://github.com/ipython/ipython/pull/6624 changes the behavior of serving html files from '/files/'.\nInstead of displaying a html page, I get the option to download it as file.\n\nBefore I got as a response:\n\n``` Python\nimport requests\nr = requests.get('http://localhost:8888/files/Untitled10.html')\nprint(r.headers)\n\n{'Last-Modified': 'Sat, 23 Aug 2014 16:03:02 GMT', \n'Set-Cookie': 'username-localhost-8888=; expires=Thu, 31 Oct 2013 22:14:47 GMT; Path=/',\n'Content-Type': 'text/html', 'X-Frame-Options': 'SAMEORIGIN', \n'Server': 'TornadoServer/4.0.2', 'Date': 'Fri, 31 Oct 2014 22:14:47 GMT', \n'Content-Length': '237903', 'Accept-Ranges': 'bytes'}\n```\n\nNow I get\n\n``` Python\nr = requests.get('http://localhost:8888/files/Untitled10.html')\nprint(r.headers)\n\n{'Date': 'Fri, 31 Oct 2014 22:17:27 GMT', \n'Transfer-Encoding': 'chunked', 'X-Frame-Options': 'SAMEORIGIN', \n'Set-Cookie': 'username-localhost-8888=; expires=Thu, 31 Oct 2013 22:17:27 GMT; Path=/',\n 'Server': 'TornadoServer/4.0.2', 'Content-Type': 'text/html', \n'Content-Disposition': 'attachment; filename=\"Untitled10.html\"'}\n```\n\nIs this an intended new behavior ?\n\n", "before_files": [{"content": "\"\"\"Serve files directly from the ContentsManager.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\nimport mimetypes\nimport json\nimport base64\n\nfrom tornado import web\n\nfrom IPython.html.base.handlers import IPythonHandler\n\nclass FilesHandler(IPythonHandler):\n \"\"\"serve files via ContentsManager\"\"\"\n\n @web.authenticated\n def get(self, path):\n cm = self.settings['contents_manager']\n if cm.is_hidden(path):\n self.log.info(\"Refusing to serve hidden file, via 404 Error\")\n raise web.HTTPError(404)\n\n path, name = os.path.split(path)\n model = cm.get_model(name, path)\n\n if model['type'] == 'notebook':\n self.set_header('Content-Type', 'application/json')\n else:\n cur_mime = mimetypes.guess_type(name)[0]\n if cur_mime is not None:\n self.set_header('Content-Type', cur_mime)\n \n self.set_header('Content-Disposition','attachment; filename=\"%s\"' % name)\n\n if model['format'] == 'base64':\n b64_bytes = model['content'].encode('ascii')\n self.write(base64.decodestring(b64_bytes))\n elif model['format'] == 'json':\n self.write(json.dumps(model['content']))\n else:\n self.write(model['content'])\n self.flush()\n\ndefault_handlers = [\n (r\"/files/(.*)\", FilesHandler),\n]", "path": "IPython/html/files/handlers.py"}], "after_files": [{"content": "\"\"\"Serve files directly from the ContentsManager.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified 
BSD License.\n\nimport os\nimport mimetypes\nimport json\nimport base64\n\nfrom tornado import web\n\nfrom IPython.html.base.handlers import IPythonHandler\n\nclass FilesHandler(IPythonHandler):\n \"\"\"serve files via ContentsManager\"\"\"\n\n @web.authenticated\n def get(self, path):\n cm = self.settings['contents_manager']\n if cm.is_hidden(path):\n self.log.info(\"Refusing to serve hidden file, via 404 Error\")\n raise web.HTTPError(404)\n\n path, name = os.path.split(path)\n model = cm.get_model(name, path)\n \n if self.get_argument(\"download\", False):\n self.set_header('Content-Disposition','attachment; filename=\"%s\"' % name)\n \n if model['type'] == 'notebook':\n self.set_header('Content-Type', 'application/json')\n else:\n cur_mime = mimetypes.guess_type(name)[0]\n if cur_mime is not None:\n self.set_header('Content-Type', cur_mime)\n \n if model['format'] == 'base64':\n b64_bytes = model['content'].encode('ascii')\n self.write(base64.decodestring(b64_bytes))\n elif model['format'] == 'json':\n self.write(json.dumps(model['content']))\n else:\n self.write(model['content'])\n self.flush()\n\ndefault_handlers = [\n (r\"/files/(.*)\", FilesHandler),\n]", "path": "IPython/html/files/handlers.py"}]} | 1,093 | 229 |
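After this patch the attachment header is opt-in through a `?download=...` query argument. A quick manual check against a local notebook server, in the same style as the issue's reproduction (the URL, filename, and the password-less 2014-era server setup are assumptions carried over from the issue):

```python
import requests

base = "http://localhost:8888/files/Untitled10.html"   # placeholder path from the issue

# Plain request: the handler no longer sets Content-Disposition, so the page renders inline.
print(requests.get(base).headers.get("Content-Disposition"))            # expected: None

# Opt-in download: self.get_argument("download", False) sees a non-empty value.
print(requests.get(base, params={"download": 1}).headers.get("Content-Disposition"))
# expected: attachment; filename="Untitled10.html"
```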
gh_patches_debug_7870 | rasdani/github-patches | git_diff | pytorch__examples-897 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo
https://github.com/pytorch/examples/blob/4db11160c21d0e26634ca1fcb94a73ad8d870ba7/snli/train.py#L122
`validation` instead of `valiation`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snli/train.py`
Content:
```
1 import os
2 import time
3 import glob
4
5 import torch
6 import torch.optim as O
7 import torch.nn as nn
8
9 from torchtext import data
10 from torchtext import datasets
11
12 from model import SNLIClassifier
13 from util import get_args, makedirs
14
15
16 args = get_args()
17 if torch.cuda.is_available():
18 torch.cuda.set_device(args.gpu)
19 device = torch.device('cuda:{}'.format(args.gpu))
20 else:
21 device = torch.device('cpu')
22
23 inputs = data.Field(lower=args.lower, tokenize='spacy')
24 answers = data.Field(sequential=False)
25
26 train, dev, test = datasets.SNLI.splits(inputs, answers)
27
28 inputs.build_vocab(train, dev, test)
29 if args.word_vectors:
30 if os.path.isfile(args.vector_cache):
31 inputs.vocab.vectors = torch.load(args.vector_cache)
32 else:
33 inputs.vocab.load_vectors(args.word_vectors)
34 makedirs(os.path.dirname(args.vector_cache))
35 torch.save(inputs.vocab.vectors, args.vector_cache)
36 answers.build_vocab(train)
37
38 train_iter, dev_iter, test_iter = data.BucketIterator.splits(
39 (train, dev, test), batch_size=args.batch_size, device=device)
40
41 config = args
42 config.n_embed = len(inputs.vocab)
43 config.d_out = len(answers.vocab)
44 config.n_cells = config.n_layers
45
46 # double the number of cells for bidirectional networks
47 if config.birnn:
48 config.n_cells *= 2
49
50 if args.resume_snapshot:
51 model = torch.load(args.resume_snapshot, map_location=device)
52 else:
53 model = SNLIClassifier(config)
54 if args.word_vectors:
55 model.embed.weight.data.copy_(inputs.vocab.vectors)
56 model.to(device)
57
58 criterion = nn.CrossEntropyLoss()
59 opt = O.Adam(model.parameters(), lr=args.lr)
60
61 iterations = 0
62 start = time.time()
63 best_dev_acc = -1
64 header = ' Time Epoch Iteration Progress (%Epoch) Loss Dev/Loss Accuracy Dev/Accuracy'
65 dev_log_template = ' '.join('{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{:8.6f},{:12.4f},{:12.4f}'.split(','))
66 log_template = ' '.join('{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{},{:12.4f},{}'.split(','))
67 makedirs(args.save_path)
68 print(header)
69
70 for epoch in range(args.epochs):
71 train_iter.init_epoch()
72 n_correct, n_total = 0, 0
73 for batch_idx, batch in enumerate(train_iter):
74
75 # switch model to training mode, clear gradient accumulators
76 model.train(); opt.zero_grad()
77
78 iterations += 1
79
80 # forward pass
81 answer = model(batch)
82
83 # calculate accuracy of predictions in the current batch
84 n_correct += (torch.max(answer, 1)[1].view(batch.label.size()) == batch.label).sum().item()
85 n_total += batch.batch_size
86 train_acc = 100. * n_correct/n_total
87
88 # calculate loss of the network output with respect to training labels
89 loss = criterion(answer, batch.label)
90
91 # backpropagate and update optimizer learning rate
92 loss.backward(); opt.step()
93
94 # checkpoint model periodically
95 if iterations % args.save_every == 0:
96 snapshot_prefix = os.path.join(args.save_path, 'snapshot')
97 snapshot_path = snapshot_prefix + '_acc_{:.4f}_loss_{:.6f}_iter_{}_model.pt'.format(train_acc, loss.item(), iterations)
98 torch.save(model, snapshot_path)
99 for f in glob.glob(snapshot_prefix + '*'):
100 if f != snapshot_path:
101 os.remove(f)
102
103 # evaluate performance on validation set periodically
104 if iterations % args.dev_every == 0:
105
106 # switch model to evaluation mode
107 model.eval(); dev_iter.init_epoch()
108
109 # calculate accuracy on validation set
110 n_dev_correct, dev_loss = 0, 0
111 with torch.no_grad():
112 for dev_batch_idx, dev_batch in enumerate(dev_iter):
113 answer = model(dev_batch)
114 n_dev_correct += (torch.max(answer, 1)[1].view(dev_batch.label.size()) == dev_batch.label).sum().item()
115 dev_loss = criterion(answer, dev_batch.label)
116 dev_acc = 100. * n_dev_correct / len(dev)
117
118 print(dev_log_template.format(time.time()-start,
119 epoch, iterations, 1+batch_idx, len(train_iter),
120 100. * (1+batch_idx) / len(train_iter), loss.item(), dev_loss.item(), train_acc, dev_acc))
121
122 # update best valiation set accuracy
123 if dev_acc > best_dev_acc:
124
125 # found a model with better validation set accuracy
126
127 best_dev_acc = dev_acc
128 snapshot_prefix = os.path.join(args.save_path, 'best_snapshot')
129 snapshot_path = snapshot_prefix + '_devacc_{}_devloss_{}__iter_{}_model.pt'.format(dev_acc, dev_loss.item(), iterations)
130
131 # save model, delete previous 'best_snapshot' files
132 torch.save(model, snapshot_path)
133 for f in glob.glob(snapshot_prefix + '*'):
134 if f != snapshot_path:
135 os.remove(f)
136
137 elif iterations % args.log_every == 0:
138
139 # print progress message
140 print(log_template.format(time.time()-start,
141 epoch, iterations, 1+batch_idx, len(train_iter),
142 100. * (1+batch_idx) / len(train_iter), loss.item(), ' '*8, n_correct/n_total*100, ' '*12))
143 if args.dry_run:
144 break
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snli/train.py b/snli/train.py
--- a/snli/train.py
+++ b/snli/train.py
@@ -119,7 +119,7 @@
epoch, iterations, 1+batch_idx, len(train_iter),
100. * (1+batch_idx) / len(train_iter), loss.item(), dev_loss.item(), train_acc, dev_acc))
- # update best valiation set accuracy
+ # update best validation set accuracy
if dev_acc > best_dev_acc:
# found a model with better validation set accuracy
| {"golden_diff": "diff --git a/snli/train.py b/snli/train.py\n--- a/snli/train.py\n+++ b/snli/train.py\n@@ -119,7 +119,7 @@\n epoch, iterations, 1+batch_idx, len(train_iter),\n 100. * (1+batch_idx) / len(train_iter), loss.item(), dev_loss.item(), train_acc, dev_acc))\n \n- # update best valiation set accuracy\n+ # update best validation set accuracy\n if dev_acc > best_dev_acc:\n \n # found a model with better validation set accuracy\n", "issue": "Typo\nhttps://github.com/pytorch/examples/blob/4db11160c21d0e26634ca1fcb94a73ad8d870ba7/snli/train.py#L122\r\n\r\n`validation` instead of `valiation`\n", "before_files": [{"content": "import os\nimport time\nimport glob\n\nimport torch\nimport torch.optim as O\nimport torch.nn as nn\n\nfrom torchtext import data\nfrom torchtext import datasets\n\nfrom model import SNLIClassifier\nfrom util import get_args, makedirs\n\n\nargs = get_args()\nif torch.cuda.is_available():\n torch.cuda.set_device(args.gpu)\n device = torch.device('cuda:{}'.format(args.gpu))\nelse:\n device = torch.device('cpu')\n\ninputs = data.Field(lower=args.lower, tokenize='spacy')\nanswers = data.Field(sequential=False)\n\ntrain, dev, test = datasets.SNLI.splits(inputs, answers)\n\ninputs.build_vocab(train, dev, test)\nif args.word_vectors:\n if os.path.isfile(args.vector_cache):\n inputs.vocab.vectors = torch.load(args.vector_cache)\n else:\n inputs.vocab.load_vectors(args.word_vectors)\n makedirs(os.path.dirname(args.vector_cache))\n torch.save(inputs.vocab.vectors, args.vector_cache)\nanswers.build_vocab(train)\n\ntrain_iter, dev_iter, test_iter = data.BucketIterator.splits(\n (train, dev, test), batch_size=args.batch_size, device=device)\n\nconfig = args\nconfig.n_embed = len(inputs.vocab)\nconfig.d_out = len(answers.vocab)\nconfig.n_cells = config.n_layers\n\n# double the number of cells for bidirectional networks\nif config.birnn:\n config.n_cells *= 2\n\nif args.resume_snapshot:\n model = torch.load(args.resume_snapshot, map_location=device)\nelse:\n model = SNLIClassifier(config)\n if args.word_vectors:\n model.embed.weight.data.copy_(inputs.vocab.vectors)\n model.to(device)\n\ncriterion = nn.CrossEntropyLoss()\nopt = O.Adam(model.parameters(), lr=args.lr)\n\niterations = 0\nstart = time.time()\nbest_dev_acc = -1\nheader = ' Time Epoch Iteration Progress (%Epoch) Loss Dev/Loss Accuracy Dev/Accuracy'\ndev_log_template = ' '.join('{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{:8.6f},{:12.4f},{:12.4f}'.split(','))\nlog_template = ' '.join('{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{},{:12.4f},{}'.split(','))\nmakedirs(args.save_path)\nprint(header)\n\nfor epoch in range(args.epochs):\n train_iter.init_epoch()\n n_correct, n_total = 0, 0\n for batch_idx, batch in enumerate(train_iter):\n\n # switch model to training mode, clear gradient accumulators\n model.train(); opt.zero_grad()\n\n iterations += 1\n\n # forward pass\n answer = model(batch)\n\n # calculate accuracy of predictions in the current batch\n n_correct += (torch.max(answer, 1)[1].view(batch.label.size()) == batch.label).sum().item()\n n_total += batch.batch_size\n train_acc = 100. 
* n_correct/n_total\n\n # calculate loss of the network output with respect to training labels\n loss = criterion(answer, batch.label)\n\n # backpropagate and update optimizer learning rate\n loss.backward(); opt.step()\n\n # checkpoint model periodically\n if iterations % args.save_every == 0:\n snapshot_prefix = os.path.join(args.save_path, 'snapshot')\n snapshot_path = snapshot_prefix + '_acc_{:.4f}_loss_{:.6f}_iter_{}_model.pt'.format(train_acc, loss.item(), iterations)\n torch.save(model, snapshot_path)\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n # evaluate performance on validation set periodically\n if iterations % args.dev_every == 0:\n\n # switch model to evaluation mode\n model.eval(); dev_iter.init_epoch()\n\n # calculate accuracy on validation set\n n_dev_correct, dev_loss = 0, 0\n with torch.no_grad():\n for dev_batch_idx, dev_batch in enumerate(dev_iter):\n answer = model(dev_batch)\n n_dev_correct += (torch.max(answer, 1)[1].view(dev_batch.label.size()) == dev_batch.label).sum().item()\n dev_loss = criterion(answer, dev_batch.label)\n dev_acc = 100. * n_dev_correct / len(dev)\n\n print(dev_log_template.format(time.time()-start,\n epoch, iterations, 1+batch_idx, len(train_iter),\n 100. * (1+batch_idx) / len(train_iter), loss.item(), dev_loss.item(), train_acc, dev_acc))\n\n # update best valiation set accuracy\n if dev_acc > best_dev_acc:\n\n # found a model with better validation set accuracy\n\n best_dev_acc = dev_acc\n snapshot_prefix = os.path.join(args.save_path, 'best_snapshot')\n snapshot_path = snapshot_prefix + '_devacc_{}_devloss_{}__iter_{}_model.pt'.format(dev_acc, dev_loss.item(), iterations)\n\n # save model, delete previous 'best_snapshot' files\n torch.save(model, snapshot_path)\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n elif iterations % args.log_every == 0:\n\n # print progress message\n print(log_template.format(time.time()-start,\n epoch, iterations, 1+batch_idx, len(train_iter),\n 100. 
* (1+batch_idx) / len(train_iter), loss.item(), ' '*8, n_correct/n_total*100, ' '*12))\n if args.dry_run:\n break\n", "path": "snli/train.py"}], "after_files": [{"content": "import os\nimport time\nimport glob\n\nimport torch\nimport torch.optim as O\nimport torch.nn as nn\n\nfrom torchtext import data\nfrom torchtext import datasets\n\nfrom model import SNLIClassifier\nfrom util import get_args, makedirs\n\n\nargs = get_args()\nif torch.cuda.is_available():\n torch.cuda.set_device(args.gpu)\n device = torch.device('cuda:{}'.format(args.gpu))\nelse:\n device = torch.device('cpu')\n\ninputs = data.Field(lower=args.lower, tokenize='spacy')\nanswers = data.Field(sequential=False)\n\ntrain, dev, test = datasets.SNLI.splits(inputs, answers)\n\ninputs.build_vocab(train, dev, test)\nif args.word_vectors:\n if os.path.isfile(args.vector_cache):\n inputs.vocab.vectors = torch.load(args.vector_cache)\n else:\n inputs.vocab.load_vectors(args.word_vectors)\n makedirs(os.path.dirname(args.vector_cache))\n torch.save(inputs.vocab.vectors, args.vector_cache)\nanswers.build_vocab(train)\n\ntrain_iter, dev_iter, test_iter = data.BucketIterator.splits(\n (train, dev, test), batch_size=args.batch_size, device=device)\n\nconfig = args\nconfig.n_embed = len(inputs.vocab)\nconfig.d_out = len(answers.vocab)\nconfig.n_cells = config.n_layers\n\n# double the number of cells for bidirectional networks\nif config.birnn:\n config.n_cells *= 2\n\nif args.resume_snapshot:\n model = torch.load(args.resume_snapshot, map_location=device)\nelse:\n model = SNLIClassifier(config)\n if args.word_vectors:\n model.embed.weight.data.copy_(inputs.vocab.vectors)\n model.to(device)\n\ncriterion = nn.CrossEntropyLoss()\nopt = O.Adam(model.parameters(), lr=args.lr)\n\niterations = 0\nstart = time.time()\nbest_dev_acc = -1\nheader = ' Time Epoch Iteration Progress (%Epoch) Loss Dev/Loss Accuracy Dev/Accuracy'\ndev_log_template = ' '.join('{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{:8.6f},{:12.4f},{:12.4f}'.split(','))\nlog_template = ' '.join('{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{},{:12.4f},{}'.split(','))\nmakedirs(args.save_path)\nprint(header)\n\nfor epoch in range(args.epochs):\n train_iter.init_epoch()\n n_correct, n_total = 0, 0\n for batch_idx, batch in enumerate(train_iter):\n\n # switch model to training mode, clear gradient accumulators\n model.train(); opt.zero_grad()\n\n iterations += 1\n\n # forward pass\n answer = model(batch)\n\n # calculate accuracy of predictions in the current batch\n n_correct += (torch.max(answer, 1)[1].view(batch.label.size()) == batch.label).sum().item()\n n_total += batch.batch_size\n train_acc = 100. 
* n_correct/n_total\n\n # calculate loss of the network output with respect to training labels\n loss = criterion(answer, batch.label)\n\n # backpropagate and update optimizer learning rate\n loss.backward(); opt.step()\n\n # checkpoint model periodically\n if iterations % args.save_every == 0:\n snapshot_prefix = os.path.join(args.save_path, 'snapshot')\n snapshot_path = snapshot_prefix + '_acc_{:.4f}_loss_{:.6f}_iter_{}_model.pt'.format(train_acc, loss.item(), iterations)\n torch.save(model, snapshot_path)\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n # evaluate performance on validation set periodically\n if iterations % args.dev_every == 0:\n\n # switch model to evaluation mode\n model.eval(); dev_iter.init_epoch()\n\n # calculate accuracy on validation set\n n_dev_correct, dev_loss = 0, 0\n with torch.no_grad():\n for dev_batch_idx, dev_batch in enumerate(dev_iter):\n answer = model(dev_batch)\n n_dev_correct += (torch.max(answer, 1)[1].view(dev_batch.label.size()) == dev_batch.label).sum().item()\n dev_loss = criterion(answer, dev_batch.label)\n dev_acc = 100. * n_dev_correct / len(dev)\n\n print(dev_log_template.format(time.time()-start,\n epoch, iterations, 1+batch_idx, len(train_iter),\n 100. * (1+batch_idx) / len(train_iter), loss.item(), dev_loss.item(), train_acc, dev_acc))\n\n # update best validation set accuracy\n if dev_acc > best_dev_acc:\n\n # found a model with better validation set accuracy\n\n best_dev_acc = dev_acc\n snapshot_prefix = os.path.join(args.save_path, 'best_snapshot')\n snapshot_path = snapshot_prefix + '_devacc_{}_devloss_{}__iter_{}_model.pt'.format(dev_acc, dev_loss.item(), iterations)\n\n # save model, delete previous 'best_snapshot' files\n torch.save(model, snapshot_path)\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n elif iterations % args.log_every == 0:\n\n # print progress message\n print(log_template.format(time.time()-start,\n epoch, iterations, 1+batch_idx, len(train_iter),\n 100. * (1+batch_idx) / len(train_iter), loss.item(), ' '*8, n_correct/n_total*100, ' '*12))\n if args.dry_run:\n break\n", "path": "snli/train.py"}]} | 1,937 | 129 |
gh_patches_debug_33413 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1466 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeprecationWarning on `pkg_resources` in Python 3.10
### Expected behaviour
ODC running in Python 3.10 shouldn't print `DeprecationWarning`s.
### Actual behaviour
When running some ODC code:
```
.../testenv/lib/python3.10/site-packages/datacube/drivers/driver_cache.py:54: DeprecationWarning: pkg_resources is deprecated as an API
from pkg_resources import iter_entry_points
```
Which is: https://github.com/opendatacube/datacube-core/blob/develop/datacube/drivers/driver_cache.py#L29
This code should be replaced with modules from the Python standard library, see: [Package Discovery and Resource Access using pkg_resources - setuptools 68.0.0.post20230621 documentation](https://setuptools.pypa.io/en/latest/pkg_resources.html) and [importlib.resources – Resources — Python 3.11.4 documentation](https://docs.python.org/3/library/importlib.resources.html#module-importlib.resources).
We've already set the minimum Python version to 3.8, and the stdlib modules have existed since Python 3.7, so we don't need to add the external backports as dependencies.
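
For illustration, here is a minimal sketch of the stdlib replacement (it mirrors the golden diff further down in this record; note that the keyword form `entry_points(group=...)` needs Python 3.10+, so older interpreters would have to fall back to the `importlib_metadata` backport):

```python
# Sketch only -- not the actual patch.
from importlib.metadata import entry_points


def resolve_all(group: str):
    """Yield (name, loaded object) for each entry point registered under `group`."""
    for ep in entry_points(group=group):  # stdlib replacement for pkg_resources.iter_entry_points
        yield ep.name, ep.load()


if __name__ == "__main__":
    # Smoke test: list installed console-script names without importing pkg_resources.
    print([ep.name for ep in entry_points(group="console_scripts")])
```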
### Environment information
* Which ``datacube --version`` are you using?
`Open Data Cube core, version 1.8.13`
* What datacube deployment/environment are you running against?
Local installation from conda-forge.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/drivers/driver_cache.py`
Content:
```
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2023 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 import logging
6 from typing import Dict, Any, Tuple, Iterable
7
8 _LOG = logging.getLogger(__name__)
9
10
11 def load_drivers(group: str) -> Dict[str, Any]:
12 """
13 Load available drivers for a given group name.
14
15 Gracefully handles:
16
17 - Driver module not able to be imported
18 - Driver init function throwing an exception or returning None
19
20 By having driver entry_points pointing to a function, we defer loading the driver
21 module or running any code until required.
22
23 :param group: Name of the entry point group e.g. "datacube.plugins.io.read"
24
25 :returns: Dictionary String -> Driver Object
26 """
27
28 def safe_load(ep):
29 from pkg_resources import DistributionNotFound
30 # pylint: disable=broad-except,bare-except
31 try:
32 driver_init = ep.load()
33 except DistributionNotFound:
34 # This happens when entry points were marked with extra features,
35 # but extra feature were not requested for installation
36 return None
37 except Exception as e:
38 _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)
39 _LOG.warning('Error was: %s', repr(e))
40 return None
41
42 try:
43 driver = driver_init()
44 except Exception:
45 _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)
46 return None
47
48 if driver is None:
49 _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)
50
51 return driver
52
53 def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:
54 from pkg_resources import iter_entry_points
55 for ep in iter_entry_points(group=group, name=None):
56 driver = safe_load(ep)
57 if driver is not None:
58 yield (ep.name, driver)
59
60 return dict((name, driver) for name, driver in resolve_all(group))
61
```
Path: `docs/click_utils.py`
Content:
```
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2023 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 import pkg_resources
6 from docutils.nodes import literal_block, section, title, make_id
7 from sphinx.domains import Domain
8 from docutils.parsers.rst import Directive
9 import importlib
10
11 import click
12
13
14 class ClickHelpDirective(Directive):
15 has_content = True
16 required_arguments = 1
17
18 def run(self):
19 root_cmd = self.arguments[0]
20
21 env = self.state.document.settings.env
22
23 group = find_script_callable_from_env(root_cmd, env)
24
25 return [generate_help_text(group, [root_cmd])]
26
27
28 def find_script_callable_from_env(name, env):
29 commands = env.config.click_utils_commands
30
31 module, function_name = commands[name].split(':')
32 module = importlib.import_module(module)
33 return getattr(module, function_name)
34
35
36 def find_script_callable(name):
37 return list(pkg_resources.iter_entry_points(
38 'console_scripts', name))[0].load()
39
40
41 def generate_help_text(command, prefix):
42 ctx = click.Context(command)
43 help_opts = command.get_help_option(ctx).opts
44 full_cmd = ' '.join(prefix)
45 block = section(None,
46 title(None, full_cmd),
47 ids=[make_id(full_cmd)], names=[full_cmd])
48 if help_opts:
49 h = "$ {} {}\n".format(full_cmd, help_opts[0]) + command.get_help(ctx)
50 block.append(literal_block(None, h, language='console'))
51
52 if isinstance(command, click.core.MultiCommand):
53 for c in command.list_commands(ctx):
54 c = command.resolve_command(ctx, [c])[1]
55 block.append(generate_help_text(c, prefix+[c.name]))
56
57 return block
58
59
60 def make_block(command, opt, content):
61 h = "$ {} {}\n".format(command, opt) + content
62 return section(None,
63 title(None, command),
64 literal_block(None, h, language='console'),
65 ids=[make_id(command)], names=[command])
66
67
68 class DatacubeDomain(Domain):
69 name = 'datacube'
70 label = 'Data Cube'
71 directives = {
72 'click-help': ClickHelpDirective,
73 }
74
75
76 def setup(app):
77 app.add_config_value('click_utils_commands', {}, 'html')
78
79 app.add_domain(DatacubeDomain)
80 return {
81 'parallel_read_safe': False,
82 'parallel_write_safe': False,
83 }
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py
--- a/datacube/drivers/driver_cache.py
+++ b/datacube/drivers/driver_cache.py
@@ -26,14 +26,9 @@
"""
def safe_load(ep):
- from pkg_resources import DistributionNotFound
# pylint: disable=broad-except,bare-except
try:
driver_init = ep.load()
- except DistributionNotFound:
- # This happens when entry points were marked with extra features,
- # but extra feature were not requested for installation
- return None
except Exception as e:
_LOG.warning('Failed to resolve driver %s::%s', group, ep.name)
_LOG.warning('Error was: %s', repr(e))
@@ -51,8 +46,8 @@
return driver
def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:
- from pkg_resources import iter_entry_points
- for ep in iter_entry_points(group=group, name=None):
+ from importlib.metadata import entry_points
+ for ep in entry_points(group=group):
driver = safe_load(ep)
if driver is not None:
yield (ep.name, driver)
diff --git a/docs/click_utils.py b/docs/click_utils.py
--- a/docs/click_utils.py
+++ b/docs/click_utils.py
@@ -2,7 +2,7 @@
#
# Copyright (c) 2015-2023 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
-import pkg_resources
+from importlib.metadata import entry_points
from docutils.nodes import literal_block, section, title, make_id
from sphinx.domains import Domain
from docutils.parsers.rst import Directive
@@ -34,8 +34,8 @@
def find_script_callable(name):
- return list(pkg_resources.iter_entry_points(
- 'console_scripts', name))[0].load()
+ return list(entry_points(
+ group='console_scripts', name=name))[0].load()
def generate_help_text(command, prefix):
| {"golden_diff": "diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py\n--- a/datacube/drivers/driver_cache.py\n+++ b/datacube/drivers/driver_cache.py\n@@ -26,14 +26,9 @@\n \"\"\"\n \n def safe_load(ep):\n- from pkg_resources import DistributionNotFound\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n- except DistributionNotFound:\n- # This happens when entry points were marked with extra features,\n- # but extra feature were not requested for installation\n- return None\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n@@ -51,8 +46,8 @@\n return driver\n \n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n- from pkg_resources import iter_entry_points\n- for ep in iter_entry_points(group=group, name=None):\n+ from importlib.metadata import entry_points\n+ for ep in entry_points(group=group):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\ndiff --git a/docs/click_utils.py b/docs/click_utils.py\n--- a/docs/click_utils.py\n+++ b/docs/click_utils.py\n@@ -2,7 +2,7 @@\n #\n # Copyright (c) 2015-2023 ODC Contributors\n # SPDX-License-Identifier: Apache-2.0\n-import pkg_resources\n+from importlib.metadata import entry_points\n from docutils.nodes import literal_block, section, title, make_id\n from sphinx.domains import Domain\n from docutils.parsers.rst import Directive\n@@ -34,8 +34,8 @@\n \n \n def find_script_callable(name):\n- return list(pkg_resources.iter_entry_points(\n- 'console_scripts', name))[0].load()\n+ return list(entry_points(\n+ group='console_scripts', name=name))[0].load()\n \n \n def generate_help_text(command, prefix):\n", "issue": "DeprecationWarning on `pkg_resources` in Python 3.10\n### Expected behaviour\r\nODC running in Python 3.10 shouldn't print `DeprecationWarning`s.\r\n\r\n### Actual behaviour\r\n\r\nWhen running some ODC code:\r\n```\r\n .../testenv/lib/python3.10/site-packages/datacube/drivers/driver_cache.py:54: DeprecationWarning: pkg_resources is deprecated as an API\r\n from pkg_resources import iter_entry_points\r\n```\r\nWhich is: https://github.com/opendatacube/datacube-core/blob/develop/datacube/drivers/driver_cache.py#L29\r\n\r\nThis code should be replaced with modules from the Python standard library, see: [Package Discovery and Resource Access using pkg_resources - setuptools 68.0.0.post20230621 documentation](https://setuptools.pypa.io/en/latest/pkg_resources.html) and [importlib.resources \u2013 Resources \u2014 Python 3.11.4 documentation](https://docs.python.org/3/library/importlib.resources.html#module-importlib.resources).\r\n\r\nWe've already set the minimum Python version as 3.8, and the stdlib modules exist since 3.7, so we don't need to add the external backports as dependencies.\r\n\r\n### Environment information\r\n\r\n* Which ``datacube --version`` are you using?\r\n `Open Data Cube core, version 1.8.13`\r\n* What datacube deployment/enviornment are you running against?\r\n Local installation from conda-forge.\r\n\r\n\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport logging\nfrom typing import Dict, Any, Tuple, Iterable\n\n_LOG = logging.getLogger(__name__)\n\n\ndef load_drivers(group: str) -> Dict[str, Any]:\n \"\"\"\n Load available drivers for a given group name.\n\n Gracefully handles:\n\n 
- Driver module not able to be imported\n - Driver init function throwing an exception or returning None\n\n By having driver entry_points pointing to a function, we defer loading the driver\n module or running any code until required.\n\n :param group: Name of the entry point group e.g. \"datacube.plugins.io.read\"\n\n :returns: Dictionary String -> Driver Object\n \"\"\"\n\n def safe_load(ep):\n from pkg_resources import DistributionNotFound\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n except DistributionNotFound:\n # This happens when entry points were marked with extra features,\n # but extra feature were not requested for installation\n return None\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n return None\n\n try:\n driver = driver_init()\n except Exception:\n _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)\n return None\n\n if driver is None:\n _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)\n\n return driver\n\n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n from pkg_resources import iter_entry_points\n for ep in iter_entry_points(group=group, name=None):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n\n return dict((name, driver) for name, driver in resolve_all(group))\n", "path": "datacube/drivers/driver_cache.py"}, {"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport pkg_resources\nfrom docutils.nodes import literal_block, section, title, make_id\nfrom sphinx.domains import Domain\nfrom docutils.parsers.rst import Directive\nimport importlib\n\nimport click\n\n\nclass ClickHelpDirective(Directive):\n has_content = True\n required_arguments = 1\n\n def run(self):\n root_cmd = self.arguments[0]\n\n env = self.state.document.settings.env\n\n group = find_script_callable_from_env(root_cmd, env)\n\n return [generate_help_text(group, [root_cmd])]\n\n\ndef find_script_callable_from_env(name, env):\n commands = env.config.click_utils_commands\n\n module, function_name = commands[name].split(':')\n module = importlib.import_module(module)\n return getattr(module, function_name)\n\n\ndef find_script_callable(name):\n return list(pkg_resources.iter_entry_points(\n 'console_scripts', name))[0].load()\n\n\ndef generate_help_text(command, prefix):\n ctx = click.Context(command)\n help_opts = command.get_help_option(ctx).opts\n full_cmd = ' '.join(prefix)\n block = section(None,\n title(None, full_cmd),\n ids=[make_id(full_cmd)], names=[full_cmd])\n if help_opts:\n h = \"$ {} {}\\n\".format(full_cmd, help_opts[0]) + command.get_help(ctx)\n block.append(literal_block(None, h, language='console'))\n\n if isinstance(command, click.core.MultiCommand):\n for c in command.list_commands(ctx):\n c = command.resolve_command(ctx, [c])[1]\n block.append(generate_help_text(c, prefix+[c.name]))\n\n return block\n\n\ndef make_block(command, opt, content):\n h = \"$ {} {}\\n\".format(command, opt) + content\n return section(None,\n title(None, command),\n literal_block(None, h, language='console'),\n ids=[make_id(command)], names=[command])\n\n\nclass DatacubeDomain(Domain):\n name = 'datacube'\n label = 'Data Cube'\n directives = {\n 'click-help': ClickHelpDirective,\n }\n\n\ndef setup(app):\n app.add_config_value('click_utils_commands', 
{}, 'html')\n\n app.add_domain(DatacubeDomain)\n return {\n 'parallel_read_safe': False,\n 'parallel_write_safe': False,\n }\n", "path": "docs/click_utils.py"}], "after_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport logging\nfrom typing import Dict, Any, Tuple, Iterable\n\n_LOG = logging.getLogger(__name__)\n\n\ndef load_drivers(group: str) -> Dict[str, Any]:\n \"\"\"\n Load available drivers for a given group name.\n\n Gracefully handles:\n\n - Driver module not able to be imported\n - Driver init function throwing an exception or returning None\n\n By having driver entry_points pointing to a function, we defer loading the driver\n module or running any code until required.\n\n :param group: Name of the entry point group e.g. \"datacube.plugins.io.read\"\n\n :returns: Dictionary String -> Driver Object\n \"\"\"\n\n def safe_load(ep):\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n return None\n\n try:\n driver = driver_init()\n except Exception:\n _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)\n return None\n\n if driver is None:\n _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)\n\n return driver\n\n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n from importlib.metadata import entry_points\n for ep in entry_points(group=group):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n\n return dict((name, driver) for name, driver in resolve_all(group))\n", "path": "datacube/drivers/driver_cache.py"}, {"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nfrom importlib.metadata import entry_points\nfrom docutils.nodes import literal_block, section, title, make_id\nfrom sphinx.domains import Domain\nfrom docutils.parsers.rst import Directive\nimport importlib\n\nimport click\n\n\nclass ClickHelpDirective(Directive):\n has_content = True\n required_arguments = 1\n\n def run(self):\n root_cmd = self.arguments[0]\n\n env = self.state.document.settings.env\n\n group = find_script_callable_from_env(root_cmd, env)\n\n return [generate_help_text(group, [root_cmd])]\n\n\ndef find_script_callable_from_env(name, env):\n commands = env.config.click_utils_commands\n\n module, function_name = commands[name].split(':')\n module = importlib.import_module(module)\n return getattr(module, function_name)\n\n\ndef find_script_callable(name):\n return list(entry_points(\n group='console_scripts', name=name))[0].load()\n\n\ndef generate_help_text(command, prefix):\n ctx = click.Context(command)\n help_opts = command.get_help_option(ctx).opts\n full_cmd = ' '.join(prefix)\n block = section(None,\n title(None, full_cmd),\n ids=[make_id(full_cmd)], names=[full_cmd])\n if help_opts:\n h = \"$ {} {}\\n\".format(full_cmd, help_opts[0]) + command.get_help(ctx)\n block.append(literal_block(None, h, language='console'))\n\n if isinstance(command, click.core.MultiCommand):\n for c in command.list_commands(ctx):\n c = command.resolve_command(ctx, [c])[1]\n block.append(generate_help_text(c, prefix+[c.name]))\n\n return block\n\n\ndef make_block(command, opt, 
content):\n h = \"$ {} {}\\n\".format(command, opt) + content\n return section(None,\n title(None, command),\n literal_block(None, h, language='console'),\n ids=[make_id(command)], names=[command])\n\n\nclass DatacubeDomain(Domain):\n name = 'datacube'\n label = 'Data Cube'\n directives = {\n 'click-help': ClickHelpDirective,\n }\n\n\ndef setup(app):\n app.add_config_value('click_utils_commands', {}, 'html')\n\n app.add_domain(DatacubeDomain)\n return {\n 'parallel_read_safe': False,\n 'parallel_write_safe': False,\n }\n", "path": "docs/click_utils.py"}]} | 1,899 | 461 |
gh_patches_debug_37127 | rasdani/github-patches | git_diff | pantsbuild__pants-11673 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
python_source_root not added to syspath
I have a project structure along the lines of this:
```
packages/
package_a/
BUILD
__init__.py
package_a.py
package_b/
BUILD
__init__.py
package_b.py
protos/
package_a/
BUILD
foobar.proto
service_a/
BUILD
bazqux.proto
services/
service_a/
BUILD
__init__.py
main.py
service_b/
BUILD
__init__.py
main.py
```
`/packages`, `/protos` and `/services` are all configured as source roots. In each BUILD file residing in `protos/*` I have either the line `protobuf_library(python_source_root='services')` or `protobuf_library(python_source_root='packages')`, which means that the generated code ends up in `packages/*` or `services/*` instead.
This is working just fine as long as the running code is in the same source root as the generated protobuf code, but when code in `services/` depends on protos that have `python_source_root` set to `packages`, Python can't find the module unless an actual module from the same source root is also a dependency. I did some digging around, and it seems like the issue is that the source root specified in `python_source_root` is never explicitly added to Python's syspath, which is why imports fail if no "real" packages from the same source roots are used. So, using the same example as earlier, I see `services` and `protos` on the syspath, but `packages`, where the generated code is placed, is missing.
I created a [proof-of-concept repository](https://github.com/jyggen/pants-issue-11666) in case my rambling makes little sense. The issue can be seen by running `./pants test services/::`.
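
To make the failure mode concrete, here is a small hypothetical illustration (the file name `foobar_pb2.py` just assumes protoc's default naming; this is not Pants code):

```python
# Hypothetical illustration of the missing source root -- not Pants internals.
# Source roots are collected from the targets being run, so a test under
# services/ that only depends on generated code rooted under packages/ sees:
syspath_roots = {"services", "protos"}                 # roots that end up on sys.path
generated_file = "packages/package_a/foobar_pb2.py"    # where the codegen output lives

needed_root = generated_file.split("/", 1)[0]          # -> "packages"
assert needed_root not in syspath_roots                # hence `import package_a.foobar_pb2` fails
print(f"missing source root on sys.path: {needed_root}")
```

The patch further down addresses this by resolving a source root per generated file (`SourceRootRequest.for_file`) rather than only per target, so `packages` is included in the computed roots.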
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/util_rules/python_sources.py`
Content:
```
1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from dataclasses import dataclass
5 from typing import Iterable, List, Tuple, Type
6
7 from pants.backend.python.target_types import PythonSources
8 from pants.backend.python.util_rules import ancestor_files
9 from pants.backend.python.util_rules.ancestor_files import AncestorFiles, AncestorFilesRequest
10 from pants.core.target_types import FilesSources, ResourcesSources
11 from pants.core.util_rules import source_files, stripped_source_files
12 from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
13 from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
14 from pants.engine.fs import MergeDigests, Snapshot
15 from pants.engine.rules import Get, MultiGet, collect_rules, rule
16 from pants.engine.target import Sources, Target
17 from pants.engine.unions import UnionMembership
18 from pants.source.source_root import SourceRoot, SourceRootRequest
19 from pants.util.logging import LogLevel
20 from pants.util.meta import frozen_after_init
21
22
23 @dataclass(frozen=True)
24 class PythonSourceFiles:
25 """Sources that can be introspected by Python, relative to a set of source roots.
26
27 Specifically, this will filter out to only have Python, and, optionally, resources() and
28 files() targets; and will add any missing `__init__.py` files to ensure that modules are
29 recognized correctly.
30
31 Use-cases that introspect Python source code (e.g., the `test, `lint`, `fmt` goals) can
32 request this type to get relevant sources that are still relative to their source roots.
33 That way the paths they report are the unstripped ones the user is familiar with.
34
35 The sources can also be imported and used by Python (e.g., for the `test` goal), but only
36 if sys.path is modified to include the source roots.
37 """
38
39 source_files: SourceFiles
40 source_roots: Tuple[str, ...] # Source roots for the specified source files.
41
42
43 @dataclass(frozen=True)
44 class StrippedPythonSourceFiles:
45 """A PythonSourceFiles that has had its source roots stripped."""
46
47 stripped_source_files: StrippedSourceFiles
48
49
50 @frozen_after_init
51 @dataclass(unsafe_hash=True)
52 class PythonSourceFilesRequest:
53 targets: Tuple[Target, ...]
54 include_resources: bool
55 include_files: bool
56
57 def __init__(
58 self,
59 targets: Iterable[Target],
60 *,
61 include_resources: bool = True,
62 include_files: bool = False
63 ) -> None:
64 self.targets = tuple(targets)
65 self.include_resources = include_resources
66 self.include_files = include_files
67
68 @property
69 def valid_sources_types(self) -> Tuple[Type[Sources], ...]:
70 types: List[Type[Sources]] = [PythonSources]
71 if self.include_resources:
72 types.append(ResourcesSources)
73 if self.include_files:
74 types.append(FilesSources)
75 return tuple(types)
76
77
78 @rule(level=LogLevel.DEBUG)
79 async def prepare_python_sources(
80 request: PythonSourceFilesRequest, union_membership: UnionMembership
81 ) -> PythonSourceFiles:
82 sources = await Get(
83 SourceFiles,
84 SourceFilesRequest(
85 (tgt.get(Sources) for tgt in request.targets),
86 for_sources_types=request.valid_sources_types,
87 enable_codegen=True,
88 ),
89 )
90
91 missing_init_files = await Get(
92 AncestorFiles,
93 AncestorFilesRequest("__init__.py", sources.snapshot),
94 )
95
96 init_injected = await Get(
97 Snapshot,
98 MergeDigests((sources.snapshot.digest, missing_init_files.snapshot.digest)),
99 )
100
101 source_root_objs = await MultiGet(
102 Get(SourceRoot, SourceRootRequest, SourceRootRequest.for_target(tgt))
103 for tgt in request.targets
104 if (
105 tgt.has_field(PythonSources)
106 or tgt.has_field(ResourcesSources)
107 or tgt.get(Sources).can_generate(PythonSources, union_membership)
108 or tgt.get(Sources).can_generate(ResourcesSources, union_membership)
109 )
110 )
111 source_root_paths = {source_root_obj.path for source_root_obj in source_root_objs}
112 return PythonSourceFiles(
113 SourceFiles(init_injected, sources.unrooted_files), tuple(sorted(source_root_paths))
114 )
115
116
117 @rule(level=LogLevel.DEBUG)
118 async def strip_python_sources(python_sources: PythonSourceFiles) -> StrippedPythonSourceFiles:
119 stripped = await Get(StrippedSourceFiles, SourceFiles, python_sources.source_files)
120 return StrippedPythonSourceFiles(stripped)
121
122
123 def rules():
124 return [
125 *collect_rules(),
126 *ancestor_files.rules(),
127 *source_files.rules(),
128 *stripped_source_files.rules(),
129 ]
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/python/util_rules/python_sources.py b/src/python/pants/backend/python/util_rules/python_sources.py
--- a/src/python/pants/backend/python/util_rules/python_sources.py
+++ b/src/python/pants/backend/python/util_rules/python_sources.py
@@ -13,7 +13,7 @@
from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
from pants.engine.fs import MergeDigests, Snapshot
from pants.engine.rules import Get, MultiGet, collect_rules, rule
-from pants.engine.target import Sources, Target
+from pants.engine.target import HydratedSources, HydrateSourcesRequest, Sources, Target
from pants.engine.unions import UnionMembership
from pants.source.source_root import SourceRoot, SourceRootRequest
from pants.util.logging import LogLevel
@@ -98,15 +98,39 @@
MergeDigests((sources.snapshot.digest, missing_init_files.snapshot.digest)),
)
- source_root_objs = await MultiGet(
- Get(SourceRoot, SourceRootRequest, SourceRootRequest.for_target(tgt))
- for tgt in request.targets
- if (
- tgt.has_field(PythonSources)
- or tgt.has_field(ResourcesSources)
- or tgt.get(Sources).can_generate(PythonSources, union_membership)
- or tgt.get(Sources).can_generate(ResourcesSources, union_membership)
+ # Codegen is able to generate code in any arbitrary location, unlike sources normally being
+ # rooted under the target definition. To determine source roots for these generated files, we
+ # cannot use the normal `SourceRootRequest.for_target()` and we instead must determine
+ # a source root for every individual generated file. So, we re-resolve the codegen sources here.
+ python_and_resources_targets = []
+ codegen_targets = []
+ for tgt in request.targets:
+ if tgt.has_field(PythonSources) or tgt.has_field(ResourcesSources):
+ python_and_resources_targets.append(tgt)
+ elif tgt.get(Sources).can_generate(PythonSources, union_membership) or tgt.get(
+ Sources
+ ).can_generate(ResourcesSources, union_membership):
+ codegen_targets.append(tgt)
+ codegen_sources = await MultiGet(
+ Get(
+ HydratedSources,
+ HydrateSourcesRequest(
+ tgt.get(Sources), for_sources_types=request.valid_sources_types, enable_codegen=True
+ ),
)
+ for tgt in codegen_targets
+ )
+ source_root_requests = [
+ *(SourceRootRequest.for_target(tgt) for tgt in python_and_resources_targets),
+ *(
+ SourceRootRequest.for_file(f)
+ for sources in codegen_sources
+ for f in sources.snapshot.files
+ ),
+ ]
+
+ source_root_objs = await MultiGet(
+ Get(SourceRoot, SourceRootRequest, req) for req in source_root_requests
)
source_root_paths = {source_root_obj.path for source_root_obj in source_root_objs}
return PythonSourceFiles(
| {"golden_diff": "diff --git a/src/python/pants/backend/python/util_rules/python_sources.py b/src/python/pants/backend/python/util_rules/python_sources.py\n--- a/src/python/pants/backend/python/util_rules/python_sources.py\n+++ b/src/python/pants/backend/python/util_rules/python_sources.py\n@@ -13,7 +13,7 @@\n from pants.core.util_rules.stripped_source_files import StrippedSourceFiles\n from pants.engine.fs import MergeDigests, Snapshot\n from pants.engine.rules import Get, MultiGet, collect_rules, rule\n-from pants.engine.target import Sources, Target\n+from pants.engine.target import HydratedSources, HydrateSourcesRequest, Sources, Target\n from pants.engine.unions import UnionMembership\n from pants.source.source_root import SourceRoot, SourceRootRequest\n from pants.util.logging import LogLevel\n@@ -98,15 +98,39 @@\n MergeDigests((sources.snapshot.digest, missing_init_files.snapshot.digest)),\n )\n \n- source_root_objs = await MultiGet(\n- Get(SourceRoot, SourceRootRequest, SourceRootRequest.for_target(tgt))\n- for tgt in request.targets\n- if (\n- tgt.has_field(PythonSources)\n- or tgt.has_field(ResourcesSources)\n- or tgt.get(Sources).can_generate(PythonSources, union_membership)\n- or tgt.get(Sources).can_generate(ResourcesSources, union_membership)\n+ # Codegen is able to generate code in any arbitrary location, unlike sources normally being\n+ # rooted under the target definition. To determine source roots for these generated files, we\n+ # cannot use the normal `SourceRootRequest.for_target()` and we instead must determine\n+ # a source root for every individual generated file. So, we re-resolve the codegen sources here.\n+ python_and_resources_targets = []\n+ codegen_targets = []\n+ for tgt in request.targets:\n+ if tgt.has_field(PythonSources) or tgt.has_field(ResourcesSources):\n+ python_and_resources_targets.append(tgt)\n+ elif tgt.get(Sources).can_generate(PythonSources, union_membership) or tgt.get(\n+ Sources\n+ ).can_generate(ResourcesSources, union_membership):\n+ codegen_targets.append(tgt)\n+ codegen_sources = await MultiGet(\n+ Get(\n+ HydratedSources,\n+ HydrateSourcesRequest(\n+ tgt.get(Sources), for_sources_types=request.valid_sources_types, enable_codegen=True\n+ ),\n )\n+ for tgt in codegen_targets\n+ )\n+ source_root_requests = [\n+ *(SourceRootRequest.for_target(tgt) for tgt in python_and_resources_targets),\n+ *(\n+ SourceRootRequest.for_file(f)\n+ for sources in codegen_sources\n+ for f in sources.snapshot.files\n+ ),\n+ ]\n+\n+ source_root_objs = await MultiGet(\n+ Get(SourceRoot, SourceRootRequest, req) for req in source_root_requests\n )\n source_root_paths = {source_root_obj.path for source_root_obj in source_root_objs}\n return PythonSourceFiles(\n", "issue": "python_source_root not added to syspath\nI have a project structure along the lines of this:\r\n```\r\npackages/\r\n package_a/\r\n BUILD\r\n __init__.py\r\n package_a.py\r\n package_b/\r\n BUILD\r\n __init__.py\r\n package_b.py\r\nprotos/\r\n package_a/\r\n BUILD\r\n foobar.proto\r\n service_a/\r\n BUILD\r\n bazqux.proto\r\nservices/\r\n service_a/\r\n BUILD\r\n __init__.py\r\n main.py\r\n service_b/\r\n BUILD\r\n __init__.py\r\n main.py\r\n```\r\n\r\n`/packages`, `/protos` and `/services` are all configured as source roots. 
In each BUILD files residing in `protos/*` I have either the line `protobuf_library(python_source_root='services')` or `protobuf_library(python_source_root='packages')`, which means that the generated code ends up in `packages/*` or `services/*` instead.\r\n\r\nThis is working just fine as long as the running code is in the same source root as the generated protobuf code, but when code in `services/` is dependent on protos that has `python_source_root` set to `packages`, Python can't find the module unless an actual module from the same source root is also a dependency. I did some digging around, and it seems like the issue is that the source root specified in `python_source_root` is never explicitly added to Python's syspath, which is why imports fail if no \"real\" packages from the same source roots are used. So using the same example as earlier I see `services` and `protos`, but `packages`, where the generated code is placed, is missing.\r\n\r\nI created a [proof-of-concept repository](https://github.com/jyggen/pants-issue-11666) in case my rambling makes little sense. The issue can be seen by running `./pants test services/::`.\r\n\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple, Type\n\nfrom pants.backend.python.target_types import PythonSources\nfrom pants.backend.python.util_rules import ancestor_files\nfrom pants.backend.python.util_rules.ancestor_files import AncestorFiles, AncestorFilesRequest\nfrom pants.core.target_types import FilesSources, ResourcesSources\nfrom pants.core.util_rules import source_files, stripped_source_files\nfrom pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest\nfrom pants.core.util_rules.stripped_source_files import StrippedSourceFiles\nfrom pants.engine.fs import MergeDigests, Snapshot\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import Sources, Target\nfrom pants.engine.unions import UnionMembership\nfrom pants.source.source_root import SourceRoot, SourceRootRequest\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import frozen_after_init\n\n\n@dataclass(frozen=True)\nclass PythonSourceFiles:\n \"\"\"Sources that can be introspected by Python, relative to a set of source roots.\n\n Specifically, this will filter out to only have Python, and, optionally, resources() and\n files() targets; and will add any missing `__init__.py` files to ensure that modules are\n recognized correctly.\n\n Use-cases that introspect Python source code (e.g., the `test, `lint`, `fmt` goals) can\n request this type to get relevant sources that are still relative to their source roots.\n That way the paths they report are the unstripped ones the user is familiar with.\n\n The sources can also be imported and used by Python (e.g., for the `test` goal), but only\n if sys.path is modified to include the source roots.\n \"\"\"\n\n source_files: SourceFiles\n source_roots: Tuple[str, ...] 
# Source roots for the specified source files.\n\n\n@dataclass(frozen=True)\nclass StrippedPythonSourceFiles:\n \"\"\"A PythonSourceFiles that has had its source roots stripped.\"\"\"\n\n stripped_source_files: StrippedSourceFiles\n\n\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass PythonSourceFilesRequest:\n targets: Tuple[Target, ...]\n include_resources: bool\n include_files: bool\n\n def __init__(\n self,\n targets: Iterable[Target],\n *,\n include_resources: bool = True,\n include_files: bool = False\n ) -> None:\n self.targets = tuple(targets)\n self.include_resources = include_resources\n self.include_files = include_files\n\n @property\n def valid_sources_types(self) -> Tuple[Type[Sources], ...]:\n types: List[Type[Sources]] = [PythonSources]\n if self.include_resources:\n types.append(ResourcesSources)\n if self.include_files:\n types.append(FilesSources)\n return tuple(types)\n\n\n@rule(level=LogLevel.DEBUG)\nasync def prepare_python_sources(\n request: PythonSourceFilesRequest, union_membership: UnionMembership\n) -> PythonSourceFiles:\n sources = await Get(\n SourceFiles,\n SourceFilesRequest(\n (tgt.get(Sources) for tgt in request.targets),\n for_sources_types=request.valid_sources_types,\n enable_codegen=True,\n ),\n )\n\n missing_init_files = await Get(\n AncestorFiles,\n AncestorFilesRequest(\"__init__.py\", sources.snapshot),\n )\n\n init_injected = await Get(\n Snapshot,\n MergeDigests((sources.snapshot.digest, missing_init_files.snapshot.digest)),\n )\n\n source_root_objs = await MultiGet(\n Get(SourceRoot, SourceRootRequest, SourceRootRequest.for_target(tgt))\n for tgt in request.targets\n if (\n tgt.has_field(PythonSources)\n or tgt.has_field(ResourcesSources)\n or tgt.get(Sources).can_generate(PythonSources, union_membership)\n or tgt.get(Sources).can_generate(ResourcesSources, union_membership)\n )\n )\n source_root_paths = {source_root_obj.path for source_root_obj in source_root_objs}\n return PythonSourceFiles(\n SourceFiles(init_injected, sources.unrooted_files), tuple(sorted(source_root_paths))\n )\n\n\n@rule(level=LogLevel.DEBUG)\nasync def strip_python_sources(python_sources: PythonSourceFiles) -> StrippedPythonSourceFiles:\n stripped = await Get(StrippedSourceFiles, SourceFiles, python_sources.source_files)\n return StrippedPythonSourceFiles(stripped)\n\n\ndef rules():\n return [\n *collect_rules(),\n *ancestor_files.rules(),\n *source_files.rules(),\n *stripped_source_files.rules(),\n ]\n", "path": "src/python/pants/backend/python/util_rules/python_sources.py"}], "after_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple, Type\n\nfrom pants.backend.python.target_types import PythonSources\nfrom pants.backend.python.util_rules import ancestor_files\nfrom pants.backend.python.util_rules.ancestor_files import AncestorFiles, AncestorFilesRequest\nfrom pants.core.target_types import FilesSources, ResourcesSources\nfrom pants.core.util_rules import source_files, stripped_source_files\nfrom pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest\nfrom pants.core.util_rules.stripped_source_files import StrippedSourceFiles\nfrom pants.engine.fs import MergeDigests, Snapshot\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import HydratedSources, HydrateSourcesRequest, Sources, Target\nfrom pants.engine.unions import 
UnionMembership\nfrom pants.source.source_root import SourceRoot, SourceRootRequest\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import frozen_after_init\n\n\n@dataclass(frozen=True)\nclass PythonSourceFiles:\n \"\"\"Sources that can be introspected by Python, relative to a set of source roots.\n\n Specifically, this will filter out to only have Python, and, optionally, resources() and\n files() targets; and will add any missing `__init__.py` files to ensure that modules are\n recognized correctly.\n\n Use-cases that introspect Python source code (e.g., the `test, `lint`, `fmt` goals) can\n request this type to get relevant sources that are still relative to their source roots.\n That way the paths they report are the unstripped ones the user is familiar with.\n\n The sources can also be imported and used by Python (e.g., for the `test` goal), but only\n if sys.path is modified to include the source roots.\n \"\"\"\n\n source_files: SourceFiles\n source_roots: Tuple[str, ...] # Source roots for the specified source files.\n\n\n@dataclass(frozen=True)\nclass StrippedPythonSourceFiles:\n \"\"\"A PythonSourceFiles that has had its source roots stripped.\"\"\"\n\n stripped_source_files: StrippedSourceFiles\n\n\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass PythonSourceFilesRequest:\n targets: Tuple[Target, ...]\n include_resources: bool\n include_files: bool\n\n def __init__(\n self,\n targets: Iterable[Target],\n *,\n include_resources: bool = True,\n include_files: bool = False\n ) -> None:\n self.targets = tuple(targets)\n self.include_resources = include_resources\n self.include_files = include_files\n\n @property\n def valid_sources_types(self) -> Tuple[Type[Sources], ...]:\n types: List[Type[Sources]] = [PythonSources]\n if self.include_resources:\n types.append(ResourcesSources)\n if self.include_files:\n types.append(FilesSources)\n return tuple(types)\n\n\n@rule(level=LogLevel.DEBUG)\nasync def prepare_python_sources(\n request: PythonSourceFilesRequest, union_membership: UnionMembership\n) -> PythonSourceFiles:\n sources = await Get(\n SourceFiles,\n SourceFilesRequest(\n (tgt.get(Sources) for tgt in request.targets),\n for_sources_types=request.valid_sources_types,\n enable_codegen=True,\n ),\n )\n\n missing_init_files = await Get(\n AncestorFiles,\n AncestorFilesRequest(\"__init__.py\", sources.snapshot),\n )\n\n init_injected = await Get(\n Snapshot,\n MergeDigests((sources.snapshot.digest, missing_init_files.snapshot.digest)),\n )\n\n # Codegen is able to generate code in any arbitrary location, unlike sources normally being\n # rooted under the target definition. To determine source roots for these generated files, we\n # cannot use the normal `SourceRootRequest.for_target()` and we instead must determine\n # a source root for every individual generated file. 
So, we re-resolve the codegen sources here.\n python_and_resources_targets = []\n codegen_targets = []\n for tgt in request.targets:\n if tgt.has_field(PythonSources) or tgt.has_field(ResourcesSources):\n python_and_resources_targets.append(tgt)\n elif tgt.get(Sources).can_generate(PythonSources, union_membership) or tgt.get(\n Sources\n ).can_generate(ResourcesSources, union_membership):\n codegen_targets.append(tgt)\n codegen_sources = await MultiGet(\n Get(\n HydratedSources,\n HydrateSourcesRequest(\n tgt.get(Sources), for_sources_types=request.valid_sources_types, enable_codegen=True\n ),\n )\n for tgt in codegen_targets\n )\n source_root_requests = [\n *(SourceRootRequest.for_target(tgt) for tgt in python_and_resources_targets),\n *(\n SourceRootRequest.for_file(f)\n for sources in codegen_sources\n for f in sources.snapshot.files\n ),\n ]\n\n source_root_objs = await MultiGet(\n Get(SourceRoot, SourceRootRequest, req) for req in source_root_requests\n )\n source_root_paths = {source_root_obj.path for source_root_obj in source_root_objs}\n return PythonSourceFiles(\n SourceFiles(init_injected, sources.unrooted_files), tuple(sorted(source_root_paths))\n )\n\n\n@rule(level=LogLevel.DEBUG)\nasync def strip_python_sources(python_sources: PythonSourceFiles) -> StrippedPythonSourceFiles:\n stripped = await Get(StrippedSourceFiles, SourceFiles, python_sources.source_files)\n return StrippedPythonSourceFiles(stripped)\n\n\ndef rules():\n return [\n *collect_rules(),\n *ancestor_files.rules(),\n *source_files.rules(),\n *stripped_source_files.rules(),\n ]\n", "path": "src/python/pants/backend/python/util_rules/python_sources.py"}]} | 1,966 | 656 |
gh_patches_debug_10677 | rasdani/github-patches | git_diff | python-discord__bot-1521 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logic in Source Command's Help Command Check
## Description
<!-- Describe the bug you've found -->
It's impossible to get the source for the HelpChannel cog.
## Steps to Reproduce
<!-- Detail the exact list of steps to be able to encounter the bug -->
`!src HelpChannel`
Due to the logic of the command, any argument that starts with `help` (that is, `!src help` followed by anything) returns the help command's source instead.
## Expected Behaviour
<!-- Outline what you expect to occur, include screenshots if relevant -->
Source link to the HelpChannel cog is shown.
## Actual Behaviour
<!-- Outline what occurs instead of the expected behavior, include screenshots if relevant -->
Help command source link is shown.
## Known Impacted Platforms
<!-- Replace [ ] with [x] to mark items -->
- [x] Web
- [x] Desktop
- [x] Android App
- [x] iOS App
## Possible Solutions
<!-- Detail any solutions you might have in mind to be able to resolve the bug -->
These lines are the probable cause.
https://github.com/python-discord/bot/blob/51af1369e0d9c2ad185f0c0920b599b7187ed077/bot/exts/info/source.py#L19-L20
After looking at sir-lancebot, it might even be possible to remove these two lines altogether.
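
For illustration, a quick sketch of the prefix check versus the exact-match alternative used in the fix below (the function names here are made up; the real check lives in `SourceConverter.convert`):

```python
# Sketch of the two checks -- not the full converter.
def matches_help_current(argument: str) -> bool:
    return argument.lower().startswith("help")   # "HelpChannel" -> True (the bug)


def matches_help_fixed(argument: str) -> bool:
    return argument.lower() == "help"            # only a literal "help" matches


for arg in ("help", "HelpChannel", "helpfulness"):
    print(arg, matches_help_current(arg), matches_help_fixed(arg))
```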
## Would you like to implement a fix?
***Note: For high-priority or critical bugs, fixes may be implemented by staff.***
<!-- Replace [ ] with [x] with your choice -->
- [ ] I'd like to implement the bug fix
- [x] Anyone can implement the bug fix
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/exts/info/source.py`
Content:
```
1 import inspect
2 from pathlib import Path
3 from typing import Optional, Tuple, Union
4
5 from discord import Embed, utils
6 from discord.ext import commands
7
8 from bot.bot import Bot
9 from bot.constants import URLs
10
11 SourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]
12
13
14 class SourceConverter(commands.Converter):
15 """Convert an argument into a help command, tag, command, or cog."""
16
17 async def convert(self, ctx: commands.Context, argument: str) -> SourceType:
18 """Convert argument into source object."""
19 if argument.lower().startswith("help"):
20 return ctx.bot.help_command
21
22 cog = ctx.bot.get_cog(argument)
23 if cog:
24 return cog
25
26 cmd = ctx.bot.get_command(argument)
27 if cmd:
28 return cmd
29
30 tags_cog = ctx.bot.get_cog("Tags")
31 show_tag = True
32
33 if not tags_cog:
34 show_tag = False
35 elif argument.lower() in tags_cog._cache:
36 return argument.lower()
37
38 escaped_arg = utils.escape_markdown(argument)
39
40 raise commands.BadArgument(
41 f"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog."
42 )
43
44
45 class BotSource(commands.Cog):
46 """Displays information about the bot's source code."""
47
48 def __init__(self, bot: Bot):
49 self.bot = bot
50
51 @commands.command(name="source", aliases=("src",))
52 async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:
53 """Display information and a GitHub link to the source code of a command, tag, or cog."""
54 if not source_item:
55 embed = Embed(title="Bot's GitHub Repository")
56 embed.add_field(name="Repository", value=f"[Go to GitHub]({URLs.github_bot_repo})")
57 embed.set_thumbnail(url="https://avatars1.githubusercontent.com/u/9919")
58 await ctx.send(embed=embed)
59 return
60
61 embed = await self.build_embed(source_item)
62 await ctx.send(embed=embed)
63
64 def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
65 """
66 Build GitHub link of source item, return this link, file location and first line number.
67
68 Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).
69 """
70 if isinstance(source_item, commands.Command):
71 source_item = inspect.unwrap(source_item.callback)
72 src = source_item.__code__
73 filename = src.co_filename
74 elif isinstance(source_item, str):
75 tags_cog = self.bot.get_cog("Tags")
76 filename = tags_cog._cache[source_item]["location"]
77 else:
78 src = type(source_item)
79 try:
80 filename = inspect.getsourcefile(src)
81 except TypeError:
82 raise commands.BadArgument("Cannot get source for a dynamically-created object.")
83
84 if not isinstance(source_item, str):
85 try:
86 lines, first_line_no = inspect.getsourcelines(src)
87 except OSError:
88 raise commands.BadArgument("Cannot get source for a dynamically-created object.")
89
90 lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
91 else:
92 first_line_no = None
93 lines_extension = ""
94
95 # Handle tag file location differently than others to avoid errors in some cases
96 if not first_line_no:
97 file_location = Path(filename).relative_to("/bot/")
98 else:
99 file_location = Path(filename).relative_to(Path.cwd()).as_posix()
100
101 url = f"{URLs.github_bot_repo}/blob/main/{file_location}{lines_extension}"
102
103 return url, file_location, first_line_no or None
104
105 async def build_embed(self, source_object: SourceType) -> Optional[Embed]:
106 """Build embed based on source object."""
107 url, location, first_line = self.get_source_link(source_object)
108
109 if isinstance(source_object, commands.HelpCommand):
110 title = "Help Command"
111 description = source_object.__doc__.splitlines()[1]
112 elif isinstance(source_object, commands.Command):
113 description = source_object.short_doc
114 title = f"Command: {source_object.qualified_name}"
115 elif isinstance(source_object, str):
116 title = f"Tag: {source_object}"
117 description = ""
118 else:
119 title = f"Cog: {source_object.qualified_name}"
120 description = source_object.description.splitlines()[0]
121
122 embed = Embed(title=title, description=description)
123 embed.add_field(name="Source Code", value=f"[Go to GitHub]({url})")
124 line_text = f":{first_line}" if first_line else ""
125 embed.set_footer(text=f"{location}{line_text}")
126
127 return embed
128
129
130 def setup(bot: Bot) -> None:
131 """Load the BotSource cog."""
132 bot.add_cog(BotSource(bot))
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py
--- a/bot/exts/info/source.py
+++ b/bot/exts/info/source.py
@@ -14,9 +14,10 @@
class SourceConverter(commands.Converter):
"""Convert an argument into a help command, tag, command, or cog."""
- async def convert(self, ctx: commands.Context, argument: str) -> SourceType:
+ @staticmethod
+ async def convert(ctx: commands.Context, argument: str) -> SourceType:
"""Convert argument into source object."""
- if argument.lower().startswith("help"):
+ if argument.lower() == "help":
return ctx.bot.help_command
cog = ctx.bot.get_cog(argument)
| {"golden_diff": "diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py\n--- a/bot/exts/info/source.py\n+++ b/bot/exts/info/source.py\n@@ -14,9 +14,10 @@\n class SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n \n- async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n+ @staticmethod\n+ async def convert(ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n- if argument.lower().startswith(\"help\"):\n+ if argument.lower() == \"help\":\n return ctx.bot.help_command\n \n cog = ctx.bot.get_cog(argument)\n", "issue": "Logic in Source Command's Help Command Check\n## Description\r\n<!-- Describe the bug you've found -->\r\nIts impossible to get the source for the HelpChannel cog.\r\n\r\n## Steps to Reproduce\r\n<!-- Detail the exact list of steps to be able to encounter the bug -->\r\n`!src HelpChannel`\r\nDue to the logic of the command, all that needs to be said is `!src help` followed by anything.\r\n\r\n## Expected Behaviour\r\n<!-- Outline what you expect to occur, include screenshots if relevant -->\r\nSource link to the HelpChannel cog is shown.\r\n\r\n## Actual Behaviour\r\n<!-- Outline what occurs instead of the expected behavior, include screenshots if relevant -->\r\nHelp command source link is shown.\r\n\r\n## Known Impacted Platforms\r\n<!-- Replace [ ] with [x] to mark items -->\r\n\r\n- [x] Web\r\n- [x] Desktop\r\n- [x] Android App\r\n- [x] iOS App\r\n\r\n## Possible Solutions\r\n<!-- Detail any solutions you might have in mind to be able to resolve the bug -->\r\nThese lines are the probable cause.\r\nhttps://github.com/python-discord/bot/blob/51af1369e0d9c2ad185f0c0920b599b7187ed077/bot/exts/info/source.py#L19-L20\r\nAfter looking at sir-lancebot, it might even be possible to remove these two lines altogether.\r\n## Would you like to implement a fix?\r\n\r\n***Note: For high-priority or critical bugs, fixes may be implemented by staff.***\r\n<!-- Replace [ ] with [x] with your choice -->\r\n\r\n- [ ] I'd like to implement the bug fix\r\n- [x] Anyone can implement the bug fix\r\n\n", "before_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed, utils\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n\n async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower().startswith(\"help\"):\n return ctx.bot.help_command\n\n cog = ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n escaped_arg = utils.escape_markdown(argument)\n\n raise commands.BadArgument(\n f\"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n 
async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"\n Build GitHub link of source item, return this link, file location and first line number.\n\n Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).\n \"\"\"\n if isinstance(source_item, commands.Command):\n source_item = inspect.unwrap(source_item.callback)\n src = source_item.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n try:\n filename = inspect.getsourcefile(src)\n except TypeError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n if not isinstance(source_item, str):\n try:\n lines, first_line_no = inspect.getsourcelines(src)\n except OSError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/main/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/exts/info/source.py"}], "after_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed, utils\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an 
argument into a help command, tag, command, or cog.\"\"\"\n\n @staticmethod\n async def convert(ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower() == \"help\":\n return ctx.bot.help_command\n\n cog = ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n escaped_arg = utils.escape_markdown(argument)\n\n raise commands.BadArgument(\n f\"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"\n Build GitHub link of source item, return this link, file location and first line number.\n\n Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).\n \"\"\"\n if isinstance(source_item, commands.Command):\n source_item = inspect.unwrap(source_item.callback)\n src = source_item.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n try:\n filename = inspect.getsourcefile(src)\n except TypeError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n if not isinstance(source_item, str):\n try:\n lines, first_line_no = inspect.getsourcelines(src)\n except OSError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/main/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n 
description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/exts/info/source.py"}]} | 1,969 | 166 |
gh_patches_debug_1323 | rasdani/github-patches | git_diff | ivy-llc__ivy-22920 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
eigvals
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py`
Content:
```
1 # local
2 import ivy
3 from ivy.functional.frontends.numpy.func_wrapper import (
4 to_ivy_arrays_and_back,
5 from_zero_dim_arrays_to_scalar,
6 )
7
8
9 @to_ivy_arrays_and_back
10 def eig(a):
11 return ivy.eig(a)
12
13
14 @to_ivy_arrays_and_back
15 @from_zero_dim_arrays_to_scalar
16 def eigh(a, /, UPLO="L"):
17 return ivy.eigh(a, UPLO=UPLO)
18
19
20 @to_ivy_arrays_and_back
21 @from_zero_dim_arrays_to_scalar
22 def eigvalsh(a, /, UPLO="L"):
23 return ivy.eigvalsh(a, UPLO=UPLO)
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
--- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
+++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
@@ -17,6 +17,11 @@
return ivy.eigh(a, UPLO=UPLO)
+@to_ivy_arrays_and_back
+def eigvals(a):
+ return ivy.eig(a)[0]
+
+
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def eigvalsh(a, /, UPLO="L"):
| {"golden_diff": "diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n--- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n+++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n@@ -17,6 +17,11 @@\n return ivy.eigh(a, UPLO=UPLO)\n \n \n+@to_ivy_arrays_and_back\n+def eigvals(a):\n+ return ivy.eig(a)[0]\n+\n+\n @to_ivy_arrays_and_back\n @from_zero_dim_arrays_to_scalar\n def eigvalsh(a, /, UPLO=\"L\"):\n", "issue": "eigvals\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eigvals(a):\n return ivy.eig(a)[0]\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py"}]} | 460 | 161 |
gh_patches_debug_37040 | rasdani/github-patches | git_diff | opsdroid__opsdroid-522 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switch CLI to use click
I recently discovered [`click`](http://click.pocoo.org/5/) which makes adding command line options and arguments to your python application super simple and much more robust.
We should look at replacing the [current argparse code](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/__main__.py#L88) with `click`!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/__main__.py`
Content:
```
1 """Starts opsdroid."""
2
3 import os
4 import sys
5 import logging
6 import argparse
7 import gettext
8
9 from opsdroid.core import OpsDroid
10 from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\
11 DEFAULT_LANGUAGE, LOCALE_DIR
12 from opsdroid.web import Web
13
14
15 gettext.install('opsdroid')
16 _LOGGER = logging.getLogger("opsdroid")
17
18
19 def configure_lang(config):
20 """Configure app language based on user config."""
21 lang_code = config.get("lang", DEFAULT_LANGUAGE)
22 if lang_code != DEFAULT_LANGUAGE:
23 lang = gettext.translation(
24 'opsdroid', LOCALE_DIR, (lang_code,), fallback=True)
25 lang.install()
26
27
28 def configure_logging(config):
29 """Configure the root logger based on user config."""
30 rootlogger = logging.getLogger()
31 while rootlogger.handlers:
32 rootlogger.handlers.pop()
33
34 try:
35 if config["logging"]["path"]:
36 logfile_path = os.path.expanduser(config["logging"]["path"])
37 else:
38 logfile_path = config["logging"]["path"]
39 except KeyError:
40 logfile_path = DEFAULT_LOG_FILENAME
41
42 try:
43 log_level = get_logging_level(
44 config["logging"]["level"])
45 except KeyError:
46 log_level = logging.INFO
47
48 rootlogger.setLevel(log_level)
49 formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')
50
51 console_handler = logging.StreamHandler()
52 console_handler.setLevel(log_level)
53 console_handler.setFormatter(formatter)
54 rootlogger.addHandler(console_handler)
55
56 try:
57 if not config["logging"]["console"]:
58 console_handler.setLevel(logging.CRITICAL)
59 except KeyError:
60 pass
61
62 if logfile_path:
63 logdir = os.path.dirname(os.path.realpath(logfile_path))
64 if not os.path.isdir(logdir):
65 os.makedirs(logdir)
66 file_handler = logging.FileHandler(logfile_path)
67 file_handler.setLevel(log_level)
68 file_handler.setFormatter(formatter)
69 rootlogger.addHandler(file_handler)
70 _LOGGER.info("="*40)
71 _LOGGER.info(_("Started application"))
72
73
74 def get_logging_level(logging_level):
75 """Get the logger level based on the user configuration."""
76 if logging_level == 'critical':
77 return logging.CRITICAL
78 elif logging_level == 'error':
79 return logging.ERROR
80 elif logging_level == 'warning':
81 return logging.WARNING
82 elif logging_level == 'debug':
83 return logging.DEBUG
84
85 return logging.INFO
86
87
88 def parse_args(args):
89 """Parse command line arguments."""
90 parser = argparse.ArgumentParser(description='Run opsdroid.')
91 parser.add_argument('--gen-config', action="store_true",
92 help='prints out an example configuration file')
93 return parser.parse_args(args)
94
95
96 def check_dependencies():
97 """Check for system dependencies required by opsdroid."""
98 if sys.version_info.major < 3 or sys.version_info.minor < 5:
99 logging.critical(_("Whoops! opsdroid requires python 3.5 or above."))
100 sys.exit(1)
101
102
103 def welcome_message(config):
104 """Add welcome message if set to true in configuration."""
105 try:
106 if config['welcome-message']:
107 _LOGGER.info("=" * 40)
108 _LOGGER.info(_("You can customise your opsdroid by modifying "
109 "your configuration.yaml"))
110 _LOGGER.info(_("Read more at: "
111 "http://opsdroid.readthedocs.io/#configuration"))
112 _LOGGER.info(_("Watch the Get Started Videos at: "
113 "http://bit.ly/2fnC0Fh"))
114 _LOGGER.info(_("Install Opsdroid Desktop at: \n"
115 "https://github.com/opsdroid/opsdroid-desktop/"
116 "releases"))
117 _LOGGER.info("=" * 40)
118 except KeyError:
119 _LOGGER.warning(_("'welcome-message: true/false' is missing in "
120 "configuration.yaml"))
121
122
123 def main():
124 """Parse the args and then start the application."""
125 args = parse_args(sys.argv[1:])
126
127 if args.gen_config:
128 with open(EXAMPLE_CONFIG_FILE, 'r') as conf:
129 print(conf.read())
130 sys.exit(0)
131
132 check_dependencies()
133
134 with OpsDroid() as opsdroid:
135 opsdroid.load()
136 configure_lang(opsdroid.config)
137 configure_logging(opsdroid.config)
138 welcome_message(opsdroid.config)
139 opsdroid.web_server = Web(opsdroid)
140 opsdroid.start_loop()
141
142
143 def init():
144 """Enter the application."""
145 if __name__ == "__main__":
146 main()
147
148
149 init()
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py
--- a/opsdroid/__main__.py
+++ b/opsdroid/__main__.py
@@ -3,12 +3,13 @@
import os
import sys
import logging
-import argparse
import gettext
+import click
+
from opsdroid.core import OpsDroid
-from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\
- DEFAULT_LANGUAGE, LOCALE_DIR
+from opsdroid.const import __version__, DEFAULT_LOG_FILENAME, \
+ EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, LOCALE_DIR
from opsdroid.web import Web
@@ -85,14 +86,6 @@
return logging.INFO
-def parse_args(args):
- """Parse command line arguments."""
- parser = argparse.ArgumentParser(description='Run opsdroid.')
- parser.add_argument('--gen-config', action="store_true",
- help='prints out an example configuration file')
- return parser.parse_args(args)
-
-
def check_dependencies():
"""Check for system dependencies required by opsdroid."""
if sys.version_info.major < 3 or sys.version_info.minor < 5:
@@ -100,6 +93,23 @@
sys.exit(1)
+def print_version(ctx, param, value):
+ """Print out the version of opsdroid that is installed."""
+ if not value or ctx.resilient_parsing:
+ return
+ click.echo('opsdroid v{version}'.format(version=__version__))
+ ctx.exit(0)
+
+
+def print_example_config(ctx, param, value):
+ """Print out the example config."""
+ if not value or ctx.resilient_parsing:
+ return
+ with open(EXAMPLE_CONFIG_FILE, 'r') as conf:
+ click.echo(conf.read())
+ ctx.exit(0)
+
+
def welcome_message(config):
"""Add welcome message if set to true in configuration."""
try:
@@ -120,15 +130,19 @@
"configuration.yaml"))
[email protected]()
[email protected]('--gen-config', is_flag=True, callback=print_example_config,
+ expose_value=False, default=False,
+ help='Print an example config and exit.')
[email protected]('--version', '-v', is_flag=True, callback=print_version,
+ expose_value=False, default=False, is_eager=True,
+ help='Print the version and exit.')
def main():
- """Parse the args and then start the application."""
- args = parse_args(sys.argv[1:])
-
- if args.gen_config:
- with open(EXAMPLE_CONFIG_FILE, 'r') as conf:
- print(conf.read())
- sys.exit(0)
+ """Opsdroid is a chat bot framework written in Python.
+ It is designed to be extendable, scalable and simple.
+ See https://opsdroid.github.io/ for more information.
+ """
check_dependencies()
with OpsDroid() as opsdroid:
| {"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -3,12 +3,13 @@\n import os\n import sys\n import logging\n-import argparse\n import gettext\n \n+import click\n+\n from opsdroid.core import OpsDroid\n-from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\\\n- DEFAULT_LANGUAGE, LOCALE_DIR\n+from opsdroid.const import __version__, DEFAULT_LOG_FILENAME, \\\n+ EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, LOCALE_DIR\n from opsdroid.web import Web\n \n \n@@ -85,14 +86,6 @@\n return logging.INFO\n \n \n-def parse_args(args):\n- \"\"\"Parse command line arguments.\"\"\"\n- parser = argparse.ArgumentParser(description='Run opsdroid.')\n- parser.add_argument('--gen-config', action=\"store_true\",\n- help='prints out an example configuration file')\n- return parser.parse_args(args)\n-\n-\n def check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 5:\n@@ -100,6 +93,23 @@\n sys.exit(1)\n \n \n+def print_version(ctx, param, value):\n+ \"\"\"Print out the version of opsdroid that is installed.\"\"\"\n+ if not value or ctx.resilient_parsing:\n+ return\n+ click.echo('opsdroid v{version}'.format(version=__version__))\n+ ctx.exit(0)\n+\n+\n+def print_example_config(ctx, param, value):\n+ \"\"\"Print out the example config.\"\"\"\n+ if not value or ctx.resilient_parsing:\n+ return\n+ with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n+ click.echo(conf.read())\n+ ctx.exit(0)\n+\n+\n def welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\"\"\"\n try:\n@@ -120,15 +130,19 @@\n \"configuration.yaml\"))\n \n \[email protected]()\[email protected]('--gen-config', is_flag=True, callback=print_example_config,\n+ expose_value=False, default=False,\n+ help='Print an example config and exit.')\[email protected]('--version', '-v', is_flag=True, callback=print_version,\n+ expose_value=False, default=False, is_eager=True,\n+ help='Print the version and exit.')\n def main():\n- \"\"\"Parse the args and then start the application.\"\"\"\n- args = parse_args(sys.argv[1:])\n-\n- if args.gen_config:\n- with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n- print(conf.read())\n- sys.exit(0)\n+ \"\"\"Opsdroid is a chat bot framework written in Python.\n \n+ It is designed to be extendable, scalable and simple.\n+ See https://opsdroid.github.io/ for more information.\n+ \"\"\"\n check_dependencies()\n \n with OpsDroid() as opsdroid:\n", "issue": "Switch CLI to use click\nI recently discovered [`click`](http://click.pocoo.org/5/) which makes adding command line options and arguments to your python application super simple and much more robust.\r\n\r\nWe should look at replacing the [current argparse code](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/__main__.py#L88) with `click`!\n", "before_files": [{"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport sys\nimport logging\nimport argparse\nimport gettext\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\\\n DEFAULT_LANGUAGE, LOCALE_DIR\nfrom opsdroid.web import Web\n\n\ngettext.install('opsdroid')\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_lang(config):\n \"\"\"Configure app language based on user config.\"\"\"\n lang_code = config.get(\"lang\", DEFAULT_LANGUAGE)\n if lang_code != DEFAULT_LANGUAGE:\n lang = gettext.translation(\n 'opsdroid', LOCALE_DIR, (lang_code,), 
fallback=True)\n lang.install()\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(_(\"Started application\"))\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 5:\n logging.critical(_(\"Whoops! 
opsdroid requires python 3.5 or above.\"))\n sys.exit(1)\n\n\ndef welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\"\"\"\n try:\n if config['welcome-message']:\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"You can customise your opsdroid by modifying \"\n \"your configuration.yaml\"))\n _LOGGER.info(_(\"Read more at: \"\n \"http://opsdroid.readthedocs.io/#configuration\"))\n _LOGGER.info(_(\"Watch the Get Started Videos at: \"\n \"http://bit.ly/2fnC0Fh\"))\n _LOGGER.info(_(\"Install Opsdroid Desktop at: \\n\"\n \"https://github.com/opsdroid/opsdroid-desktop/\"\n \"releases\"))\n _LOGGER.info(\"=\" * 40)\n except KeyError:\n _LOGGER.warning(_(\"'welcome-message: true/false' is missing in \"\n \"configuration.yaml\"))\n\n\ndef main():\n \"\"\"Parse the args and then start the application.\"\"\"\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n check_dependencies()\n\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_lang(opsdroid.config)\n configure_logging(opsdroid.config)\n welcome_message(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n\n\ndef init():\n \"\"\"Enter the application.\"\"\"\n if __name__ == \"__main__\":\n main()\n\n\ninit()\n", "path": "opsdroid/__main__.py"}], "after_files": [{"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport sys\nimport logging\nimport gettext\n\nimport click\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import __version__, DEFAULT_LOG_FILENAME, \\\n EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, LOCALE_DIR\nfrom opsdroid.web import Web\n\n\ngettext.install('opsdroid')\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_lang(config):\n \"\"\"Configure app language based on user config.\"\"\"\n lang_code = config.get(\"lang\", DEFAULT_LANGUAGE)\n if lang_code != DEFAULT_LANGUAGE:\n lang = gettext.translation(\n 'opsdroid', LOCALE_DIR, (lang_code,), fallback=True)\n lang.install()\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(_(\"Started application\"))\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return 
logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 5:\n logging.critical(_(\"Whoops! opsdroid requires python 3.5 or above.\"))\n sys.exit(1)\n\n\ndef print_version(ctx, param, value):\n \"\"\"Print out the version of opsdroid that is installed.\"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo('opsdroid v{version}'.format(version=__version__))\n ctx.exit(0)\n\n\ndef print_example_config(ctx, param, value):\n \"\"\"Print out the example config.\"\"\"\n if not value or ctx.resilient_parsing:\n return\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n click.echo(conf.read())\n ctx.exit(0)\n\n\ndef welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\"\"\"\n try:\n if config['welcome-message']:\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"You can customise your opsdroid by modifying \"\n \"your configuration.yaml\"))\n _LOGGER.info(_(\"Read more at: \"\n \"http://opsdroid.readthedocs.io/#configuration\"))\n _LOGGER.info(_(\"Watch the Get Started Videos at: \"\n \"http://bit.ly/2fnC0Fh\"))\n _LOGGER.info(_(\"Install Opsdroid Desktop at: \\n\"\n \"https://github.com/opsdroid/opsdroid-desktop/\"\n \"releases\"))\n _LOGGER.info(\"=\" * 40)\n except KeyError:\n _LOGGER.warning(_(\"'welcome-message: true/false' is missing in \"\n \"configuration.yaml\"))\n\n\[email protected]()\[email protected]('--gen-config', is_flag=True, callback=print_example_config,\n expose_value=False, default=False,\n help='Print an example config and exit.')\[email protected]('--version', '-v', is_flag=True, callback=print_version,\n expose_value=False, default=False, is_eager=True,\n help='Print the version and exit.')\ndef main():\n \"\"\"Opsdroid is a chat bot framework written in Python.\n\n It is designed to be extendable, scalable and simple.\n See https://opsdroid.github.io/ for more information.\n \"\"\"\n check_dependencies()\n\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_lang(opsdroid.config)\n configure_logging(opsdroid.config)\n welcome_message(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n\n\ndef init():\n \"\"\"Enter the application.\"\"\"\n if __name__ == \"__main__\":\n main()\n\n\ninit()\n", "path": "opsdroid/__main__.py"}]} | 1,657 | 672 |
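As background for the `--version` and `--gen-config` options introduced in the diff above: `click` implements such flags as eager options whose callback runs before the command body and then exits the context. A minimal, self-contained sketch of that pattern (the program name and version string are placeholders, not opsdroid's):

```python
import click

__version__ = "0.0.1"  # placeholder version for the example


def print_version(ctx, param, value):
    """Print the version and exit without running the command body."""
    if not value or ctx.resilient_parsing:
        return
    click.echo("example v{version}".format(version=__version__))
    ctx.exit(0)


@click.command()
@click.option("--version", "-v", is_flag=True, callback=print_version,
              expose_value=False, is_eager=True,
              help="Print the version and exit.")
def main():
    """A tiny command that only demonstrates the eager option callback."""
    click.echo("running main")


if __name__ == "__main__":
    main()
```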
gh_patches_debug_6492 | rasdani/github-patches | git_diff | mozilla__kitsune-2981 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add whitenoise and serve static files from the app
Add whitenoise to serve static files in the dev (and stage?) instances.
In production we'll probably go with another more efficient approach which is being investigated in #2949.
Whitenoise activation should be configurable via the `ENABLE_WHITENOISE` env variable.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wsgi/app.py`
Content:
```
1 """
2 WSGI config for kitsune project.
3
4 It exposes the WSGI callable as a module-level variable named ``application``.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
8 """
9 import os
10 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitsune.settings') # NOQA
11
12 from django.core.wsgi import get_wsgi_application
13
14 import newrelic.agent
15 from decouple import config
16 from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
17
18 # For django-celery
19 os.environ['CELERY_LOADER'] = 'django'
20
21 application = get_wsgi_application()
22 application = Sentry(application)
23
24 # Add NewRelic
25 newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')
26 newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)
27 if newrelic_ini and newrelic_license_key:
28 newrelic.agent.initialize(newrelic_ini)
29 application = newrelic.agent.wsgi_application()(application)
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wsgi/app.py b/wsgi/app.py
--- a/wsgi/app.py
+++ b/wsgi/app.py
@@ -21,6 +21,10 @@
application = get_wsgi_application()
application = Sentry(application)
+if config('ENABLE_WHITENOISE', default=False, cast=bool):
+ from whitenoise.django import DjangoWhiteNoise
+ application = DjangoWhiteNoise(application)
+
# Add NewRelic
newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')
newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)
| {"golden_diff": "diff --git a/wsgi/app.py b/wsgi/app.py\n--- a/wsgi/app.py\n+++ b/wsgi/app.py\n@@ -21,6 +21,10 @@\n application = get_wsgi_application()\n application = Sentry(application)\n \n+if config('ENABLE_WHITENOISE', default=False, cast=bool):\n+ from whitenoise.django import DjangoWhiteNoise\n+ application = DjangoWhiteNoise(application)\n+\n # Add NewRelic\n newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\n newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\n", "issue": "Add whitenoise and serve static files from the app\nAdd whitenoise to serve static files in the dev (and stage?) instances. \r\n\r\nIn production we'll probably go with another more efficient approach which is being investigated in #2949.\r\n\r\nWhitenoise activation should be configurable via the `ENABLE_WHITENOISE` env variable.\n", "before_files": [{"content": "\"\"\"\nWSGI config for kitsune project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitsune.settings') # NOQA\n\nfrom django.core.wsgi import get_wsgi_application\n\nimport newrelic.agent\nfrom decouple import config\nfrom raven.contrib.django.raven_compat.middleware.wsgi import Sentry\n\n# For django-celery\nos.environ['CELERY_LOADER'] = 'django'\n\napplication = get_wsgi_application()\napplication = Sentry(application)\n\n# Add NewRelic\nnewrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\nnewrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\nif newrelic_ini and newrelic_license_key:\n newrelic.agent.initialize(newrelic_ini)\n application = newrelic.agent.wsgi_application()(application)\n", "path": "wsgi/app.py"}], "after_files": [{"content": "\"\"\"\nWSGI config for kitsune project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitsune.settings') # NOQA\n\nfrom django.core.wsgi import get_wsgi_application\n\nimport newrelic.agent\nfrom decouple import config\nfrom raven.contrib.django.raven_compat.middleware.wsgi import Sentry\n\n# For django-celery\nos.environ['CELERY_LOADER'] = 'django'\n\napplication = get_wsgi_application()\napplication = Sentry(application)\n\nif config('ENABLE_WHITENOISE', default=False, cast=bool):\n from whitenoise.django import DjangoWhiteNoise\n application = DjangoWhiteNoise(application)\n\n# Add NewRelic\nnewrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\nnewrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\nif newrelic_ini and newrelic_license_key:\n newrelic.agent.initialize(newrelic_ini)\n application = newrelic.agent.wsgi_application()(application)\n", "path": "wsgi/app.py"}]} | 612 | 136 |
gh_patches_debug_22252 | rasdani/github-patches | git_diff | getsentry__sentry-python-123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
asyncio concurrency issues
@mitsuhiko discovered that passing around hubs does not work at all and we likely leak state between requests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/sanic.py`
Content:
```
1 import sys
2 import weakref
3 from inspect import isawaitable
4
5 from sentry_sdk._compat import urlparse, reraise
6 from sentry_sdk.hub import Hub
7 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
8 from sentry_sdk.integrations import Integration
9 from sentry_sdk.integrations._wsgi import RequestExtractor, _filter_headers
10 from sentry_sdk.integrations.logging import ignore_logger
11
12 from sanic import Sanic
13 from sanic.router import Router
14 from sanic.handlers import ErrorHandler
15
16
17 class SanicIntegration(Integration):
18 identifier = "sanic"
19
20 @staticmethod
21 def setup_once():
22 if sys.version_info < (3, 7):
23 # Sanic is async. We better have contextvars or we're going to leak
24 # state between requests.
25 raise RuntimeError("The sanic integration for Sentry requires Python 3.7+")
26
27 # Sanic 0.8 and older creates a logger named "root" and puts a
28 # stringified version of every exception in there (without exc_info),
29 # which our error deduplication can't detect.
30 #
31 # https://github.com/huge-success/sanic/issues/1332
32 ignore_logger("root")
33
34 old_handle_request = Sanic.handle_request
35
36 async def sentry_handle_request(self, request, *args, **kwargs):
37 hub = Hub.current
38 if hub.get_integration(SanicIntegration) is None:
39 response = old_handle_request(self, request, *args, **kwargs)
40 else:
41 weak_request = weakref.ref(request)
42
43 with hub.push_scope() as scope:
44 scope.add_event_processor(_make_request_processor(weak_request))
45 response = old_handle_request(self, request, *args, **kwargs)
46 if isawaitable(response):
47 response = await response
48
49 return response
50
51 Sanic.handle_request = sentry_handle_request
52
53 old_router_get = Router.get
54
55 def sentry_router_get(self, request):
56 rv = old_router_get(self, request)
57 hub = Hub.current
58 if hub.get_integration(SanicIntegration) is not None:
59 with capture_internal_exceptions():
60 with hub.configure_scope() as scope:
61 scope.transaction = rv[0].__name__
62 return rv
63
64 Router.get = sentry_router_get
65
66 old_error_handler_lookup = ErrorHandler.lookup
67
68 def sentry_error_handler_lookup(self, exception):
69 _capture_exception(exception)
70 old_error_handler = old_error_handler_lookup(self, exception)
71
72 if old_error_handler is None:
73 return None
74
75 if Hub.current.get_integration(SanicIntegration) is None:
76 return old_error_handler
77
78 async def sentry_wrapped_error_handler(request, exception):
79 try:
80 response = old_error_handler(request, exception)
81 if isawaitable(response):
82 response = await response
83 return response
84 except Exception:
85 exc_info = sys.exc_info()
86 _capture_exception(exc_info)
87 reraise(*exc_info)
88
89 return sentry_wrapped_error_handler
90
91 ErrorHandler.lookup = sentry_error_handler_lookup
92
93
94 def _capture_exception(exception):
95 hub = Hub.current
96 integration = hub.get_integration(SanicIntegration)
97 if integration is None:
98 return
99
100 with capture_internal_exceptions():
101 event, hint = event_from_exception(
102 exception,
103 client_options=hub.client.options,
104 mechanism={"type": "sanic", "handled": False},
105 )
106 hub.capture_event(event, hint=hint)
107
108
109 def _make_request_processor(weak_request):
110 def sanic_processor(event, hint):
111 request = weak_request()
112 if request is None:
113 return event
114
115 with capture_internal_exceptions():
116 extractor = SanicRequestExtractor(request)
117 extractor.extract_into_event(event)
118
119 request_info = event["request"]
120 if "query_string" not in request_info:
121 request_info["query_string"] = extractor.urlparts.query
122
123 if "method" not in request_info:
124 request_info["method"] = request.method
125
126 if "env" not in request_info:
127 request_info["env"] = {"REMOTE_ADDR": request.remote_addr}
128
129 if "headers" not in request_info:
130 request_info["headers"] = _filter_headers(dict(request.headers))
131
132 return event
133
134 return sanic_processor
135
136
137 class SanicRequestExtractor(RequestExtractor):
138 def __init__(self, request):
139 RequestExtractor.__init__(self, request)
140 self.urlparts = urlparse.urlsplit(self.request.url)
141
142 def content_length(self):
143 if self.request.body is None:
144 return 0
145 return len(self.request.body)
146
147 def url(self):
148 return "%s://%s%s" % (
149 self.urlparts.scheme,
150 self.urlparts.netloc,
151 self.urlparts.path,
152 )
153
154 def cookies(self):
155 return dict(self.request.cookies)
156
157 def raw_data(self):
158 return self.request.body
159
160 def form(self):
161 return self.request.form
162
163 def is_json(self):
164 raise NotImplementedError()
165
166 def json(self):
167 return self.request.json
168
169 def files(self):
170 return self.request.files
171
172 def size_of_file(self, file):
173 return len(file.body or ())
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/sanic.py b/sentry_sdk/integrations/sanic.py
--- a/sentry_sdk/integrations/sanic.py
+++ b/sentry_sdk/integrations/sanic.py
@@ -36,17 +36,19 @@
async def sentry_handle_request(self, request, *args, **kwargs):
hub = Hub.current
if hub.get_integration(SanicIntegration) is None:
- response = old_handle_request(self, request, *args, **kwargs)
- else:
- weak_request = weakref.ref(request)
+ return old_handle_request(self, request, *args, **kwargs)
+
+ weak_request = weakref.ref(request)
- with hub.push_scope() as scope:
+ with Hub(hub) as hub:
+ with hub.configure_scope() as scope:
scope.add_event_processor(_make_request_processor(weak_request))
- response = old_handle_request(self, request, *args, **kwargs)
- if isawaitable(response):
- response = await response
- return response
+ response = old_handle_request(self, request, *args, **kwargs)
+ if isawaitable(response):
+ response = await response
+
+ return response
Sanic.handle_request = sentry_handle_request
| {"golden_diff": "diff --git a/sentry_sdk/integrations/sanic.py b/sentry_sdk/integrations/sanic.py\n--- a/sentry_sdk/integrations/sanic.py\n+++ b/sentry_sdk/integrations/sanic.py\n@@ -36,17 +36,19 @@\n async def sentry_handle_request(self, request, *args, **kwargs):\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is None:\n- response = old_handle_request(self, request, *args, **kwargs)\n- else:\n- weak_request = weakref.ref(request)\n+ return old_handle_request(self, request, *args, **kwargs)\n+\n+ weak_request = weakref.ref(request)\n \n- with hub.push_scope() as scope:\n+ with Hub(hub) as hub:\n+ with hub.configure_scope() as scope:\n scope.add_event_processor(_make_request_processor(weak_request))\n- response = old_handle_request(self, request, *args, **kwargs)\n- if isawaitable(response):\n- response = await response\n \n- return response\n+ response = old_handle_request(self, request, *args, **kwargs)\n+ if isawaitable(response):\n+ response = await response\n+\n+ return response\n \n Sanic.handle_request = sentry_handle_request\n", "issue": "asyncio concurrency issues\n@mitsuhiko discovered that passing around hubs does not work at all and we likely leak state between requests\n", "before_files": [{"content": "import sys\nimport weakref\nfrom inspect import isawaitable\n\nfrom sentry_sdk._compat import urlparse, reraise\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations._wsgi import RequestExtractor, _filter_headers\nfrom sentry_sdk.integrations.logging import ignore_logger\n\nfrom sanic import Sanic\nfrom sanic.router import Router\nfrom sanic.handlers import ErrorHandler\n\n\nclass SanicIntegration(Integration):\n identifier = \"sanic\"\n\n @staticmethod\n def setup_once():\n if sys.version_info < (3, 7):\n # Sanic is async. 
We better have contextvars or we're going to leak\n # state between requests.\n raise RuntimeError(\"The sanic integration for Sentry requires Python 3.7+\")\n\n # Sanic 0.8 and older creates a logger named \"root\" and puts a\n # stringified version of every exception in there (without exc_info),\n # which our error deduplication can't detect.\n #\n # https://github.com/huge-success/sanic/issues/1332\n ignore_logger(\"root\")\n\n old_handle_request = Sanic.handle_request\n\n async def sentry_handle_request(self, request, *args, **kwargs):\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is None:\n response = old_handle_request(self, request, *args, **kwargs)\n else:\n weak_request = weakref.ref(request)\n\n with hub.push_scope() as scope:\n scope.add_event_processor(_make_request_processor(weak_request))\n response = old_handle_request(self, request, *args, **kwargs)\n if isawaitable(response):\n response = await response\n\n return response\n\n Sanic.handle_request = sentry_handle_request\n\n old_router_get = Router.get\n\n def sentry_router_get(self, request):\n rv = old_router_get(self, request)\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is not None:\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n scope.transaction = rv[0].__name__\n return rv\n\n Router.get = sentry_router_get\n\n old_error_handler_lookup = ErrorHandler.lookup\n\n def sentry_error_handler_lookup(self, exception):\n _capture_exception(exception)\n old_error_handler = old_error_handler_lookup(self, exception)\n\n if old_error_handler is None:\n return None\n\n if Hub.current.get_integration(SanicIntegration) is None:\n return old_error_handler\n\n async def sentry_wrapped_error_handler(request, exception):\n try:\n response = old_error_handler(request, exception)\n if isawaitable(response):\n response = await response\n return response\n except Exception:\n exc_info = sys.exc_info()\n _capture_exception(exc_info)\n reraise(*exc_info)\n\n return sentry_wrapped_error_handler\n\n ErrorHandler.lookup = sentry_error_handler_lookup\n\n\ndef _capture_exception(exception):\n hub = Hub.current\n integration = hub.get_integration(SanicIntegration)\n if integration is None:\n return\n\n with capture_internal_exceptions():\n event, hint = event_from_exception(\n exception,\n client_options=hub.client.options,\n mechanism={\"type\": \"sanic\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\ndef _make_request_processor(weak_request):\n def sanic_processor(event, hint):\n request = weak_request()\n if request is None:\n return event\n\n with capture_internal_exceptions():\n extractor = SanicRequestExtractor(request)\n extractor.extract_into_event(event)\n\n request_info = event[\"request\"]\n if \"query_string\" not in request_info:\n request_info[\"query_string\"] = extractor.urlparts.query\n\n if \"method\" not in request_info:\n request_info[\"method\"] = request.method\n\n if \"env\" not in request_info:\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote_addr}\n\n if \"headers\" not in request_info:\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n return event\n\n return sanic_processor\n\n\nclass SanicRequestExtractor(RequestExtractor):\n def __init__(self, request):\n RequestExtractor.__init__(self, request)\n self.urlparts = urlparse.urlsplit(self.request.url)\n\n def content_length(self):\n if self.request.body is None:\n return 0\n return len(self.request.body)\n\n def url(self):\n return \"%s://%s%s\" % (\n 
self.urlparts.scheme,\n self.urlparts.netloc,\n self.urlparts.path,\n )\n\n def cookies(self):\n return dict(self.request.cookies)\n\n def raw_data(self):\n return self.request.body\n\n def form(self):\n return self.request.form\n\n def is_json(self):\n raise NotImplementedError()\n\n def json(self):\n return self.request.json\n\n def files(self):\n return self.request.files\n\n def size_of_file(self, file):\n return len(file.body or ())\n", "path": "sentry_sdk/integrations/sanic.py"}], "after_files": [{"content": "import sys\nimport weakref\nfrom inspect import isawaitable\n\nfrom sentry_sdk._compat import urlparse, reraise\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations._wsgi import RequestExtractor, _filter_headers\nfrom sentry_sdk.integrations.logging import ignore_logger\n\nfrom sanic import Sanic\nfrom sanic.router import Router\nfrom sanic.handlers import ErrorHandler\n\n\nclass SanicIntegration(Integration):\n identifier = \"sanic\"\n\n @staticmethod\n def setup_once():\n if sys.version_info < (3, 7):\n # Sanic is async. We better have contextvars or we're going to leak\n # state between requests.\n raise RuntimeError(\"The sanic integration for Sentry requires Python 3.7+\")\n\n # Sanic 0.8 and older creates a logger named \"root\" and puts a\n # stringified version of every exception in there (without exc_info),\n # which our error deduplication can't detect.\n #\n # https://github.com/huge-success/sanic/issues/1332\n ignore_logger(\"root\")\n\n old_handle_request = Sanic.handle_request\n\n async def sentry_handle_request(self, request, *args, **kwargs):\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is None:\n return old_handle_request(self, request, *args, **kwargs)\n\n weak_request = weakref.ref(request)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.add_event_processor(_make_request_processor(weak_request))\n\n response = old_handle_request(self, request, *args, **kwargs)\n if isawaitable(response):\n response = await response\n\n return response\n\n Sanic.handle_request = sentry_handle_request\n\n old_router_get = Router.get\n\n def sentry_router_get(self, request):\n rv = old_router_get(self, request)\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is not None:\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n scope.transaction = rv[0].__name__\n return rv\n\n Router.get = sentry_router_get\n\n old_error_handler_lookup = ErrorHandler.lookup\n\n def sentry_error_handler_lookup(self, exception):\n _capture_exception(exception)\n old_error_handler = old_error_handler_lookup(self, exception)\n\n if old_error_handler is None:\n return None\n\n if Hub.current.get_integration(SanicIntegration) is None:\n return old_error_handler\n\n async def sentry_wrapped_error_handler(request, exception):\n try:\n response = old_error_handler(request, exception)\n if isawaitable(response):\n response = await response\n return response\n except Exception:\n exc_info = sys.exc_info()\n _capture_exception(exc_info)\n reraise(*exc_info)\n\n return sentry_wrapped_error_handler\n\n ErrorHandler.lookup = sentry_error_handler_lookup\n\n\ndef _capture_exception(exception):\n hub = Hub.current\n integration = hub.get_integration(SanicIntegration)\n if integration is None:\n return\n\n with capture_internal_exceptions():\n event, hint = event_from_exception(\n exception,\n 
client_options=hub.client.options,\n mechanism={\"type\": \"sanic\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\ndef _make_request_processor(weak_request):\n def sanic_processor(event, hint):\n request = weak_request()\n if request is None:\n return event\n\n with capture_internal_exceptions():\n extractor = SanicRequestExtractor(request)\n extractor.extract_into_event(event)\n\n request_info = event[\"request\"]\n if \"query_string\" not in request_info:\n request_info[\"query_string\"] = extractor.urlparts.query\n\n if \"method\" not in request_info:\n request_info[\"method\"] = request.method\n\n if \"env\" not in request_info:\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote_addr}\n\n if \"headers\" not in request_info:\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n return event\n\n return sanic_processor\n\n\nclass SanicRequestExtractor(RequestExtractor):\n def __init__(self, request):\n RequestExtractor.__init__(self, request)\n self.urlparts = urlparse.urlsplit(self.request.url)\n\n def content_length(self):\n if self.request.body is None:\n return 0\n return len(self.request.body)\n\n def url(self):\n return \"%s://%s%s\" % (\n self.urlparts.scheme,\n self.urlparts.netloc,\n self.urlparts.path,\n )\n\n def cookies(self):\n return dict(self.request.cookies)\n\n def raw_data(self):\n return self.request.body\n\n def form(self):\n return self.request.form\n\n def is_json(self):\n raise NotImplementedError()\n\n def json(self):\n return self.request.json\n\n def files(self):\n return self.request.files\n\n def size_of_file(self, file):\n return len(file.body or ())\n", "path": "sentry_sdk/integrations/sanic.py"}]} | 1,830 | 285 |
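The core of the fix above is cloning the current hub (`Hub(hub)`) per request so concurrent asyncio tasks stop sharing one scope. A rough, framework-free sketch of that isolation idea using the public `sentry_sdk` API (the DSN-less init and the tag name are placeholders):

```python
import asyncio

import sentry_sdk
from sentry_sdk import Hub

sentry_sdk.init()  # no DSN configured, so nothing is actually sent


async def handle(request_id):
    # Derive a new hub from the current one so this task gets its own scope.
    with Hub(Hub.current) as hub:
        with hub.configure_scope() as scope:
            scope.set_tag("request_id", str(request_id))
        await asyncio.sleep(0)  # yield; tags set here stay local to this task


async def main():
    await asyncio.gather(*(handle(i) for i in range(3)))


asyncio.run(main())
```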
gh_patches_debug_14733 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3271 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add openAPI Specification for /database_roles/ endpoint
Generate spec for `database_roles` endpoint
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `config/settings/openapi.py`
Content:
```
1 def custom_preprocessing_hook(endpoints):
2 filtered = []
3 for (path, path_regex, method, callback) in endpoints:
4 # Remove all but DRF API endpoints
5 if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"):
6 filtered.append((path, path_regex, method, callback))
7 return filtered
8
9
10 def remove_url_prefix_hook(result, **kwargs):
11 # Remove namespace and version URL prefix from the operation Id of the generated API schema
12 for path, path_info in result['paths'].items():
13 for method, operation in path_info.items():
14 operation_id = operation.get('operationId')
15 if operation_id:
16 if path.startswith('/api/db/v0/'):
17 operation['operationId'] = operation_id.replace('db_v0_', '')
18 elif path.startswith('/api/ui/v0/'):
19 operation['operationId'] = operation_id.replace('ui_v0_', '')
20
21 return result
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/config/settings/openapi.py b/config/settings/openapi.py
--- a/config/settings/openapi.py
+++ b/config/settings/openapi.py
@@ -1,9 +1,16 @@
def custom_preprocessing_hook(endpoints):
- filtered = []
- for (path, path_regex, method, callback) in endpoints:
- # Remove all but DRF API endpoints
- if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"):
- filtered.append((path, path_regex, method, callback))
+ prefixes = [
+ "/api/db/v0/databases/",
+ "/api/db/v0/data_files/",
+ "/api/db/v0/schemas/",
+ "/api/db/v0/tables/",
+ "/api/db/v0/links/",
+ "/api/db/v0/queries/",
+ "/api/ui/v0/databases/",
+ "/api/ui/v0/users/",
+ "/api/ui/v0/database_roles/"
+ ]
+ filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]
return filtered
| {"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -1,9 +1,16 @@\n def custom_preprocessing_hook(endpoints):\n- filtered = []\n- for (path, path_regex, method, callback) in endpoints:\n- # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n- filtered.append((path, path_regex, method, callback))\n+ prefixes = [\n+ \"/api/db/v0/databases/\",\n+ \"/api/db/v0/data_files/\",\n+ \"/api/db/v0/schemas/\",\n+ \"/api/db/v0/tables/\",\n+ \"/api/db/v0/links/\",\n+ \"/api/db/v0/queries/\",\n+ \"/api/ui/v0/databases/\",\n+ \"/api/ui/v0/users/\",\n+ \"/api/ui/v0/database_roles/\"\n+ ]\n+ filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]\n return filtered\n", "issue": "Add openAPI Specification for /database_roles/ endpoint\nGenerate spec for `database_roles` endpoint\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}], "after_files": [{"content": "def custom_preprocessing_hook(endpoints):\n prefixes = [\n \"/api/db/v0/databases/\",\n \"/api/db/v0/data_files/\",\n \"/api/db/v0/schemas/\",\n \"/api/db/v0/tables/\",\n \"/api/db/v0/links/\",\n \"/api/db/v0/queries/\",\n \"/api/ui/v0/databases/\",\n \"/api/ui/v0/users/\",\n \"/api/ui/v0/database_roles/\"\n ]\n filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]} | 541 | 281 |
gh_patches_debug_35341 | rasdani/github-patches | git_diff | google__mobly-170 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Snippets are not compatible with UIAutomator
'am instrument' only sets up a UiAutomationConnection in -w mode (I don't know why). Snippets are not run in wait mode, so UiAutomationConnection is null. This crashes uiautomator, which needs that proxy object for privileged operations back to the shell.
We need to start the snippets in -w mode, using something like `start_standing_subprocess`.
--- END ISSUE ---
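
The fix direction sketched in the issue (run the instrumentation in `-w` mode and keep the blocking `adb shell` call alive) can be illustrated roughly as follows. This is a sketch only; it assumes `mobly.utils.start_standing_subprocess` behaves as the issue implies, and the command string mirrors the snippet runner invocation used elsewhere in this record:

```python
# Sketch, not the verbatim fix: launch the snippet runner with -w and
# keep the blocking 'am instrument' process alive for its lifetime so
# the UiAutomationConnection stays valid.
from mobly import utils

_LAUNCH_CMD = ('am instrument -w -e action start -e port %s %s/'
               'com.google.android.mobly.snippet.SnippetRunner')

def launch_snippet(serial, device_port, package):
    cmd = _LAUNCH_CMD % (device_port, package)
    adb_cmd = ['adb', '-s', serial, 'shell', cmd]
    return utils.start_standing_subprocess(adb_cmd, shell=False)
```

The returned process handle then has to be stopped (and health-checked) when the snippet is torn down, which is why the eventual change also touches `stop_app`.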
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mobly/controllers/android_device_lib/snippet_client.py`
Content:
```
1 #/usr/bin/env python3.4
2 #
3 # Copyright 2016 Google Inc.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 """JSON RPC interface to Mobly Snippet Lib."""
17 import logging
18 import re
19
20 from mobly.controllers.android_device_lib import adb
21 from mobly.controllers.android_device_lib import jsonrpc_client_base
22
23 _INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner'
24
25 _LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE
26
27 _STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE
28
29
30 class Error(Exception):
31 pass
32
33
34 class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
35 """A client for interacting with snippet APKs using Mobly Snippet Lib.
36
37 See superclass documentation for a list of public attributes.
38 """
39
40 def __init__(self, package, host_port, adb_proxy, log=logging.getLogger()):
41 """Initializes a SnippetClient.
42
43 Args:
44 package: (str) The package name of the apk where the snippets are
45 defined.
46 host_port: (int) The port at which to start the snippet client. Note
47 that the same port will currently be used for both the
48 device and host side of the connection.
49 adb_proxy: (adb.AdbProxy) The adb proxy to use to start the app.
50 """
51 # TODO(adorokhine): Don't assume that a free host-side port is free on
52 # the device as well. Both sides should allocate a unique port.
53 super(SnippetClient, self).__init__(
54 host_port=host_port,
55 device_port=host_port,
56 app_name=package,
57 adb_proxy=adb_proxy,
58 log=log)
59 self.package = package
60 self.log = log
61 self._serial = self._adb.serial
62
63 def _do_start_app(self):
64 """Overrides superclass."""
65 cmd = _LAUNCH_CMD % (self.device_port, self.package)
66 # Use info here so people know exactly what's happening here, which is
67 # helpful since they need to create their own instrumentations and
68 # manifest.
69 self.log.info('Launching snippet apk %s', self.package)
70 self._adb.shell(cmd)
71
72 def stop_app(self):
73 """Overrides superclass."""
74 cmd = _STOP_CMD % self.package
75 self.log.debug('Stopping snippet apk %s', self.package)
76 out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')
77 if 'OK (0 tests)' not in out:
78 raise Error('Failed to stop existing apk. Unexpected output: %s' %
79 out)
80
81 def check_app_installed(self):
82 """Overrides superclass."""
83 # Check that the Mobly Snippet app is installed.
84 if not self._adb_grep_wrapper(
85 r'pm list package | tr -d "\r" | grep "^package:%s$"' %
86 self.package):
87 raise jsonrpc_client_base.AppStartError(
88 '%s is not installed on %s' % (self.package, self._serial))
89 # Check that the app is instrumented.
90 out = self._adb_grep_wrapper(
91 r'pm list instrumentation | tr -d "\r" | grep ^instrumentation:%s/%s'
92 % (self.package, _INSTRUMENTATION_RUNNER_PACKAGE))
93 if not out:
94 raise jsonrpc_client_base.AppStartError(
95 '%s is installed on %s, but it is not instrumented.' %
96 (self.package, self._serial))
97 match = re.search(r'^instrumentation:(.*)\/(.*) \(target=(.*)\)$', out)
98 target_name = match.group(3)
99 # Check that the instrumentation target is installed if it's not the
100 # same as the snippet package.
101 if target_name != self.package:
102 out = self._adb_grep_wrapper(
103 r'pm list package | tr -d "\r" | grep ^package:%s$' %
104 target_name)
105 if not out:
106 raise jsonrpc_client_base.AppStartError(
107 'Instrumentation target %s is not installed on %s' %
108 (target_name, self._serial))
109
110 def _start_event_client(self):
111 event_client = SnippetClient(
112 package=self.package,
113 host_port=self.host_port,
114 adb_proxy=self._adb,
115 log=self.log)
116 event_client.connect(self.uid,
117 jsonrpc_client_base.JsonRpcCommand.CONTINUE)
118 return event_client
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -17,14 +17,18 @@
import logging
import re
-from mobly.controllers.android_device_lib import adb
+from mobly import utils
from mobly.controllers.android_device_lib import jsonrpc_client_base
-_INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner'
+_INSTRUMENTATION_RUNNER_PACKAGE = (
+ 'com.google.android.mobly.snippet.SnippetRunner')
-_LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE
+_LAUNCH_CMD = (
+ 'am instrument -w -e action start -e port %s %s/' +
+ _INSTRUMENTATION_RUNNER_PACKAGE)
-_STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE
+_STOP_CMD = (
+ 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
class Error(Exception):
@@ -59,6 +63,7 @@
self.package = package
self.log = log
self._serial = self._adb.serial
+ self._proc = None
def _do_start_app(self):
"""Overrides superclass."""
@@ -67,11 +72,18 @@
# helpful since they need to create their own instrumentations and
# manifest.
self.log.info('Launching snippet apk %s', self.package)
- self._adb.shell(cmd)
+ adb_cmd = ['adb', '-s', self._adb.serial, 'shell', cmd]
+ self._proc = utils.start_standing_subprocess(adb_cmd, shell=False)
def stop_app(self):
"""Overrides superclass."""
- cmd = _STOP_CMD % self.package
+ # Kill the pending 'adb shell am instrument -w' process if there is one.
+ # Although killing the snippet apk would abort this process anyway, we
+ # want to call stop_standing_subprocess() to perform a health check,
+ # print the failure stack trace if there was any, and reap it from the
+ # process table.
+ if self._proc:
+ utils.stop_standing_subprocess(self._proc)
self.log.debug('Stopping snippet apk %s', self.package)
out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')
if 'OK (0 tests)' not in out:
| {"golden_diff": "diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py\n--- a/mobly/controllers/android_device_lib/snippet_client.py\n+++ b/mobly/controllers/android_device_lib/snippet_client.py\n@@ -17,14 +17,18 @@\n import logging\n import re\n \n-from mobly.controllers.android_device_lib import adb\n+from mobly import utils\n from mobly.controllers.android_device_lib import jsonrpc_client_base\n \n-_INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner'\n+_INSTRUMENTATION_RUNNER_PACKAGE = (\n+ 'com.google.android.mobly.snippet.SnippetRunner')\n \n-_LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n+_LAUNCH_CMD = (\n+ 'am instrument -w -e action start -e port %s %s/' +\n+ _INSTRUMENTATION_RUNNER_PACKAGE)\n \n-_STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n+_STOP_CMD = (\n+ 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)\n \n \n class Error(Exception):\n@@ -59,6 +63,7 @@\n self.package = package\n self.log = log\n self._serial = self._adb.serial\n+ self._proc = None\n \n def _do_start_app(self):\n \"\"\"Overrides superclass.\"\"\"\n@@ -67,11 +72,18 @@\n # helpful since they need to create their own instrumentations and\n # manifest.\n self.log.info('Launching snippet apk %s', self.package)\n- self._adb.shell(cmd)\n+ adb_cmd = ['adb', '-s', self._adb.serial, 'shell', cmd]\n+ self._proc = utils.start_standing_subprocess(adb_cmd, shell=False)\n \n def stop_app(self):\n \"\"\"Overrides superclass.\"\"\"\n- cmd = _STOP_CMD % self.package\n+ # Kill the pending 'adb shell am instrument -w' process if there is one.\n+ # Although killing the snippet apk would abort this process anyway, we\n+ # want to call stop_standing_subprocess() to perform a health check,\n+ # print the failure stack trace if there was any, and reap it from the\n+ # process table.\n+ if self._proc:\n+ utils.stop_standing_subprocess(self._proc)\n self.log.debug('Stopping snippet apk %s', self.package)\n out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')\n if 'OK (0 tests)' not in out:\n", "issue": "Snippets are not compatible with UIAutomator\n'am instrument' only sets up a UiAutomationConnection in -w mode (I don't know why). Snippets are not run in wait mode, so UiAutomationConnection is null. 
This crashes uiautomator, which needs that proxy object for privileged operations back to the shell.\r\n\r\nWe need to start the snippets in -w mode, using something like `start_standing_subprocess`.\n", "before_files": [{"content": "#/usr/bin/env python3.4\n#\n# Copyright 2016 Google Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"JSON RPC interface to Mobly Snippet Lib.\"\"\"\nimport logging\nimport re\n\nfrom mobly.controllers.android_device_lib import adb\nfrom mobly.controllers.android_device_lib import jsonrpc_client_base\n\n_INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner'\n\n_LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n\n_STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n\n\nclass Error(Exception):\n pass\n\n\nclass SnippetClient(jsonrpc_client_base.JsonRpcClientBase):\n \"\"\"A client for interacting with snippet APKs using Mobly Snippet Lib.\n\n See superclass documentation for a list of public attributes.\n \"\"\"\n\n def __init__(self, package, host_port, adb_proxy, log=logging.getLogger()):\n \"\"\"Initializes a SnippetClient.\n \n Args:\n package: (str) The package name of the apk where the snippets are\n defined.\n host_port: (int) The port at which to start the snippet client. Note\n that the same port will currently be used for both the\n device and host side of the connection.\n adb_proxy: (adb.AdbProxy) The adb proxy to use to start the app.\n \"\"\"\n # TODO(adorokhine): Don't assume that a free host-side port is free on\n # the device as well. Both sides should allocate a unique port.\n super(SnippetClient, self).__init__(\n host_port=host_port,\n device_port=host_port,\n app_name=package,\n adb_proxy=adb_proxy,\n log=log)\n self.package = package\n self.log = log\n self._serial = self._adb.serial\n\n def _do_start_app(self):\n \"\"\"Overrides superclass.\"\"\"\n cmd = _LAUNCH_CMD % (self.device_port, self.package)\n # Use info here so people know exactly what's happening here, which is\n # helpful since they need to create their own instrumentations and\n # manifest.\n self.log.info('Launching snippet apk %s', self.package)\n self._adb.shell(cmd)\n\n def stop_app(self):\n \"\"\"Overrides superclass.\"\"\"\n cmd = _STOP_CMD % self.package\n self.log.debug('Stopping snippet apk %s', self.package)\n out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')\n if 'OK (0 tests)' not in out:\n raise Error('Failed to stop existing apk. 
Unexpected output: %s' %\n out)\n\n def check_app_installed(self):\n \"\"\"Overrides superclass.\"\"\"\n # Check that the Mobly Snippet app is installed.\n if not self._adb_grep_wrapper(\n r'pm list package | tr -d \"\\r\" | grep \"^package:%s$\"' %\n self.package):\n raise jsonrpc_client_base.AppStartError(\n '%s is not installed on %s' % (self.package, self._serial))\n # Check that the app is instrumented.\n out = self._adb_grep_wrapper(\n r'pm list instrumentation | tr -d \"\\r\" | grep ^instrumentation:%s/%s'\n % (self.package, _INSTRUMENTATION_RUNNER_PACKAGE))\n if not out:\n raise jsonrpc_client_base.AppStartError(\n '%s is installed on %s, but it is not instrumented.' %\n (self.package, self._serial))\n match = re.search(r'^instrumentation:(.*)\\/(.*) \\(target=(.*)\\)$', out)\n target_name = match.group(3)\n # Check that the instrumentation target is installed if it's not the\n # same as the snippet package.\n if target_name != self.package:\n out = self._adb_grep_wrapper(\n r'pm list package | tr -d \"\\r\" | grep ^package:%s$' %\n target_name)\n if not out:\n raise jsonrpc_client_base.AppStartError(\n 'Instrumentation target %s is not installed on %s' %\n (target_name, self._serial))\n\n def _start_event_client(self):\n event_client = SnippetClient(\n package=self.package,\n host_port=self.host_port,\n adb_proxy=self._adb,\n log=self.log)\n event_client.connect(self.uid,\n jsonrpc_client_base.JsonRpcCommand.CONTINUE)\n return event_client\n", "path": "mobly/controllers/android_device_lib/snippet_client.py"}], "after_files": [{"content": "#/usr/bin/env python3.4\n#\n# Copyright 2016 Google Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"JSON RPC interface to Mobly Snippet Lib.\"\"\"\nimport logging\nimport re\n\nfrom mobly import utils\nfrom mobly.controllers.android_device_lib import jsonrpc_client_base\n\n_INSTRUMENTATION_RUNNER_PACKAGE = (\n 'com.google.android.mobly.snippet.SnippetRunner')\n\n_LAUNCH_CMD = (\n 'am instrument -w -e action start -e port %s %s/' +\n _INSTRUMENTATION_RUNNER_PACKAGE)\n\n_STOP_CMD = (\n 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)\n\n\nclass Error(Exception):\n pass\n\n\nclass SnippetClient(jsonrpc_client_base.JsonRpcClientBase):\n \"\"\"A client for interacting with snippet APKs using Mobly Snippet Lib.\n\n See superclass documentation for a list of public attributes.\n \"\"\"\n\n def __init__(self, package, host_port, adb_proxy, log=logging.getLogger()):\n \"\"\"Initializes a SnippetClient.\n \n Args:\n package: (str) The package name of the apk where the snippets are\n defined.\n host_port: (int) The port at which to start the snippet client. Note\n that the same port will currently be used for both the\n device and host side of the connection.\n adb_proxy: (adb.AdbProxy) The adb proxy to use to start the app.\n \"\"\"\n # TODO(adorokhine): Don't assume that a free host-side port is free on\n # the device as well. 
Both sides should allocate a unique port.\n super(SnippetClient, self).__init__(\n host_port=host_port,\n device_port=host_port,\n app_name=package,\n adb_proxy=adb_proxy,\n log=log)\n self.package = package\n self.log = log\n self._serial = self._adb.serial\n self._proc = None\n\n def _do_start_app(self):\n \"\"\"Overrides superclass.\"\"\"\n cmd = _LAUNCH_CMD % (self.device_port, self.package)\n # Use info here so people know exactly what's happening here, which is\n # helpful since they need to create their own instrumentations and\n # manifest.\n self.log.info('Launching snippet apk %s', self.package)\n adb_cmd = ['adb', '-s', self._adb.serial, 'shell', cmd]\n self._proc = utils.start_standing_subprocess(adb_cmd, shell=False)\n\n def stop_app(self):\n \"\"\"Overrides superclass.\"\"\"\n # Kill the pending 'adb shell am instrument -w' process if there is one.\n # Although killing the snippet apk would abort this process anyway, we\n # want to call stop_standing_subprocess() to perform a health check,\n # print the failure stack trace if there was any, and reap it from the\n # process table.\n if self._proc:\n utils.stop_standing_subprocess(self._proc)\n self.log.debug('Stopping snippet apk %s', self.package)\n out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')\n if 'OK (0 tests)' not in out:\n raise Error('Failed to stop existing apk. Unexpected output: %s' %\n out)\n\n def check_app_installed(self):\n \"\"\"Overrides superclass.\"\"\"\n # Check that the Mobly Snippet app is installed.\n if not self._adb_grep_wrapper(\n r'pm list package | tr -d \"\\r\" | grep \"^package:%s$\"' %\n self.package):\n raise jsonrpc_client_base.AppStartError(\n '%s is not installed on %s' % (self.package, self._serial))\n # Check that the app is instrumented.\n out = self._adb_grep_wrapper(\n r'pm list instrumentation | tr -d \"\\r\" | grep ^instrumentation:%s/%s'\n % (self.package, _INSTRUMENTATION_RUNNER_PACKAGE))\n if not out:\n raise jsonrpc_client_base.AppStartError(\n '%s is installed on %s, but it is not instrumented.' %\n (self.package, self._serial))\n match = re.search(r'^instrumentation:(.*)\\/(.*) \\(target=(.*)\\)$', out)\n target_name = match.group(3)\n # Check that the instrumentation target is installed if it's not the\n # same as the snippet package.\n if target_name != self.package:\n out = self._adb_grep_wrapper(\n r'pm list package | tr -d \"\\r\" | grep ^package:%s$' %\n target_name)\n if not out:\n raise jsonrpc_client_base.AppStartError(\n 'Instrumentation target %s is not installed on %s' %\n (target_name, self._serial))\n\n def _start_event_client(self):\n event_client = SnippetClient(\n package=self.package,\n host_port=self.host_port,\n adb_proxy=self._adb,\n log=self.log)\n event_client.connect(self.uid,\n jsonrpc_client_base.JsonRpcCommand.CONTINUE)\n return event_client\n", "path": "mobly/controllers/android_device_lib/snippet_client.py"}]} | 1,704 | 596 |
gh_patches_debug_57176 | rasdani/github-patches | git_diff | celery__celery-4037 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
celery.contrib.sphinx fails with Sphinx 1.6.1
When using the `celery.contrib.sphinx` extension with Sphinx 1.6.1 with Celery 4.0.2 the following occurs:
```
Exception occurred:
File "/home/ubuntu/virtualenvs/venv-system/lib/python2.7/site-packages/celery/contrib/sphinx.py", line 72, in setup
app.domains['py'].directives['task'] = TaskDirective
AttributeError: 'Sphinx' object has no attribute 'domains'
The full traceback has been saved in /tmp/sphinx-err-oOWabx.log, if you want to report the issue to the developers.
Please also report this if it was a user error, so that a better error message can be provided next time.
A bug report can be filed in the tracker at <https://github.com/sphinx-doc/sphinx/issues>. Thanks!
make: *** [html] Error 1
```
The `domains` property seems to have been removed in sphinx-doc/sphinx#3656 and I think this line needs to be replaced with the [`add_directive` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive) (or more likely the [`add_directive_to_domain` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive_to_domain)).
--- END ISSUE ---
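
In other words, the extension needs to register its directive through Sphinx's public API rather than reaching into `app.domains`. A sketch of the adjusted `setup()` hook, using the `TaskDocumenter` and `TaskDirective` names defined in the module shown below (treat this as the suggested shape, not a verified patch):

```python
def setup(app):
    """Setup Sphinx extension."""
    app.add_autodocumenter(TaskDocumenter)
    # Public API available in Sphinx 1.6+; replaces the direct
    # app.domains['py'].directives['task'] assignment.
    app.add_directive_to_domain('py', 'task', TaskDirective)
    app.add_config_value('celery_task_prefix', '(task)', True)
```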
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/contrib/sphinx.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Sphinx documentation plugin used to document tasks.
3
4 Introduction
5 ============
6
7 Usage
8 -----
9
10 Add the extension to your :file:`docs/conf.py` configuration module:
11
12 .. code-block:: python
13
14 extensions = (...,
15 'celery.contrib.sphinx')
16
17 If you'd like to change the prefix for tasks in reference documentation
18 then you can change the ``celery_task_prefix`` configuration value:
19
20 .. code-block:: python
21
22 celery_task_prefix = '(task)' # < default
23
24 With the extension installed `autodoc` will automatically find
25 task decorated objects and generate the correct (as well as
26 add a ``(task)`` prefix), and you can also refer to the tasks
27 using `:task:proj.tasks.add` syntax.
28
29 Use ``.. autotask::`` to manually document a task.
30 """
31 from __future__ import absolute_import, unicode_literals
32 from inspect import formatargspec
33 from sphinx.domains.python import PyModulelevel
34 from sphinx.ext.autodoc import FunctionDocumenter
35 from celery.app.task import BaseTask
36 from celery.five import getfullargspec
37
38
39 class TaskDocumenter(FunctionDocumenter):
40 """Document task definitions."""
41
42 objtype = 'task'
43 member_order = 11
44
45 @classmethod
46 def can_document_member(cls, member, membername, isattr, parent):
47 return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
48
49 def format_args(self):
50 wrapped = getattr(self.object, '__wrapped__', None)
51 if wrapped is not None:
52 argspec = getfullargspec(wrapped)
53 fmt = formatargspec(*argspec)
54 fmt = fmt.replace('\\', '\\\\')
55 return fmt
56 return ''
57
58 def document_members(self, all_members=False):
59 pass
60
61
62 class TaskDirective(PyModulelevel):
63 """Sphinx task directive."""
64
65 def get_signature_prefix(self, sig):
66 return self.env.config.celery_task_prefix
67
68
69 def setup(app):
70 """Setup Sphinx extension."""
71 app.add_autodocumenter(TaskDocumenter)
72 app.domains['py'].directives['task'] = TaskDirective
73 app.add_config_value('celery_task_prefix', '(task)', True)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/celery/contrib/sphinx.py b/celery/contrib/sphinx.py
--- a/celery/contrib/sphinx.py
+++ b/celery/contrib/sphinx.py
@@ -69,5 +69,5 @@
def setup(app):
"""Setup Sphinx extension."""
app.add_autodocumenter(TaskDocumenter)
- app.domains['py'].directives['task'] = TaskDirective
+ app.add_directive_to_domain('py', 'task', TaskDirective)
app.add_config_value('celery_task_prefix', '(task)', True)
| {"golden_diff": "diff --git a/celery/contrib/sphinx.py b/celery/contrib/sphinx.py\n--- a/celery/contrib/sphinx.py\n+++ b/celery/contrib/sphinx.py\n@@ -69,5 +69,5 @@\n def setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_autodocumenter(TaskDocumenter)\n- app.domains['py'].directives['task'] = TaskDirective\n+ app.add_directive_to_domain('py', 'task', TaskDirective)\n app.add_config_value('celery_task_prefix', '(task)', True)\n", "issue": "celery.contrib.sphinx fails with Sphinx 1.6.1\nWhen using the `celery.contrib.sphinx` extension with Sphinx 1.6.1 with Celery 4.0.2 the following occurs:\r\n\r\n```\r\nException occurred:\r\n File \"/home/ubuntu/virtualenvs/venv-system/lib/python2.7/site-packages/celery/contrib/sphinx.py\", line 72, in setup\r\n app.domains['py'].directives['task'] = TaskDirective\r\nAttributeError: 'Sphinx' object has no attribute 'domains'\r\nThe full traceback has been saved in /tmp/sphinx-err-oOWabx.log, if you want to report the issue to the developers.\r\nPlease also report this if it was a user error, so that a better error message can be provided next time.\r\nA bug report can be filed in the tracker at <https://github.com/sphinx-doc/sphinx/issues>. Thanks!\r\nmake: *** [html] Error 1\r\n```\r\n\r\nThe `domains` property seems to have been removed in sphinx-doc/sphinx#3656 and I think this line needs to be replaced with the [`add_directive` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive) (or more likely the [`add_directive_to_domain` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive_to_domain)).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Sphinx documentation plugin used to document tasks.\n\nIntroduction\n============\n\nUsage\n-----\n\nAdd the extension to your :file:`docs/conf.py` configuration module:\n\n.. code-block:: python\n\n extensions = (...,\n 'celery.contrib.sphinx')\n\nIf you'd like to change the prefix for tasks in reference documentation\nthen you can change the ``celery_task_prefix`` configuration value:\n\n.. code-block:: python\n\n celery_task_prefix = '(task)' # < default\n\nWith the extension installed `autodoc` will automatically find\ntask decorated objects and generate the correct (as well as\nadd a ``(task)`` prefix), and you can also refer to the tasks\nusing `:task:proj.tasks.add` syntax.\n\nUse ``.. 
autotask::`` to manually document a task.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nfrom inspect import formatargspec\nfrom sphinx.domains.python import PyModulelevel\nfrom sphinx.ext.autodoc import FunctionDocumenter\nfrom celery.app.task import BaseTask\nfrom celery.five import getfullargspec\n\n\nclass TaskDocumenter(FunctionDocumenter):\n \"\"\"Document task definitions.\"\"\"\n\n objtype = 'task'\n member_order = 11\n\n @classmethod\n def can_document_member(cls, member, membername, isattr, parent):\n return isinstance(member, BaseTask) and getattr(member, '__wrapped__')\n\n def format_args(self):\n wrapped = getattr(self.object, '__wrapped__', None)\n if wrapped is not None:\n argspec = getfullargspec(wrapped)\n fmt = formatargspec(*argspec)\n fmt = fmt.replace('\\\\', '\\\\\\\\')\n return fmt\n return ''\n\n def document_members(self, all_members=False):\n pass\n\n\nclass TaskDirective(PyModulelevel):\n \"\"\"Sphinx task directive.\"\"\"\n\n def get_signature_prefix(self, sig):\n return self.env.config.celery_task_prefix\n\n\ndef setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_autodocumenter(TaskDocumenter)\n app.domains['py'].directives['task'] = TaskDirective\n app.add_config_value('celery_task_prefix', '(task)', True)\n", "path": "celery/contrib/sphinx.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Sphinx documentation plugin used to document tasks.\n\nIntroduction\n============\n\nUsage\n-----\n\nAdd the extension to your :file:`docs/conf.py` configuration module:\n\n.. code-block:: python\n\n extensions = (...,\n 'celery.contrib.sphinx')\n\nIf you'd like to change the prefix for tasks in reference documentation\nthen you can change the ``celery_task_prefix`` configuration value:\n\n.. code-block:: python\n\n celery_task_prefix = '(task)' # < default\n\nWith the extension installed `autodoc` will automatically find\ntask decorated objects and generate the correct (as well as\nadd a ``(task)`` prefix), and you can also refer to the tasks\nusing `:task:proj.tasks.add` syntax.\n\nUse ``.. autotask::`` to manually document a task.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nfrom inspect import formatargspec\nfrom sphinx.domains.python import PyModulelevel\nfrom sphinx.ext.autodoc import FunctionDocumenter\nfrom celery.app.task import BaseTask\nfrom celery.five import getfullargspec\n\n\nclass TaskDocumenter(FunctionDocumenter):\n \"\"\"Document task definitions.\"\"\"\n\n objtype = 'task'\n member_order = 11\n\n @classmethod\n def can_document_member(cls, member, membername, isattr, parent):\n return isinstance(member, BaseTask) and getattr(member, '__wrapped__')\n\n def format_args(self):\n wrapped = getattr(self.object, '__wrapped__', None)\n if wrapped is not None:\n argspec = getfullargspec(wrapped)\n fmt = formatargspec(*argspec)\n fmt = fmt.replace('\\\\', '\\\\\\\\')\n return fmt\n return ''\n\n def document_members(self, all_members=False):\n pass\n\n\nclass TaskDirective(PyModulelevel):\n \"\"\"Sphinx task directive.\"\"\"\n\n def get_signature_prefix(self, sig):\n return self.env.config.celery_task_prefix\n\n\ndef setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_autodocumenter(TaskDocumenter)\n app.add_directive_to_domain('py', 'task', TaskDirective)\n app.add_config_value('celery_task_prefix', '(task)', True)\n", "path": "celery/contrib/sphinx.py"}]} | 1,197 | 127 |
gh_patches_debug_30866 | rasdani/github-patches | git_diff | sktime__sktime-3723 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[MNT] PyPi publish action for 0.14 failed
Summary by @fkiraly:
The attempted 0.14.0 release has failed at pypi upload of successfully built wheels.
Help on diagnosing and solving this is appreciated.
Original post:
---
Just to let you know: 0.14 has not been published on PyPI yet:
https://github.com/sktime/sktime/actions/runs/3402037795
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """Install script for sktime."""
4
5 __author__ = ["mloning", "lmmentel"]
6
7 import codecs
8
9 import toml
10 from setuptools import find_packages, setup
11
12 pyproject = toml.load("pyproject.toml")
13
14
15 def long_description():
16 """Read and return README as long description."""
17 with codecs.open("README.md", encoding="utf-8-sig") as f:
18 return f.read()
19
20
21 # ground truth package metadata is loaded from pyproject.toml
22 # for context see:
23 # - [PEP 621 -- Storing project metadata in pyproject.toml]
24 # (https://www.python.org/dev/peps/pep-0621)
25 pyproject = toml.load("pyproject.toml")
26
27
28 def setup_package():
29 """Set up package."""
30 projectname = pyproject["project"]["name"]
31 setup(
32 author_email="[email protected]",
33 author=f"{projectname} developers",
34 classifiers=pyproject["project"]["classifiers"],
35 description=pyproject["project"]["description"],
36 download_url=pyproject["project"]["urls"]["download"],
37 extras_require=pyproject["project"]["optional-dependencies"],
38 include_package_data=True,
39 install_requires=pyproject["project"]["dependencies"],
40 keywords=pyproject["project"]["keywords"],
41 license=pyproject["project"]["license"],
42 long_description=long_description(),
43 maintainer_email="[email protected]",
44 maintainer=f"{projectname} developers",
45 name=projectname,
46 package_data={
47 "sktime": [
48 "*.csv",
49 "*.csv.gz",
50 "*.arff",
51 "*.arff.gz",
52 "*.txt",
53 "*.ts",
54 "*.tsv",
55 ]
56 },
57 packages=find_packages(
58 where=".",
59 exclude=["tests", "tests.*"],
60 ),
61 project_urls=pyproject["project"]["urls"],
62 python_requires=pyproject["project"]["requires-python"],
63 setup_requires=pyproject["build-system"]["requires"],
64 url=pyproject["project"]["urls"]["repository"],
65 version=pyproject["project"]["version"],
66 zip_safe=False,
67 )
68
69
70 if __name__ == "__main__":
71 setup_package()
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
deleted file mode 100644
--- a/setup.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-"""Install script for sktime."""
-
-__author__ = ["mloning", "lmmentel"]
-
-import codecs
-
-import toml
-from setuptools import find_packages, setup
-
-pyproject = toml.load("pyproject.toml")
-
-
-def long_description():
- """Read and return README as long description."""
- with codecs.open("README.md", encoding="utf-8-sig") as f:
- return f.read()
-
-
-# ground truth package metadata is loaded from pyproject.toml
-# for context see:
-# - [PEP 621 -- Storing project metadata in pyproject.toml]
-# (https://www.python.org/dev/peps/pep-0621)
-pyproject = toml.load("pyproject.toml")
-
-
-def setup_package():
- """Set up package."""
- projectname = pyproject["project"]["name"]
- setup(
- author_email="[email protected]",
- author=f"{projectname} developers",
- classifiers=pyproject["project"]["classifiers"],
- description=pyproject["project"]["description"],
- download_url=pyproject["project"]["urls"]["download"],
- extras_require=pyproject["project"]["optional-dependencies"],
- include_package_data=True,
- install_requires=pyproject["project"]["dependencies"],
- keywords=pyproject["project"]["keywords"],
- license=pyproject["project"]["license"],
- long_description=long_description(),
- maintainer_email="[email protected]",
- maintainer=f"{projectname} developers",
- name=projectname,
- package_data={
- "sktime": [
- "*.csv",
- "*.csv.gz",
- "*.arff",
- "*.arff.gz",
- "*.txt",
- "*.ts",
- "*.tsv",
- ]
- },
- packages=find_packages(
- where=".",
- exclude=["tests", "tests.*"],
- ),
- project_urls=pyproject["project"]["urls"],
- python_requires=pyproject["project"]["requires-python"],
- setup_requires=pyproject["build-system"]["requires"],
- url=pyproject["project"]["urls"]["repository"],
- version=pyproject["project"]["version"],
- zip_safe=False,
- )
-
-
-if __name__ == "__main__":
- setup_package()
| {"golden_diff": "diff --git a/setup.py b/setup.py\ndeleted file mode 100644\n--- a/setup.py\n+++ /dev/null\n@@ -1,71 +0,0 @@\n-#! /usr/bin/env python\n-# -*- coding: utf-8 -*-\n-\"\"\"Install script for sktime.\"\"\"\n-\n-__author__ = [\"mloning\", \"lmmentel\"]\n-\n-import codecs\n-\n-import toml\n-from setuptools import find_packages, setup\n-\n-pyproject = toml.load(\"pyproject.toml\")\n-\n-\n-def long_description():\n- \"\"\"Read and return README as long description.\"\"\"\n- with codecs.open(\"README.md\", encoding=\"utf-8-sig\") as f:\n- return f.read()\n-\n-\n-# ground truth package metadata is loaded from pyproject.toml\n-# for context see:\n-# - [PEP 621 -- Storing project metadata in pyproject.toml]\n-# (https://www.python.org/dev/peps/pep-0621)\n-pyproject = toml.load(\"pyproject.toml\")\n-\n-\n-def setup_package():\n- \"\"\"Set up package.\"\"\"\n- projectname = pyproject[\"project\"][\"name\"]\n- setup(\n- author_email=\"[email protected]\",\n- author=f\"{projectname} developers\",\n- classifiers=pyproject[\"project\"][\"classifiers\"],\n- description=pyproject[\"project\"][\"description\"],\n- download_url=pyproject[\"project\"][\"urls\"][\"download\"],\n- extras_require=pyproject[\"project\"][\"optional-dependencies\"],\n- include_package_data=True,\n- install_requires=pyproject[\"project\"][\"dependencies\"],\n- keywords=pyproject[\"project\"][\"keywords\"],\n- license=pyproject[\"project\"][\"license\"],\n- long_description=long_description(),\n- maintainer_email=\"[email protected]\",\n- maintainer=f\"{projectname} developers\",\n- name=projectname,\n- package_data={\n- \"sktime\": [\n- \"*.csv\",\n- \"*.csv.gz\",\n- \"*.arff\",\n- \"*.arff.gz\",\n- \"*.txt\",\n- \"*.ts\",\n- \"*.tsv\",\n- ]\n- },\n- packages=find_packages(\n- where=\".\",\n- exclude=[\"tests\", \"tests.*\"],\n- ),\n- project_urls=pyproject[\"project\"][\"urls\"],\n- python_requires=pyproject[\"project\"][\"requires-python\"],\n- setup_requires=pyproject[\"build-system\"][\"requires\"],\n- url=pyproject[\"project\"][\"urls\"][\"repository\"],\n- version=pyproject[\"project\"][\"version\"],\n- zip_safe=False,\n- )\n-\n-\n-if __name__ == \"__main__\":\n- setup_package()\n", "issue": "[MNT] PyPi publish action for 0.14 failed\nSummary by @fkiraly:\r\nThe attempted 0.14.0 release has failed at pypi upload of successfully built wheels.\r\n\r\nHelp on diagnosing and solving this is appreciated.\r\n\r\nOriginal post:\r\n---\r\nJust to let you know: 0.14 has not been published on PyPI yet:\r\n\r\nhttps://github.com/sktime/sktime/actions/runs/3402037795\n", "before_files": [{"content": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Install script for sktime.\"\"\"\n\n__author__ = [\"mloning\", \"lmmentel\"]\n\nimport codecs\n\nimport toml\nfrom setuptools import find_packages, setup\n\npyproject = toml.load(\"pyproject.toml\")\n\n\ndef long_description():\n \"\"\"Read and return README as long description.\"\"\"\n with codecs.open(\"README.md\", encoding=\"utf-8-sig\") as f:\n return f.read()\n\n\n# ground truth package metadata is loaded from pyproject.toml\n# for context see:\n# - [PEP 621 -- Storing project metadata in pyproject.toml]\n# (https://www.python.org/dev/peps/pep-0621)\npyproject = toml.load(\"pyproject.toml\")\n\n\ndef setup_package():\n \"\"\"Set up package.\"\"\"\n projectname = pyproject[\"project\"][\"name\"]\n setup(\n author_email=\"[email protected]\",\n author=f\"{projectname} developers\",\n classifiers=pyproject[\"project\"][\"classifiers\"],\n description=pyproject[\"project\"][\"description\"],\n download_url=pyproject[\"project\"][\"urls\"][\"download\"],\n extras_require=pyproject[\"project\"][\"optional-dependencies\"],\n include_package_data=True,\n install_requires=pyproject[\"project\"][\"dependencies\"],\n keywords=pyproject[\"project\"][\"keywords\"],\n license=pyproject[\"project\"][\"license\"],\n long_description=long_description(),\n maintainer_email=\"[email protected]\",\n maintainer=f\"{projectname} developers\",\n name=projectname,\n package_data={\n \"sktime\": [\n \"*.csv\",\n \"*.csv.gz\",\n \"*.arff\",\n \"*.arff.gz\",\n \"*.txt\",\n \"*.ts\",\n \"*.tsv\",\n ]\n },\n packages=find_packages(\n where=\".\",\n exclude=[\"tests\", \"tests.*\"],\n ),\n project_urls=pyproject[\"project\"][\"urls\"],\n python_requires=pyproject[\"project\"][\"requires-python\"],\n setup_requires=pyproject[\"build-system\"][\"requires\"],\n url=pyproject[\"project\"][\"urls\"][\"repository\"],\n version=pyproject[\"project\"][\"version\"],\n zip_safe=False,\n )\n\n\nif __name__ == \"__main__\":\n setup_package()\n", "path": "setup.py"}], "after_files": [{"content": null, "path": "setup.py"}]} | 982 | 583 |
gh_patches_debug_24590 | rasdani/github-patches | git_diff | apluslms__a-plus-1216 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Display personal deadline extensions on a student’s points page
A student’s points page in A+ just shows the official deadlines for each module. Personalized deadlines are not shown. This has repeatedly caused confusion when a student believes there’s something wrong with their personalized deadline. It would be better if the student was able to see the actual deadlines on their points page (and possibly elsewhere?).
This would also eliminate some confusion in communications between members of course staff, as staff members would also easily see the student’s DL extensions.
I’m not sure off the top of my head how best to display this on the page.
--- END ISSUE ---
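
The direction taken further down in this record is to expose the student's personal deadline to the templates through small filters over the cached module/exercise entries. A minimal sketch of the core check (it assumes each cached entry is a dict with `opening_time`, as in the existing filters below, plus an optional `personal_deadline` field, which is an assumption here):

```python
# Sketch: is an exercise still open for a student whose deadline was
# extended? The entry layout is assumed, not taken from the code below.
def deadline_extended_exercise_open(entry, now):
    personal_deadline = entry.get('personal_deadline')
    return (personal_deadline is not None
            and entry['opening_time'] <= now <= personal_deadline)
```

A filter of this shape lets the points page mark modules and exercises that are only open for the student because of a personal extension, which addresses the confusion described above.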
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `course/templatetags/course.py`
Content:
```
1 from typing import Any, Dict, List, Union
2
3 from django import template
4 from django.db import models
5 from django.utils.safestring import mark_safe
6 from django.utils.translation import get_language
7
8 from exercise.cache.content import CachedContent
9 from course.models import CourseInstance, UserTagging
10 from lib.localization_syntax import pick_localized
11 from userprofile.models import UserProfile
12 from ..cache.menu import CachedTopMenu
13
14
15 register = template.Library()
16
17
18 def _prepare_topmenu(context):
19 if 'topmenu' not in context:
20 request = context.get('request', None)
21 context['topmenu'] = CachedTopMenu(request.user if request else None)
22 return context['topmenu']
23
24
25 @register.inclusion_tag("course/_course_dropdown_menu.html", takes_context=True)
26 def course_menu(context):
27 menu = _prepare_topmenu(context)
28 return { "instances": menu.courses() }
29
30
31 @register.inclusion_tag('course/_group_select.html', takes_context=True)
32 def group_select(context):
33 instance = context.get('instance', None)
34 if not instance:
35 return { 'groups': [] }
36 menu = _prepare_topmenu(context)
37 groups, selected = menu.groups(instance)
38 return {
39 'instance': instance,
40 'groups': groups,
41 'selected': selected,
42 }
43
44
45 @register.filter
46 def escape_slashes(string):
47 return str(string).replace('/', '\/') # noqa: W605
48
49 @register.filter
50 def parse_localization(entry):
51 return pick_localized(entry, get_language())
52
53
54 @register.filter
55 def list_unselected(langs):
56 listed = list(filter(lambda x: x and x != get_language(), langs.split("|")))
57 return listed
58
59
60 @register.filter
61 def is_visible(entry):
62 return CachedContent.is_visible(entry)
63
64
65 @register.filter
66 def is_visible_to(entry, user):
67 return entry.is_visible_to(user)
68
69
70 @register.filter
71 def is_listed(entry):
72 return CachedContent.is_listed(entry)
73
74
75 @register.filter
76 def len_listed(entries):
77 return len([e for e in entries if CachedContent.is_listed(e)])
78
79
80 @register.filter
81 def is_in_maintenance(entry):
82 return CachedContent.is_in_maintenance(entry)
83
84
85 @register.filter
86 def exercises_open(entry, now):
87 return entry['opening_time'] <= now <= entry['closing_time']
88
89
90 @register.filter
91 def exercises_submittable(entry, now):
92 if entry['late_allowed']:
93 return entry['opening_time'] <= now <= entry['late_time']
94 return entry['opening_time'] <= now <= entry['closing_time']
95
96
97 @register.filter
98 def has_opened(entry, now):
99 return entry['opening_time'] <= now
100
101
102 @register.filter
103 def url(model_object, name=None):
104 if name:
105 return model_object.get_url(name)
106 return model_object.get_display_url()
107
108
109 @register.filter
110 def names(profiles):
111 return ", ".join(p.user.get_full_name() for p in profiles)
112
113
114 @register.inclusion_tag('course/_avatars.html')
115 def avatars(profiles):
116 return { 'profiles': profiles }
117
118
119 @register.inclusion_tag("course/_profiles.html")
120 def profiles(
121 profiles: Union[UserProfile, List[UserProfile], models.QuerySet[UserProfile]],
122 instance: CourseInstance,
123 is_teacher: bool
124 ) -> Dict[str, Any]:
125 if isinstance(profiles, UserProfile):
126 profiles = [profiles]
127 elif isinstance(profiles, models.QuerySet):
128 # Avoid re-fetching the queryset
129 profiles = list(profiles)
130 return {
131 'instance': instance,
132 'profiles': profiles,
133 'is_teacher': is_teacher,
134 }
135
136
137 @register.simple_tag
138 def tags(profile, instance):
139 tags = UserTagging.objects.get_all(profile, instance)
140 return mark_safe(' '.join(tag.html_label for tag in tags))
141
142
143 @register.filter
144 def enrollment_audience(enrollment_audience_val):
145 # convert enrollment audience Enum value to the string description
146 return CourseInstance.ENROLLMENT_AUDIENCE[enrollment_audience_val]
147
148
149 @register.filter
150 def view_content_to(view_content_to_val):
151 # convert "view content to" Enum value to the string description
152 return CourseInstance.VIEW_ACCESS[view_content_to_val]
153
154
155 @register.filter
156 def is_banned_student(profile, course_instance):
157 return course_instance.is_banned(profile.user)
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/course/templatetags/course.py b/course/templatetags/course.py
--- a/course/templatetags/course.py
+++ b/course/templatetags/course.py
@@ -1,4 +1,5 @@
from typing import Any, Dict, List, Union
+from copy import deepcopy
from django import template
from django.db import models
@@ -22,6 +23,11 @@
return context['topmenu']
+def _deadline_extended_exercise_open(entry, now):
+ personal_deadline = entry.get('personal_deadline')
+ return personal_deadline is not None and entry['opening_time'] <= now <= personal_deadline
+
+
@register.inclusion_tag("course/_course_dropdown_menu.html", takes_context=True)
def course_menu(context):
menu = _prepare_topmenu(context)
@@ -87,6 +93,17 @@
return entry['opening_time'] <= now <= entry['closing_time']
[email protected]
+def deadline_extended_exercise_open(entry, now):
+ return _deadline_extended_exercise_open(entry, now)
+
+
[email protected]
+def deadline_extended_exercises_open(entry, now):
+ entries = deepcopy(entry['flatted'])
+ return any(_deadline_extended_exercise_open(entry, now) for entry in entries)
+
+
@register.filter
def exercises_submittable(entry, now):
if entry['late_allowed']:
| {"golden_diff": "diff --git a/course/templatetags/course.py b/course/templatetags/course.py\n--- a/course/templatetags/course.py\n+++ b/course/templatetags/course.py\n@@ -1,4 +1,5 @@\n from typing import Any, Dict, List, Union\n+from copy import deepcopy\n \n from django import template\n from django.db import models\n@@ -22,6 +23,11 @@\n return context['topmenu']\n \n \n+def _deadline_extended_exercise_open(entry, now):\n+ personal_deadline = entry.get('personal_deadline')\n+ return personal_deadline is not None and entry['opening_time'] <= now <= personal_deadline\n+\n+\n @register.inclusion_tag(\"course/_course_dropdown_menu.html\", takes_context=True)\n def course_menu(context):\n menu = _prepare_topmenu(context)\n@@ -87,6 +93,17 @@\n return entry['opening_time'] <= now <= entry['closing_time']\n \n \[email protected]\n+def deadline_extended_exercise_open(entry, now):\n+ return _deadline_extended_exercise_open(entry, now)\n+\n+\[email protected]\n+def deadline_extended_exercises_open(entry, now):\n+ entries = deepcopy(entry['flatted'])\n+ return any(_deadline_extended_exercise_open(entry, now) for entry in entries)\n+\n+\n @register.filter\n def exercises_submittable(entry, now):\n if entry['late_allowed']:\n", "issue": "Display personal deadline extensions on a student\u2019s points page\nA student\u2019s points page in A+ just shows the official deadlines for each module. Personalized deadlines are not shown. This has repeatedly caused confusion when a student believes there\u2019s something wrong with their personalized deadline. It would be better if the student was able to see the actual deadlines on their points page (and possibly elsewhere?). \r\n\r\nThis would also eliminate some confusion in communications between members of course staff, as staff members would also easily see the student\u2019s DL extensions. 
\r\n\r\nI\u2019m not sure off the top of my head how best to display this on the page.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Any, Dict, List, Union\n\nfrom django import template\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import get_language\n\nfrom exercise.cache.content import CachedContent\nfrom course.models import CourseInstance, UserTagging\nfrom lib.localization_syntax import pick_localized\nfrom userprofile.models import UserProfile\nfrom ..cache.menu import CachedTopMenu\n\n\nregister = template.Library()\n\n\ndef _prepare_topmenu(context):\n if 'topmenu' not in context:\n request = context.get('request', None)\n context['topmenu'] = CachedTopMenu(request.user if request else None)\n return context['topmenu']\n\n\[email protected]_tag(\"course/_course_dropdown_menu.html\", takes_context=True)\ndef course_menu(context):\n menu = _prepare_topmenu(context)\n return { \"instances\": menu.courses() }\n\n\[email protected]_tag('course/_group_select.html', takes_context=True)\ndef group_select(context):\n instance = context.get('instance', None)\n if not instance:\n return { 'groups': [] }\n menu = _prepare_topmenu(context)\n groups, selected = menu.groups(instance)\n return {\n 'instance': instance,\n 'groups': groups,\n 'selected': selected,\n }\n\n\[email protected]\ndef escape_slashes(string):\n return str(string).replace('/', '\\/') # noqa: W605\n\[email protected]\ndef parse_localization(entry):\n return pick_localized(entry, get_language())\n\n\[email protected]\ndef list_unselected(langs):\n listed = list(filter(lambda x: x and x != get_language(), langs.split(\"|\")))\n return listed\n\n\[email protected]\ndef is_visible(entry):\n return CachedContent.is_visible(entry)\n\n\[email protected]\ndef is_visible_to(entry, user):\n return entry.is_visible_to(user)\n\n\[email protected]\ndef is_listed(entry):\n return CachedContent.is_listed(entry)\n\n\[email protected]\ndef len_listed(entries):\n return len([e for e in entries if CachedContent.is_listed(e)])\n\n\[email protected]\ndef is_in_maintenance(entry):\n return CachedContent.is_in_maintenance(entry)\n\n\[email protected]\ndef exercises_open(entry, now):\n return entry['opening_time'] <= now <= entry['closing_time']\n\n\[email protected]\ndef exercises_submittable(entry, now):\n if entry['late_allowed']:\n return entry['opening_time'] <= now <= entry['late_time']\n return entry['opening_time'] <= now <= entry['closing_time']\n\n\[email protected]\ndef has_opened(entry, now):\n return entry['opening_time'] <= now\n\n\[email protected]\ndef url(model_object, name=None):\n if name:\n return model_object.get_url(name)\n return model_object.get_display_url()\n\n\[email protected]\ndef names(profiles):\n return \", \".join(p.user.get_full_name() for p in profiles)\n\n\[email protected]_tag('course/_avatars.html')\ndef avatars(profiles):\n return { 'profiles': profiles }\n\n\[email protected]_tag(\"course/_profiles.html\")\ndef profiles(\n profiles: Union[UserProfile, List[UserProfile], models.QuerySet[UserProfile]],\n instance: CourseInstance,\n is_teacher: bool\n ) -> Dict[str, Any]:\n if isinstance(profiles, UserProfile):\n profiles = [profiles]\n elif isinstance(profiles, models.QuerySet):\n # Avoid re-fetching the queryset\n profiles = list(profiles)\n return {\n 'instance': instance,\n 'profiles': profiles,\n 'is_teacher': is_teacher,\n }\n\n\[email protected]_tag\ndef tags(profile, instance):\n tags = 
UserTagging.objects.get_all(profile, instance)\n return mark_safe(' '.join(tag.html_label for tag in tags))\n\n\[email protected]\ndef enrollment_audience(enrollment_audience_val):\n # convert enrollment audience Enum value to the string description\n return CourseInstance.ENROLLMENT_AUDIENCE[enrollment_audience_val]\n\n\[email protected]\ndef view_content_to(view_content_to_val):\n # convert \"view content to\" Enum value to the string description\n return CourseInstance.VIEW_ACCESS[view_content_to_val]\n\n\[email protected]\ndef is_banned_student(profile, course_instance):\n return course_instance.is_banned(profile.user)\n", "path": "course/templatetags/course.py"}], "after_files": [{"content": "from typing import Any, Dict, List, Union\nfrom copy import deepcopy\n\nfrom django import template\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import get_language\n\nfrom exercise.cache.content import CachedContent\nfrom course.models import CourseInstance, UserTagging\nfrom lib.localization_syntax import pick_localized\nfrom userprofile.models import UserProfile\nfrom ..cache.menu import CachedTopMenu\n\n\nregister = template.Library()\n\n\ndef _prepare_topmenu(context):\n if 'topmenu' not in context:\n request = context.get('request', None)\n context['topmenu'] = CachedTopMenu(request.user if request else None)\n return context['topmenu']\n\n\ndef _deadline_extended_exercise_open(entry, now):\n personal_deadline = entry.get('personal_deadline')\n return personal_deadline is not None and entry['opening_time'] <= now <= personal_deadline\n\n\[email protected]_tag(\"course/_course_dropdown_menu.html\", takes_context=True)\ndef course_menu(context):\n menu = _prepare_topmenu(context)\n return { \"instances\": menu.courses() }\n\n\[email protected]_tag('course/_group_select.html', takes_context=True)\ndef group_select(context):\n instance = context.get('instance', None)\n if not instance:\n return { 'groups': [] }\n menu = _prepare_topmenu(context)\n groups, selected = menu.groups(instance)\n return {\n 'instance': instance,\n 'groups': groups,\n 'selected': selected,\n }\n\n\[email protected]\ndef escape_slashes(string):\n return str(string).replace('/', '\\/') # noqa: W605\n\[email protected]\ndef parse_localization(entry):\n return pick_localized(entry, get_language())\n\n\[email protected]\ndef list_unselected(langs):\n listed = list(filter(lambda x: x and x != get_language(), langs.split(\"|\")))\n return listed\n\n\[email protected]\ndef is_visible(entry):\n return CachedContent.is_visible(entry)\n\n\[email protected]\ndef is_visible_to(entry, user):\n return entry.is_visible_to(user)\n\n\[email protected]\ndef is_listed(entry):\n return CachedContent.is_listed(entry)\n\n\[email protected]\ndef len_listed(entries):\n return len([e for e in entries if CachedContent.is_listed(e)])\n\n\[email protected]\ndef is_in_maintenance(entry):\n return CachedContent.is_in_maintenance(entry)\n\n\[email protected]\ndef exercises_open(entry, now):\n return entry['opening_time'] <= now <= entry['closing_time']\n\n\[email protected]\ndef deadline_extended_exercise_open(entry, now):\n return _deadline_extended_exercise_open(entry, now)\n\n\[email protected]\ndef deadline_extended_exercises_open(entry, now):\n entries = deepcopy(entry['flatted'])\n return any(_deadline_extended_exercise_open(entry, now) for entry in entries)\n\n\[email protected]\ndef exercises_submittable(entry, now):\n if entry['late_allowed']:\n return entry['opening_time'] <= 
now <= entry['late_time']\n return entry['opening_time'] <= now <= entry['closing_time']\n\n\[email protected]\ndef has_opened(entry, now):\n return entry['opening_time'] <= now\n\n\[email protected]\ndef url(model_object, name=None):\n if name:\n return model_object.get_url(name)\n return model_object.get_display_url()\n\n\[email protected]\ndef names(profiles):\n return \", \".join(p.user.get_full_name() for p in profiles)\n\n\[email protected]_tag('course/_avatars.html')\ndef avatars(profiles):\n return { 'profiles': profiles }\n\n\[email protected]_tag(\"course/_profiles.html\")\ndef profiles(\n profiles: Union[UserProfile, List[UserProfile], models.QuerySet[UserProfile]],\n instance: CourseInstance,\n is_teacher: bool\n ) -> Dict[str, Any]:\n if isinstance(profiles, UserProfile):\n profiles = [profiles]\n elif isinstance(profiles, models.QuerySet):\n # Avoid re-fetching the queryset\n profiles = list(profiles)\n return {\n 'instance': instance,\n 'profiles': profiles,\n 'is_teacher': is_teacher,\n }\n\n\[email protected]_tag\ndef tags(profile, instance):\n tags = UserTagging.objects.get_all(profile, instance)\n return mark_safe(' '.join(tag.html_label for tag in tags))\n\n\[email protected]\ndef enrollment_audience(enrollment_audience_val):\n # convert enrollment audience Enum value to the string description\n return CourseInstance.ENROLLMENT_AUDIENCE[enrollment_audience_val]\n\n\[email protected]\ndef view_content_to(view_content_to_val):\n # convert \"view content to\" Enum value to the string description\n return CourseInstance.VIEW_ACCESS[view_content_to_val]\n\n\[email protected]\ndef is_banned_student(profile, course_instance):\n return course_instance.is_banned(profile.user)\n", "path": "course/templatetags/course.py"}]} | 1,700 | 310 |
gh_patches_debug_13156 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-5599 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Added retry on internal error
Added retry on internal error as suggested by the internal error message: '500 Error encountered during execution. Retrying may solve the problem.'.
Rationalised the conditions structure so it would simplify addition of other retry conditions.
--- END ISSUE ---
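As a rough sketch of the direction the issue suggests — gathering the retryable `reason` values in one container so extra conditions such as `internalError` become one-line additions — the predicate could look like the following. The names here are illustrative assumptions, not necessarily the repository's final patch:

```python
from google.api_core import retry

# Reasons treated as transient; retrying the request may succeed.
# (Set contents assumed from the issue text.)
_RETRYABLE_REASONS = frozenset(["backendError", "rateLimitExceeded", "internalError"])


def _should_retry(exc):
    """Return True when the first error's 'reason' is a known transient failure."""
    errors = getattr(exc, "errors", None)
    if not errors:
        return False
    return errors[0].get("reason") in _RETRYABLE_REASONS


DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
```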
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bigquery/google/cloud/bigquery/retry.py`
Content:
```
1
2 # Copyright 2018 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from google.api_core import retry
17
18
19 def _should_retry(exc):
20 """Predicate for determining when to retry.
21
22 We retry if and only if the 'reason' is 'backendError'
23 or 'rateLimitExceeded'.
24 """
25 if not hasattr(exc, 'errors'):
26 return False
27 if len(exc.errors) == 0:
28 return False
29 reason = exc.errors[0]['reason']
30 return reason == 'backendError' or reason == 'rateLimitExceeded'
31
32
33 DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
34 """The default retry object.
35
36 Any method with a ``retry`` parameter will be retried automatically,
37 with reasonable defaults. To disable retry, pass ``retry=None``.
38 To modify the default retry behavior, call a ``with_XXX`` method
39 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,
40 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.
41 """
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bigquery/google/cloud/bigquery/retry.py b/bigquery/google/cloud/bigquery/retry.py
--- a/bigquery/google/cloud/bigquery/retry.py
+++ b/bigquery/google/cloud/bigquery/retry.py
@@ -16,6 +16,13 @@
from google.api_core import retry
+_RETRYABLE_REASONS = frozenset([
+ 'backendError',
+ 'rateLimitExceeded',
+ 'internalError',
+])
+
+
def _should_retry(exc):
"""Predicate for determining when to retry.
@@ -27,7 +34,7 @@
if len(exc.errors) == 0:
return False
reason = exc.errors[0]['reason']
- return reason == 'backendError' or reason == 'rateLimitExceeded'
+ return reason in _RETRYABLE_REASONS
DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
| {"golden_diff": "diff --git a/bigquery/google/cloud/bigquery/retry.py b/bigquery/google/cloud/bigquery/retry.py\n--- a/bigquery/google/cloud/bigquery/retry.py\n+++ b/bigquery/google/cloud/bigquery/retry.py\n@@ -16,6 +16,13 @@\n from google.api_core import retry\n \n \n+_RETRYABLE_REASONS = frozenset([\n+ 'backendError',\n+ 'rateLimitExceeded',\n+ 'internalError',\n+])\n+\n+\n def _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n \n@@ -27,7 +34,7 @@\n if len(exc.errors) == 0:\n return False\n reason = exc.errors[0]['reason']\n- return reason == 'backendError' or reason == 'rateLimitExceeded'\n+ return reason in _RETRYABLE_REASONS\n \n \n DEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n", "issue": "Added retry on internal error\nAdded retry on internal error as suggested by the internal error message: '500 Error encountered during execution. Retrying may solve the problem.'. \r\nRationalised the conditions structure so it would simplify addition of other retry conditions.\n", "before_files": [{"content": "\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import retry\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, 'errors'):\n return False\n if len(exc.errors) == 0:\n return False\n reason = exc.errors[0]['reason']\n return reason == 'backendError' or reason == 'rateLimitExceeded'\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. 
For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "bigquery/google/cloud/bigquery/retry.py"}], "after_files": [{"content": "\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import retry\n\n\n_RETRYABLE_REASONS = frozenset([\n 'backendError',\n 'rateLimitExceeded',\n 'internalError',\n])\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, 'errors'):\n return False\n if len(exc.errors) == 0:\n return False\n reason = exc.errors[0]['reason']\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "bigquery/google/cloud/bigquery/retry.py"}]} | 728 | 200 |
gh_patches_debug_32509 | rasdani/github-patches | git_diff | carpentries__amy-2107 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make URL redirects from `?next` param safer
There are a couple places where we redirect to `?next` value without checking if it's safe. There's a `django.http.is_safe_url` function we could use.
--- END ISSUE ---
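As a rough sketch of the approach the issue suggests, the redirect target can be validated before use. The helper name and exact call below are illustrative assumptions; note the validator actually lives in `django.utils.http` (older Django versions expose `is_safe_url`, later ones rename it to `url_has_allowed_host_and_scheme`):

```python
from django.conf import settings
from django.utils.http import is_safe_url  # Django < 3.1; newer: url_has_allowed_host_and_scheme


def safe_next_or_default_url(next_url, default):
    """Follow ?next= only when it targets an allowed host; otherwise fall back to the default URL."""
    if next_url is not None and is_safe_url(next_url, settings.ALLOWED_HOSTS):
        return next_url
    return default


# In a view, instead of redirecting straight to request.POST.get("next"):
# return redirect(safe_next_or_default_url(request.POST.get("next"), obj.get_absolute_url()))
```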
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/autoemails/utils.py`
Content:
```
1 from typing import Union
2
3 import django_rq
4 import pytz
5 from rq.exceptions import NoSuchJobError
6 from rq.job import Job
7 from rq_scheduler.utils import from_unix
8
9
10 def scheduled_execution_time(job_id, scheduler=None, naive=True):
11 """Get RQ-Scheduler scheduled execution time for specific job."""
12 _scheduler = scheduler
13 if not scheduler:
14 _scheduler = django_rq.get_scheduler("default")
15
16 # Scheduler keeps jobs in a single key, they are sorted by score, which is
17 # scheduled execution time (linux epoch). We can retrieve single
18 # entry's score.
19 time = _scheduler.connection.zscore(_scheduler.scheduled_jobs_key, job_id)
20
21 # Convert linux time epoch to UTC.
22 if time:
23 time = from_unix(time)
24 if not naive:
25 # By default, RQ-Scheduler uses UTC naive (TZ-unaware) objects,
26 # which we can "convert" to TZ-aware UTC.
27 time = time.replace(tzinfo=pytz.UTC)
28 return time
29
30
31 def compare_emails(a, b):
32 """EmailMultiAlternatives doesn't implement __eq__, so we have to
33 cheat our way."""
34 if a is None and b is None:
35 return True
36 elif a is None and b or b is None and a:
37 return False
38 else:
39 try:
40 return (
41 a.to == b.to
42 and a.cc == b.cc
43 and a.bcc == b.bcc
44 and a.reply_to == b.reply_to
45 and a.subject == b.subject
46 and a.body == b.body
47 )
48 except AttributeError:
49 return False
50
51
52 def check_status(job: Union[str, Job], scheduler=None):
53 _scheduler = scheduler
54 if not scheduler:
55 _scheduler = django_rq.get_scheduler("default")
56
57 if not isinstance(job, Job):
58 try:
59 job = Job.fetch(job, connection=_scheduler.connection)
60 except NoSuchJobError:
61 return None
62
63 scheduled = scheduled_execution_time(job.get_id(), scheduler)
64
65 if scheduled:
66 return job.get_status() or "scheduled"
67 else:
68 return job.get_status() or "cancelled"
69
```
Path: `amy/autoemails/views.py`
Content:
```
1 import logging
2
3 from django.contrib import messages
4 from django.shortcuts import get_object_or_404, redirect
5 from django.urls import reverse
6 from django.utils.html import format_html
7 from django.views.decorators.http import require_POST
8 import django_rq
9
10 from workshops.models import WorkshopRequest
11 from workshops.util import admin_required
12
13 from .actions import GenericAction
14 from .forms import GenericEmailScheduleForm
15 from .models import EmailTemplate, Trigger
16 from .utils import check_status, scheduled_execution_time
17
18 logger = logging.getLogger("amy.signals")
19 scheduler = django_rq.get_scheduler("default")
20 redis_connection = django_rq.get_connection("default")
21
22
23 @require_POST
24 @admin_required
25 def generic_schedule_email(request, pk):
26 """
27 Generic view for scheduling an email to be sent.
28 """
29 template_slug = request.POST.get("slug", "")
30 original_template = get_object_or_404(EmailTemplate, slug=template_slug)
31 # Hardcoded, maybe in future respond to other requests, like
32 # SelfOrganizedSubmission or WorkshopInquiry
33 trigger = get_object_or_404(
34 Trigger,
35 action__startswith="workshop-request-response",
36 template__slug=template_slug,
37 active=True,
38 )
39 form = GenericEmailScheduleForm(request.POST, instance=original_template)
40 workshop_request = get_object_or_404(WorkshopRequest, pk=pk)
41
42 if form.is_valid():
43 template = EmailTemplate(
44 slug=form.cleaned_data["slug"],
45 subject=form.cleaned_data["subject"],
46 to_header=form.cleaned_data["to_header"],
47 from_header=form.cleaned_data["from_header"],
48 cc_header=form.cleaned_data["cc_header"],
49 bcc_header=form.cleaned_data["bcc_header"],
50 reply_to_header=form.cleaned_data["reply_to_header"],
51 body_template=form.cleaned_data["body_template"],
52 )
53
54 objects = dict(request=workshop_request)
55 if workshop_request.event:
56 objects["event"] = workshop_request.event
57 objects["workshop"] = workshop_request.event
58
59 action = GenericAction(
60 trigger=trigger,
61 objects=objects,
62 )
63 action_name = GenericAction.__name__
64 launch_at = action.get_launch_at()
65 meta = dict(
66 action=action,
67 template=template,
68 launch_at=launch_at,
69 email=None,
70 context=None,
71 )
72
73 job = scheduler.enqueue_in(launch_at, action, meta=meta)
74 logger.debug("%s: enqueueing", action_name)
75 scheduled_at = scheduled_execution_time(
76 job.get_id(), scheduler=scheduler, naive=False
77 )
78 logger.debug("%s: job created [%r]", action_name, job)
79
80 rqj = workshop_request.rq_jobs.create(
81 job_id=job.get_id(),
82 trigger=trigger,
83 scheduled_execution=scheduled_at,
84 status=check_status(job),
85 mail_status="",
86 event_slug=action.event_slug(),
87 recipients=action.all_recipients(),
88 )
89
90 messages.info(
91 request,
92 format_html(
93 "New email ({}) was scheduled to run "
94 '<relative-time datetime="{}">{}</relative-time>: '
95 '<a href="{}">{}</a>.',
96 trigger.get_action_display(),
97 scheduled_at.isoformat(),
98 "{:%Y-%m-%d %H:%M}".format(scheduled_at),
99 reverse("admin:autoemails_rqjob_preview", args=[rqj.pk]),
100 job.id,
101 ),
102 fail_silently=True,
103 )
104
105 return redirect(
106 request.POST.get("next", "") or workshop_request.get_absolute_url()
107 )
108
109 else:
110 messages.error(
111 request,
112 f"Could not send the email due to form errors: {form.errors}",
113 fail_silently=True,
114 )
115
116 return redirect(
117 request.POST.get("next", "") or workshop_request.get_absolute_url()
118 )
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/amy/autoemails/utils.py b/amy/autoemails/utils.py
--- a/amy/autoemails/utils.py
+++ b/amy/autoemails/utils.py
@@ -1,5 +1,7 @@
-from typing import Union
+from typing import Optional, Union
+from django.conf import settings
+from django.utils.http import is_safe_url
import django_rq
import pytz
from rq.exceptions import NoSuchJobError
@@ -66,3 +68,9 @@
return job.get_status() or "scheduled"
else:
return job.get_status() or "cancelled"
+
+
+def safe_next_or_default_url(next_url: Optional[str], default: str) -> str:
+ if next_url is not None and is_safe_url(next_url, settings.ALLOWED_HOSTS):
+ return next_url
+ return default
diff --git a/amy/autoemails/views.py b/amy/autoemails/views.py
--- a/amy/autoemails/views.py
+++ b/amy/autoemails/views.py
@@ -13,7 +13,7 @@
from .actions import GenericAction
from .forms import GenericEmailScheduleForm
from .models import EmailTemplate, Trigger
-from .utils import check_status, scheduled_execution_time
+from .utils import check_status, safe_next_or_default_url, scheduled_execution_time
logger = logging.getLogger("amy.signals")
scheduler = django_rq.get_scheduler("default")
@@ -102,9 +102,9 @@
fail_silently=True,
)
- return redirect(
- request.POST.get("next", "") or workshop_request.get_absolute_url()
- )
+ default_url = workshop_request.get_absolute_url()
+ next_url = request.POST.get("next", None)
+ return redirect(safe_next_or_default_url(next_url, default_url))
else:
messages.error(
@@ -113,6 +113,6 @@
fail_silently=True,
)
- return redirect(
- request.POST.get("next", "") or workshop_request.get_absolute_url()
- )
+ default_url = workshop_request.get_absolute_url()
+ next_url = request.POST.get("next", None)
+ return redirect(safe_next_or_default_url(next_url, default_url))
| {"golden_diff": "diff --git a/amy/autoemails/utils.py b/amy/autoemails/utils.py\n--- a/amy/autoemails/utils.py\n+++ b/amy/autoemails/utils.py\n@@ -1,5 +1,7 @@\n-from typing import Union\n+from typing import Optional, Union\n \n+from django.conf import settings\n+from django.utils.http import is_safe_url\n import django_rq\n import pytz\n from rq.exceptions import NoSuchJobError\n@@ -66,3 +68,9 @@\n return job.get_status() or \"scheduled\"\n else:\n return job.get_status() or \"cancelled\"\n+\n+\n+def safe_next_or_default_url(next_url: Optional[str], default: str) -> str:\n+ if next_url is not None and is_safe_url(next_url, settings.ALLOWED_HOSTS):\n+ return next_url\n+ return default\ndiff --git a/amy/autoemails/views.py b/amy/autoemails/views.py\n--- a/amy/autoemails/views.py\n+++ b/amy/autoemails/views.py\n@@ -13,7 +13,7 @@\n from .actions import GenericAction\n from .forms import GenericEmailScheduleForm\n from .models import EmailTemplate, Trigger\n-from .utils import check_status, scheduled_execution_time\n+from .utils import check_status, safe_next_or_default_url, scheduled_execution_time\n \n logger = logging.getLogger(\"amy.signals\")\n scheduler = django_rq.get_scheduler(\"default\")\n@@ -102,9 +102,9 @@\n fail_silently=True,\n )\n \n- return redirect(\n- request.POST.get(\"next\", \"\") or workshop_request.get_absolute_url()\n- )\n+ default_url = workshop_request.get_absolute_url()\n+ next_url = request.POST.get(\"next\", None)\n+ return redirect(safe_next_or_default_url(next_url, default_url))\n \n else:\n messages.error(\n@@ -113,6 +113,6 @@\n fail_silently=True,\n )\n \n- return redirect(\n- request.POST.get(\"next\", \"\") or workshop_request.get_absolute_url()\n- )\n+ default_url = workshop_request.get_absolute_url()\n+ next_url = request.POST.get(\"next\", None)\n+ return redirect(safe_next_or_default_url(next_url, default_url))\n", "issue": "Make URL redirects from `?next` param safer\nThere are a couple places where we redirect to `?next` value without checking if it's safe. There's a `django.http.is_safe_url` function we could use.\n", "before_files": [{"content": "from typing import Union\n\nimport django_rq\nimport pytz\nfrom rq.exceptions import NoSuchJobError\nfrom rq.job import Job\nfrom rq_scheduler.utils import from_unix\n\n\ndef scheduled_execution_time(job_id, scheduler=None, naive=True):\n \"\"\"Get RQ-Scheduler scheduled execution time for specific job.\"\"\"\n _scheduler = scheduler\n if not scheduler:\n _scheduler = django_rq.get_scheduler(\"default\")\n\n # Scheduler keeps jobs in a single key, they are sorted by score, which is\n # scheduled execution time (linux epoch). 
We can retrieve single\n # entry's score.\n time = _scheduler.connection.zscore(_scheduler.scheduled_jobs_key, job_id)\n\n # Convert linux time epoch to UTC.\n if time:\n time = from_unix(time)\n if not naive:\n # By default, RQ-Scheduler uses UTC naive (TZ-unaware) objects,\n # which we can \"convert\" to TZ-aware UTC.\n time = time.replace(tzinfo=pytz.UTC)\n return time\n\n\ndef compare_emails(a, b):\n \"\"\"EmailMultiAlternatives doesn't implement __eq__, so we have to\n cheat our way.\"\"\"\n if a is None and b is None:\n return True\n elif a is None and b or b is None and a:\n return False\n else:\n try:\n return (\n a.to == b.to\n and a.cc == b.cc\n and a.bcc == b.bcc\n and a.reply_to == b.reply_to\n and a.subject == b.subject\n and a.body == b.body\n )\n except AttributeError:\n return False\n\n\ndef check_status(job: Union[str, Job], scheduler=None):\n _scheduler = scheduler\n if not scheduler:\n _scheduler = django_rq.get_scheduler(\"default\")\n\n if not isinstance(job, Job):\n try:\n job = Job.fetch(job, connection=_scheduler.connection)\n except NoSuchJobError:\n return None\n\n scheduled = scheduled_execution_time(job.get_id(), scheduler)\n\n if scheduled:\n return job.get_status() or \"scheduled\"\n else:\n return job.get_status() or \"cancelled\"\n", "path": "amy/autoemails/utils.py"}, {"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.views.decorators.http import require_POST\nimport django_rq\n\nfrom workshops.models import WorkshopRequest\nfrom workshops.util import admin_required\n\nfrom .actions import GenericAction\nfrom .forms import GenericEmailScheduleForm\nfrom .models import EmailTemplate, Trigger\nfrom .utils import check_status, scheduled_execution_time\n\nlogger = logging.getLogger(\"amy.signals\")\nscheduler = django_rq.get_scheduler(\"default\")\nredis_connection = django_rq.get_connection(\"default\")\n\n\n@require_POST\n@admin_required\ndef generic_schedule_email(request, pk):\n \"\"\"\n Generic view for scheduling an email to be sent.\n \"\"\"\n template_slug = request.POST.get(\"slug\", \"\")\n original_template = get_object_or_404(EmailTemplate, slug=template_slug)\n # Hardcoded, maybe in future respond to other requests, like\n # SelfOrganizedSubmission or WorkshopInquiry\n trigger = get_object_or_404(\n Trigger,\n action__startswith=\"workshop-request-response\",\n template__slug=template_slug,\n active=True,\n )\n form = GenericEmailScheduleForm(request.POST, instance=original_template)\n workshop_request = get_object_or_404(WorkshopRequest, pk=pk)\n\n if form.is_valid():\n template = EmailTemplate(\n slug=form.cleaned_data[\"slug\"],\n subject=form.cleaned_data[\"subject\"],\n to_header=form.cleaned_data[\"to_header\"],\n from_header=form.cleaned_data[\"from_header\"],\n cc_header=form.cleaned_data[\"cc_header\"],\n bcc_header=form.cleaned_data[\"bcc_header\"],\n reply_to_header=form.cleaned_data[\"reply_to_header\"],\n body_template=form.cleaned_data[\"body_template\"],\n )\n\n objects = dict(request=workshop_request)\n if workshop_request.event:\n objects[\"event\"] = workshop_request.event\n objects[\"workshop\"] = workshop_request.event\n\n action = GenericAction(\n trigger=trigger,\n objects=objects,\n )\n action_name = GenericAction.__name__\n launch_at = action.get_launch_at()\n meta = dict(\n action=action,\n template=template,\n launch_at=launch_at,\n email=None,\n context=None,\n 
)\n\n job = scheduler.enqueue_in(launch_at, action, meta=meta)\n logger.debug(\"%s: enqueueing\", action_name)\n scheduled_at = scheduled_execution_time(\n job.get_id(), scheduler=scheduler, naive=False\n )\n logger.debug(\"%s: job created [%r]\", action_name, job)\n\n rqj = workshop_request.rq_jobs.create(\n job_id=job.get_id(),\n trigger=trigger,\n scheduled_execution=scheduled_at,\n status=check_status(job),\n mail_status=\"\",\n event_slug=action.event_slug(),\n recipients=action.all_recipients(),\n )\n\n messages.info(\n request,\n format_html(\n \"New email ({}) was scheduled to run \"\n '<relative-time datetime=\"{}\">{}</relative-time>: '\n '<a href=\"{}\">{}</a>.',\n trigger.get_action_display(),\n scheduled_at.isoformat(),\n \"{:%Y-%m-%d %H:%M}\".format(scheduled_at),\n reverse(\"admin:autoemails_rqjob_preview\", args=[rqj.pk]),\n job.id,\n ),\n fail_silently=True,\n )\n\n return redirect(\n request.POST.get(\"next\", \"\") or workshop_request.get_absolute_url()\n )\n\n else:\n messages.error(\n request,\n f\"Could not send the email due to form errors: {form.errors}\",\n fail_silently=True,\n )\n\n return redirect(\n request.POST.get(\"next\", \"\") or workshop_request.get_absolute_url()\n )\n", "path": "amy/autoemails/views.py"}], "after_files": [{"content": "from typing import Optional, Union\n\nfrom django.conf import settings\nfrom django.utils.http import is_safe_url\nimport django_rq\nimport pytz\nfrom rq.exceptions import NoSuchJobError\nfrom rq.job import Job\nfrom rq_scheduler.utils import from_unix\n\n\ndef scheduled_execution_time(job_id, scheduler=None, naive=True):\n \"\"\"Get RQ-Scheduler scheduled execution time for specific job.\"\"\"\n _scheduler = scheduler\n if not scheduler:\n _scheduler = django_rq.get_scheduler(\"default\")\n\n # Scheduler keeps jobs in a single key, they are sorted by score, which is\n # scheduled execution time (linux epoch). 
We can retrieve single\n # entry's score.\n time = _scheduler.connection.zscore(_scheduler.scheduled_jobs_key, job_id)\n\n # Convert linux time epoch to UTC.\n if time:\n time = from_unix(time)\n if not naive:\n # By default, RQ-Scheduler uses UTC naive (TZ-unaware) objects,\n # which we can \"convert\" to TZ-aware UTC.\n time = time.replace(tzinfo=pytz.UTC)\n return time\n\n\ndef compare_emails(a, b):\n \"\"\"EmailMultiAlternatives doesn't implement __eq__, so we have to\n cheat our way.\"\"\"\n if a is None and b is None:\n return True\n elif a is None and b or b is None and a:\n return False\n else:\n try:\n return (\n a.to == b.to\n and a.cc == b.cc\n and a.bcc == b.bcc\n and a.reply_to == b.reply_to\n and a.subject == b.subject\n and a.body == b.body\n )\n except AttributeError:\n return False\n\n\ndef check_status(job: Union[str, Job], scheduler=None):\n _scheduler = scheduler\n if not scheduler:\n _scheduler = django_rq.get_scheduler(\"default\")\n\n if not isinstance(job, Job):\n try:\n job = Job.fetch(job, connection=_scheduler.connection)\n except NoSuchJobError:\n return None\n\n scheduled = scheduled_execution_time(job.get_id(), scheduler)\n\n if scheduled:\n return job.get_status() or \"scheduled\"\n else:\n return job.get_status() or \"cancelled\"\n\n\ndef safe_next_or_default_url(next_url: Optional[str], default: str) -> str:\n if next_url is not None and is_safe_url(next_url, settings.ALLOWED_HOSTS):\n return next_url\n return default\n", "path": "amy/autoemails/utils.py"}, {"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.views.decorators.http import require_POST\nimport django_rq\n\nfrom workshops.models import WorkshopRequest\nfrom workshops.util import admin_required\n\nfrom .actions import GenericAction\nfrom .forms import GenericEmailScheduleForm\nfrom .models import EmailTemplate, Trigger\nfrom .utils import check_status, safe_next_or_default_url, scheduled_execution_time\n\nlogger = logging.getLogger(\"amy.signals\")\nscheduler = django_rq.get_scheduler(\"default\")\nredis_connection = django_rq.get_connection(\"default\")\n\n\n@require_POST\n@admin_required\ndef generic_schedule_email(request, pk):\n \"\"\"\n Generic view for scheduling an email to be sent.\n \"\"\"\n template_slug = request.POST.get(\"slug\", \"\")\n original_template = get_object_or_404(EmailTemplate, slug=template_slug)\n # Hardcoded, maybe in future respond to other requests, like\n # SelfOrganizedSubmission or WorkshopInquiry\n trigger = get_object_or_404(\n Trigger,\n action__startswith=\"workshop-request-response\",\n template__slug=template_slug,\n active=True,\n )\n form = GenericEmailScheduleForm(request.POST, instance=original_template)\n workshop_request = get_object_or_404(WorkshopRequest, pk=pk)\n\n if form.is_valid():\n template = EmailTemplate(\n slug=form.cleaned_data[\"slug\"],\n subject=form.cleaned_data[\"subject\"],\n to_header=form.cleaned_data[\"to_header\"],\n from_header=form.cleaned_data[\"from_header\"],\n cc_header=form.cleaned_data[\"cc_header\"],\n bcc_header=form.cleaned_data[\"bcc_header\"],\n reply_to_header=form.cleaned_data[\"reply_to_header\"],\n body_template=form.cleaned_data[\"body_template\"],\n )\n\n objects = dict(request=workshop_request)\n if workshop_request.event:\n objects[\"event\"] = workshop_request.event\n objects[\"workshop\"] = workshop_request.event\n\n action = GenericAction(\n 
trigger=trigger,\n objects=objects,\n )\n action_name = GenericAction.__name__\n launch_at = action.get_launch_at()\n meta = dict(\n action=action,\n template=template,\n launch_at=launch_at,\n email=None,\n context=None,\n )\n\n job = scheduler.enqueue_in(launch_at, action, meta=meta)\n logger.debug(\"%s: enqueueing\", action_name)\n scheduled_at = scheduled_execution_time(\n job.get_id(), scheduler=scheduler, naive=False\n )\n logger.debug(\"%s: job created [%r]\", action_name, job)\n\n rqj = workshop_request.rq_jobs.create(\n job_id=job.get_id(),\n trigger=trigger,\n scheduled_execution=scheduled_at,\n status=check_status(job),\n mail_status=\"\",\n event_slug=action.event_slug(),\n recipients=action.all_recipients(),\n )\n\n messages.info(\n request,\n format_html(\n \"New email ({}) was scheduled to run \"\n '<relative-time datetime=\"{}\">{}</relative-time>: '\n '<a href=\"{}\">{}</a>.',\n trigger.get_action_display(),\n scheduled_at.isoformat(),\n \"{:%Y-%m-%d %H:%M}\".format(scheduled_at),\n reverse(\"admin:autoemails_rqjob_preview\", args=[rqj.pk]),\n job.id,\n ),\n fail_silently=True,\n )\n\n default_url = workshop_request.get_absolute_url()\n next_url = request.POST.get(\"next\", None)\n return redirect(safe_next_or_default_url(next_url, default_url))\n\n else:\n messages.error(\n request,\n f\"Could not send the email due to form errors: {form.errors}\",\n fail_silently=True,\n )\n\n default_url = workshop_request.get_absolute_url()\n next_url = request.POST.get(\"next\", None)\n return redirect(safe_next_or_default_url(next_url, default_url))\n", "path": "amy/autoemails/views.py"}]} | 1,978 | 485 |