Dataset columns:

| column | type | summary |
|---|---|---|
| problem_id | string | lengths 18–22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13–58 |
| prompt | string | lengths 1.1k–25.4k |
| golden_diff | string | lengths 145–5.13k |
| verification_info | string | lengths 582–39.1k |
| num_tokens | int64 | 271–4.1k |
| num_tokens_diff | int64 | 47–1.02k |
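The rows below follow this schema. As a minimal sketch of how a dump like this could be loaded and inspected, assuming the data is published as a Hugging Face dataset (the dataset path and split name here are placeholders inferred from the `source` column, not confirmed by this page):

```python
# Minimal sketch: load the dataset and inspect one row's fields.
# Assumption: the `datasets` library is installed and the data is available
# on the Hub; "rasdani/github-patches" and split="train" are guesses.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # hypothetical path/split

row = ds[0]
print(row["problem_id"], row["in_source_id"])     # e.g. gh_patches_debug_6009, google__osv.dev-1021
print(row["num_tokens"], row["num_tokens_diff"])  # token counts for the prompt and the diff

# verification_info is a JSON string holding the issue text, the golden diff,
# and the file contents before/after the fix.
info = json.loads(row["verification_info"])
print(info.keys())  # expected keys: golden_diff, issue, before_files, after_files
```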
problem_id: gh_patches_debug_6009 | source: rasdani/github-patches | task_type: git_diff | in_source_id: google__osv.dev-1021

prompt:

We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Maven 'zero' versions causes errors when comparing
Maven versions that are 'zero' (e.g. `0.0.0` and `0.0.0-X.Y.Z`) cause assertion errors when comparing with non-zero versions, because the comparisons assume there's always an initial token without a `.` or `-` 'prefix'.
This seems to be causing some of the missing vulnerabilities in #1018
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `osv/ecosystems/maven.py`
Content:
```
1 # Copyright 2021 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Maven ecosystem helper."""
15
16 import collections
17 import json
18 import re
19
20 import urllib.parse
21 from . import config
22 from .helper_base import DepsDevMixin, EnumerateError
23 from ..cache import cached
24 from ..request_helper import RequestHelper
25
26
27 # pylint: disable=line-too-long
28 # Maven's very complicated spec:
29 # http://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification
30
31 _TO_TRIM = ('0', '', 'final', 'ga')
32 _KEYWORD_ORDER = ('alpha', 'beta', 'milestone', 'rc', 'snapshot', '', 'sp')
33
34
35 def qualifier_order(token):
36 """Returns an integer representing a token's order."""
37 # ".qualifier" < "-qualifier" < "-number" < ".number"
38 if token.value.isdigit():
39 if token.prefix == '-':
40 return 2
41
42 assert token.prefix == '.'
43 return 3
44
45 if token.prefix == '-':
46 return 1
47
48 assert token.prefix == '.'
49 return 0
50
51
52 class VersionToken(
53 collections.namedtuple(
54 'VersionToken', 'prefix value is_null', defaults=(False,))):
55 """Version token."""
56
57 __slots__ = ()
58
59 def __eq__(self, other):
60 return self.prefix == other.prefix and self.value == other.value
61
62 def __lt__(self, other):
63 if self.prefix == other.prefix:
64 # if the prefix is the same, then compare the token:
65 if self.value.isdigit() and other.value.isdigit():
66 # Numeric tokens have the natural order.
67 return int(self.value) < int(other.value)
68 # The spec is unclear, but according to Maven's implementation, numerics
69 # sort after non-numerics, **unless it's a null value**.
70 # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L443
71 if self.value.isdigit() and not self.is_null:
72 return False
73
74 if other.value.isdigit() and not other.is_null:
75 return True
76
77 # Non-numeric tokens ("qualifiers") have the alphabetical order, except
78 # for the following tokens which come first in _KEYWORD_ORDER.
79 #
80 # The spec is unclear, but according to Maven's implementation, unknown
81 # qualifiers sort after known qualifiers:
82 # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L423
83 try:
84 left_idx = _KEYWORD_ORDER.index(self.value)
85 except ValueError:
86 left_idx = len(_KEYWORD_ORDER)
87
88 try:
89 right_idx = _KEYWORD_ORDER.index(other.value)
90 except ValueError:
91 right_idx = len(_KEYWORD_ORDER)
92
93 if left_idx == len(_KEYWORD_ORDER) and right_idx == len(_KEYWORD_ORDER):
94 # Both are unknown qualifiers. Just do a lexical comparison.
95 return self.value < other.value
96
97 return left_idx < right_idx
98
99 # else ".qualifier" < "-qualifier" < "-number" < ".number"
100 return qualifier_order(self) < qualifier_order(other)
101
102
103 class Version:
104 """Maven version."""
105
106 def __init__(self):
107 self.tokens = []
108
109 def __str__(self):
110 result = ''
111 for token in self.tokens:
112 result += token.prefix + token.value
113
114 return result
115
116 def __eq__(self, other):
117 return self.tokens == other.tokens
118
119 def __lt__(self, other):
120 for i in range(max(len(self.tokens), len(other.tokens))):
121 # the shorter one padded with enough "null" values with matching prefix to
122 # have the same length as the longer one. Padded "null" values depend on
123 # the prefix of the other version: 0 for '.', "" for '-'
124 if i >= len(self.tokens):
125 if other.tokens[i].prefix == '.':
126 left = VersionToken('.', '0', is_null=True)
127 else:
128 assert other.tokens[i].prefix == '-'
129 left = VersionToken('-', '', is_null=True)
130 else:
131 left = self.tokens[i]
132
133 if i >= len(other.tokens):
134 if self.tokens[i].prefix == '.':
135 right = VersionToken('.', '0', is_null=True)
136 else:
137 assert self.tokens[i].prefix == '-'
138 right = VersionToken('-', '', is_null=True)
139 else:
140 right = other.tokens[i]
141
142 if left == right:
143 continue
144
145 return left < right
146
147 @classmethod
148 def from_string(cls, str_version):
149 """Parse a version."""
150 version = Version()
151
152 # The Maven coordinate is split in tokens between dots ('.'), hyphens ('-')
153 # and transitions between digits and characters. The prefix is recorded
154 # and will have effect on the order.
155
156 # Split and keep the delimiter.
157 tokens = re.split(r'([-.])', str_version)
158 for i in range(0, len(tokens), 2):
159 if i == 0:
160 # First token has no preceding prefix.
161 prefix = ''
162 else:
163 # Preceding prefix.
164 prefix = tokens[i - 1]
165
166 # A transition between digits and characters is equivalent to a hyphen.
167 # According to Maven's implementation: any non-digit is a "character":
168 # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L627
169
170 # Find instances of <digit><non-digit> or <non-digit><digit>.
171 # ?= makes the regex non-consuming (needed to catch overlapping
172 # transitions such as <digit><non-digit><digit>).
173 # This gives an array of indices where each index is where a hyphen should be.
174 transitions = [
175 m.span()[0] + 1
176 for m in re.finditer(r'(?=(\d[^\d]|[^\d]\d))', tokens[i])
177 ]
178 # Add the last index so that our algorithm to split up the current token works.
179 transitions.append(len(tokens[i]))
180
181 prev_index = 0
182 for j, transition in enumerate(transitions):
183 if j > 0:
184 prefix = '-'
185
186 # The spec doesn't say this, but all qualifiers are case insensitive.
187 current = tokens[i][prev_index:transition].lower()
188 if not current:
189 # Empty tokens are replaced with "0".
190 current = '0'
191
192 # Normalize "cr" to "rc" for easier comparison since they are equal in
193 # precedence.
194 if current == 'cr':
195 current = 'rc'
196
197 # Also do this for 'ga', 'final' which are equivalent to empty string.
198 # "release" is not part of the spec but is implemented by Maven.
199 if current in ('ga', 'final', 'release'):
200 current = ''
201
202 # the "alpha", "beta" and "milestone" qualifiers can respectively be
203 # shortened to "a", "b" and "m" when directly followed by a number.
204 if transition != len(tokens[i]):
205 if current == 'a':
206 current = 'alpha'
207
208 if current == 'b':
209 current = 'beta'
210
211 if current == 'm':
212 current = 'milestone'
213
214 if current.isdigit():
215 # Remove any leading zeros.
216 current = str(int(current))
217
218 version.tokens.append(VersionToken(prefix, current))
219 prev_index = transition
220
221 # Then, starting from the end of the version, the trailing "null" values
222 # (0, "", "final", "ga") are trimmed.
223 i = len(version.tokens) - 1
224 while i >= 0:
225 if version.tokens[i].value in _TO_TRIM:
226 version.tokens.pop(i)
227 i -= 1
228 continue
229
230 # This process is repeated at each remaining hyphen from end to start.
231 while i >= 0 and version.tokens[i].prefix != '-':
232 i -= 1
233
234 i -= 1
235
236 return version
237
238
239 class Maven(DepsDevMixin):
240 """Maven ecosystem."""
241
242 _API_PACKAGE_URL = 'https://search.maven.org/solrsearch/select'
243
244 def sort_key(self, version):
245 """Sort key."""
246 return Version.from_string(version)
247
248 @staticmethod
249 def _get_versions(package):
250 """Get versions."""
251 versions = []
252 request_helper = RequestHelper()
253
254 group_id, artifact_id = package.split(':', 2)
255 start = 0
256
257 while True:
258 query = {
259 'q': f'g:"{group_id}" AND a:"{artifact_id}"',
260 'core': 'gav',
261 'rows': '20',
262 'wt': 'json',
263 'start': start
264 }
265 url = Maven._API_PACKAGE_URL + '?' + urllib.parse.urlencode(query)
266 response = request_helper.get(url)
267 response = json.loads(response)['response']
268 if response['numFound'] == 0:
269 raise EnumerateError(f'Package {package} not found')
270
271 for result in response['docs']:
272 versions.append(result['v'])
273
274 if len(versions) >= response['numFound']:
275 break
276
277 start = len(versions)
278
279 return versions
280
281 def enumerate_versions(self,
282 package,
283 introduced,
284 fixed=None,
285 last_affected=None,
286 limits=None):
287 """Enumerate versions."""
288 if config.use_deps_dev:
289 return self._deps_dev_enumerate(
290 package, introduced, fixed, last_affected, limits=limits)
291
292 get_versions = self._get_versions
293 if config.shared_cache:
294 get_versions = cached(config.shared_cache)(get_versions)
295
296 versions = get_versions(package)
297 self.sort_versions(versions)
298 return self._get_affected_versions(versions, introduced, fixed,
299 last_affected, limits)
300
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```

golden_diff:

diff --git a/osv/ecosystems/maven.py b/osv/ecosystems/maven.py
--- a/osv/ecosystems/maven.py
+++ b/osv/ecosystems/maven.py
@@ -221,7 +221,7 @@
# Then, starting from the end of the version, the trailing "null" values
# (0, "", "final", "ga") are trimmed.
i = len(version.tokens) - 1
- while i >= 0:
+ while i > 0: # We always want at least one token for comparison
if version.tokens[i].value in _TO_TRIM:
version.tokens.pop(i)
i -= 1
| {"golden_diff": "diff --git a/osv/ecosystems/maven.py b/osv/ecosystems/maven.py\n--- a/osv/ecosystems/maven.py\n+++ b/osv/ecosystems/maven.py\n@@ -221,7 +221,7 @@\n # Then, starting from the end of the version, the trailing \"null\" values\n # (0, \"\", \"final\", \"ga\") are trimmed.\n i = len(version.tokens) - 1\n- while i >= 0:\n+ while i > 0: # We always want at least one token for comparison\n if version.tokens[i].value in _TO_TRIM:\n version.tokens.pop(i)\n i -= 1\n", "issue": "Maven 'zero' versions causes errors when comparing\nMaven versions that are 'zero' (e.g. `0.0.0` and `0.0.0-X.Y.Z`) cause assertion errors when comparing with non-zero versions, because the comparisons assume there's always an initial token without a `.` or `-` 'prefix'.\r\n\r\nThis seems to be causing some of the missing vulnerabilities in #1018\n", "before_files": [{"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Maven ecosystem helper.\"\"\"\n\nimport collections\nimport json\nimport re\n\nimport urllib.parse\nfrom . import config\nfrom .helper_base import DepsDevMixin, EnumerateError\nfrom ..cache import cached\nfrom ..request_helper import RequestHelper\n\n\n# pylint: disable=line-too-long\n# Maven's very complicated spec:\n# http://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification\n\n_TO_TRIM = ('0', '', 'final', 'ga')\n_KEYWORD_ORDER = ('alpha', 'beta', 'milestone', 'rc', 'snapshot', '', 'sp')\n\n\ndef qualifier_order(token):\n \"\"\"Returns an integer representing a token's order.\"\"\"\n # \".qualifier\" < \"-qualifier\" < \"-number\" < \".number\"\n if token.value.isdigit():\n if token.prefix == '-':\n return 2\n\n assert token.prefix == '.'\n return 3\n\n if token.prefix == '-':\n return 1\n\n assert token.prefix == '.'\n return 0\n\n\nclass VersionToken(\n collections.namedtuple(\n 'VersionToken', 'prefix value is_null', defaults=(False,))):\n \"\"\"Version token.\"\"\"\n\n __slots__ = ()\n\n def __eq__(self, other):\n return self.prefix == other.prefix and self.value == other.value\n\n def __lt__(self, other):\n if self.prefix == other.prefix:\n # if the prefix is the same, then compare the token:\n if self.value.isdigit() and other.value.isdigit():\n # Numeric tokens have the natural order.\n return int(self.value) < int(other.value)\n # The spec is unclear, but according to Maven's implementation, numerics\n # sort after non-numerics, **unless it's a null value**.\n # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L443\n if self.value.isdigit() and not self.is_null:\n return False\n\n if other.value.isdigit() and not other.is_null:\n return True\n\n # Non-numeric tokens (\"qualifiers\") have the alphabetical order, except\n # for the following tokens which come first in _KEYWORD_ORDER.\n #\n # The spec is unclear, but according to Maven's implementation, unknown\n # qualifiers sort after known qualifiers:\n # 
https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L423\n try:\n left_idx = _KEYWORD_ORDER.index(self.value)\n except ValueError:\n left_idx = len(_KEYWORD_ORDER)\n\n try:\n right_idx = _KEYWORD_ORDER.index(other.value)\n except ValueError:\n right_idx = len(_KEYWORD_ORDER)\n\n if left_idx == len(_KEYWORD_ORDER) and right_idx == len(_KEYWORD_ORDER):\n # Both are unknown qualifiers. Just do a lexical comparison.\n return self.value < other.value\n\n return left_idx < right_idx\n\n # else \".qualifier\" < \"-qualifier\" < \"-number\" < \".number\"\n return qualifier_order(self) < qualifier_order(other)\n\n\nclass Version:\n \"\"\"Maven version.\"\"\"\n\n def __init__(self):\n self.tokens = []\n\n def __str__(self):\n result = ''\n for token in self.tokens:\n result += token.prefix + token.value\n\n return result\n\n def __eq__(self, other):\n return self.tokens == other.tokens\n\n def __lt__(self, other):\n for i in range(max(len(self.tokens), len(other.tokens))):\n # the shorter one padded with enough \"null\" values with matching prefix to\n # have the same length as the longer one. Padded \"null\" values depend on\n # the prefix of the other version: 0 for '.', \"\" for '-'\n if i >= len(self.tokens):\n if other.tokens[i].prefix == '.':\n left = VersionToken('.', '0', is_null=True)\n else:\n assert other.tokens[i].prefix == '-'\n left = VersionToken('-', '', is_null=True)\n else:\n left = self.tokens[i]\n\n if i >= len(other.tokens):\n if self.tokens[i].prefix == '.':\n right = VersionToken('.', '0', is_null=True)\n else:\n assert self.tokens[i].prefix == '-'\n right = VersionToken('-', '', is_null=True)\n else:\n right = other.tokens[i]\n\n if left == right:\n continue\n\n return left < right\n\n @classmethod\n def from_string(cls, str_version):\n \"\"\"Parse a version.\"\"\"\n version = Version()\n\n # The Maven coordinate is split in tokens between dots ('.'), hyphens ('-')\n # and transitions between digits and characters. 
The prefix is recorded\n # and will have effect on the order.\n\n # Split and keep the delimiter.\n tokens = re.split(r'([-.])', str_version)\n for i in range(0, len(tokens), 2):\n if i == 0:\n # First token has no preceding prefix.\n prefix = ''\n else:\n # Preceding prefix.\n prefix = tokens[i - 1]\n\n # A transition between digits and characters is equivalent to a hyphen.\n # According to Maven's implementation: any non-digit is a \"character\":\n # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L627\n\n # Find instances of <digit><non-digit> or <non-digit><digit>.\n # ?= makes the regex non-consuming (needed to catch overlapping\n # transitions such as <digit><non-digit><digit>).\n # This gives an array of indices where each index is where a hyphen should be.\n transitions = [\n m.span()[0] + 1\n for m in re.finditer(r'(?=(\\d[^\\d]|[^\\d]\\d))', tokens[i])\n ]\n # Add the last index so that our algorithm to split up the current token works.\n transitions.append(len(tokens[i]))\n\n prev_index = 0\n for j, transition in enumerate(transitions):\n if j > 0:\n prefix = '-'\n\n # The spec doesn't say this, but all qualifiers are case insensitive.\n current = tokens[i][prev_index:transition].lower()\n if not current:\n # Empty tokens are replaced with \"0\".\n current = '0'\n\n # Normalize \"cr\" to \"rc\" for easier comparison since they are equal in\n # precedence.\n if current == 'cr':\n current = 'rc'\n\n # Also do this for 'ga', 'final' which are equivalent to empty string.\n # \"release\" is not part of the spec but is implemented by Maven.\n if current in ('ga', 'final', 'release'):\n current = ''\n\n # the \"alpha\", \"beta\" and \"milestone\" qualifiers can respectively be\n # shortened to \"a\", \"b\" and \"m\" when directly followed by a number.\n if transition != len(tokens[i]):\n if current == 'a':\n current = 'alpha'\n\n if current == 'b':\n current = 'beta'\n\n if current == 'm':\n current = 'milestone'\n\n if current.isdigit():\n # Remove any leading zeros.\n current = str(int(current))\n\n version.tokens.append(VersionToken(prefix, current))\n prev_index = transition\n\n # Then, starting from the end of the version, the trailing \"null\" values\n # (0, \"\", \"final\", \"ga\") are trimmed.\n i = len(version.tokens) - 1\n while i >= 0:\n if version.tokens[i].value in _TO_TRIM:\n version.tokens.pop(i)\n i -= 1\n continue\n\n # This process is repeated at each remaining hyphen from end to start.\n while i >= 0 and version.tokens[i].prefix != '-':\n i -= 1\n\n i -= 1\n\n return version\n\n\nclass Maven(DepsDevMixin):\n \"\"\"Maven ecosystem.\"\"\"\n\n _API_PACKAGE_URL = 'https://search.maven.org/solrsearch/select'\n\n def sort_key(self, version):\n \"\"\"Sort key.\"\"\"\n return Version.from_string(version)\n\n @staticmethod\n def _get_versions(package):\n \"\"\"Get versions.\"\"\"\n versions = []\n request_helper = RequestHelper()\n\n group_id, artifact_id = package.split(':', 2)\n start = 0\n\n while True:\n query = {\n 'q': f'g:\"{group_id}\" AND a:\"{artifact_id}\"',\n 'core': 'gav',\n 'rows': '20',\n 'wt': 'json',\n 'start': start\n }\n url = Maven._API_PACKAGE_URL + '?' 
+ urllib.parse.urlencode(query)\n response = request_helper.get(url)\n response = json.loads(response)['response']\n if response['numFound'] == 0:\n raise EnumerateError(f'Package {package} not found')\n\n for result in response['docs']:\n versions.append(result['v'])\n\n if len(versions) >= response['numFound']:\n break\n\n start = len(versions)\n\n return versions\n\n def enumerate_versions(self,\n package,\n introduced,\n fixed=None,\n last_affected=None,\n limits=None):\n \"\"\"Enumerate versions.\"\"\"\n if config.use_deps_dev:\n return self._deps_dev_enumerate(\n package, introduced, fixed, last_affected, limits=limits)\n\n get_versions = self._get_versions\n if config.shared_cache:\n get_versions = cached(config.shared_cache)(get_versions)\n\n versions = get_versions(package)\n self.sort_versions(versions)\n return self._get_affected_versions(versions, introduced, fixed,\n last_affected, limits)\n", "path": "osv/ecosystems/maven.py"}], "after_files": [{"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Maven ecosystem helper.\"\"\"\n\nimport collections\nimport json\nimport re\n\nimport urllib.parse\nfrom . import config\nfrom .helper_base import DepsDevMixin, EnumerateError\nfrom ..cache import cached\nfrom ..request_helper import RequestHelper\n\n\n# pylint: disable=line-too-long\n# Maven's very complicated spec:\n# http://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification\n\n_TO_TRIM = ('0', '', 'final', 'ga')\n_KEYWORD_ORDER = ('alpha', 'beta', 'milestone', 'rc', 'snapshot', '', 'sp')\n\n\ndef qualifier_order(token):\n \"\"\"Returns an integer representing a token's order.\"\"\"\n # \".qualifier\" < \"-qualifier\" < \"-number\" < \".number\"\n if token.value.isdigit():\n if token.prefix == '-':\n return 2\n\n assert token.prefix == '.'\n return 3\n\n if token.prefix == '-':\n return 1\n\n assert token.prefix == '.'\n return 0\n\n\nclass VersionToken(\n collections.namedtuple(\n 'VersionToken', 'prefix value is_null', defaults=(False,))):\n \"\"\"Version token.\"\"\"\n\n __slots__ = ()\n\n def __eq__(self, other):\n return self.prefix == other.prefix and self.value == other.value\n\n def __lt__(self, other):\n if self.prefix == other.prefix:\n # if the prefix is the same, then compare the token:\n if self.value.isdigit() and other.value.isdigit():\n # Numeric tokens have the natural order.\n return int(self.value) < int(other.value)\n # The spec is unclear, but according to Maven's implementation, numerics\n # sort after non-numerics, **unless it's a null value**.\n # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L443\n if self.value.isdigit() and not self.is_null:\n return False\n\n if other.value.isdigit() and not other.is_null:\n return True\n\n # Non-numeric tokens (\"qualifiers\") have the alphabetical order, except\n # for the following tokens which come first in _KEYWORD_ORDER.\n #\n # The 
spec is unclear, but according to Maven's implementation, unknown\n # qualifiers sort after known qualifiers:\n # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L423\n try:\n left_idx = _KEYWORD_ORDER.index(self.value)\n except ValueError:\n left_idx = len(_KEYWORD_ORDER)\n\n try:\n right_idx = _KEYWORD_ORDER.index(other.value)\n except ValueError:\n right_idx = len(_KEYWORD_ORDER)\n\n if left_idx == len(_KEYWORD_ORDER) and right_idx == len(_KEYWORD_ORDER):\n # Both are unknown qualifiers. Just do a lexical comparison.\n return self.value < other.value\n\n return left_idx < right_idx\n\n # else \".qualifier\" < \"-qualifier\" < \"-number\" < \".number\"\n return qualifier_order(self) < qualifier_order(other)\n\n\nclass Version:\n \"\"\"Maven version.\"\"\"\n\n def __init__(self):\n self.tokens = []\n\n def __str__(self):\n result = ''\n for token in self.tokens:\n result += token.prefix + token.value\n\n return result\n\n def __eq__(self, other):\n return self.tokens == other.tokens\n\n def __lt__(self, other):\n for i in range(max(len(self.tokens), len(other.tokens))):\n # the shorter one padded with enough \"null\" values with matching prefix to\n # have the same length as the longer one. Padded \"null\" values depend on\n # the prefix of the other version: 0 for '.', \"\" for '-'\n if i >= len(self.tokens):\n if other.tokens[i].prefix == '.':\n left = VersionToken('.', '0', is_null=True)\n else:\n assert other.tokens[i].prefix == '-'\n left = VersionToken('-', '', is_null=True)\n else:\n left = self.tokens[i]\n\n if i >= len(other.tokens):\n if self.tokens[i].prefix == '.':\n right = VersionToken('.', '0', is_null=True)\n else:\n assert self.tokens[i].prefix == '-'\n right = VersionToken('-', '', is_null=True)\n else:\n right = other.tokens[i]\n\n if left == right:\n continue\n\n return left < right\n\n @classmethod\n def from_string(cls, str_version):\n \"\"\"Parse a version.\"\"\"\n version = Version()\n\n # The Maven coordinate is split in tokens between dots ('.'), hyphens ('-')\n # and transitions between digits and characters. 
The prefix is recorded\n # and will have effect on the order.\n\n # Split and keep the delimiter.\n tokens = re.split(r'([-.])', str_version)\n for i in range(0, len(tokens), 2):\n if i == 0:\n # First token has no preceding prefix.\n prefix = ''\n else:\n # Preceding prefix.\n prefix = tokens[i - 1]\n\n # A transition between digits and characters is equivalent to a hyphen.\n # According to Maven's implementation: any non-digit is a \"character\":\n # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L627\n\n # Find instances of <digit><non-digit> or <non-digit><digit>.\n # ?= makes the regex non-consuming (needed to catch overlapping\n # transitions such as <digit><non-digit><digit>).\n # This gives an array of indices where each index is where a hyphen should be.\n transitions = [\n m.span()[0] + 1\n for m in re.finditer(r'(?=(\\d[^\\d]|[^\\d]\\d))', tokens[i])\n ]\n # Add the last index so that our algorithm to split up the current token works.\n transitions.append(len(tokens[i]))\n\n prev_index = 0\n for j, transition in enumerate(transitions):\n if j > 0:\n prefix = '-'\n\n # The spec doesn't say this, but all qualifiers are case insensitive.\n current = tokens[i][prev_index:transition].lower()\n if not current:\n # Empty tokens are replaced with \"0\".\n current = '0'\n\n # Normalize \"cr\" to \"rc\" for easier comparison since they are equal in\n # precedence.\n if current == 'cr':\n current = 'rc'\n\n # Also do this for 'ga', 'final' which are equivalent to empty string.\n # \"release\" is not part of the spec but is implemented by Maven.\n if current in ('ga', 'final', 'release'):\n current = ''\n\n # the \"alpha\", \"beta\" and \"milestone\" qualifiers can respectively be\n # shortened to \"a\", \"b\" and \"m\" when directly followed by a number.\n if transition != len(tokens[i]):\n if current == 'a':\n current = 'alpha'\n\n if current == 'b':\n current = 'beta'\n\n if current == 'm':\n current = 'milestone'\n\n if current.isdigit():\n # Remove any leading zeros.\n current = str(int(current))\n\n version.tokens.append(VersionToken(prefix, current))\n prev_index = transition\n\n # Then, starting from the end of the version, the trailing \"null\" values\n # (0, \"\", \"final\", \"ga\") are trimmed.\n i = len(version.tokens) - 1\n while i > 0: # We always want at least one token for comparison\n if version.tokens[i].value in _TO_TRIM:\n version.tokens.pop(i)\n i -= 1\n continue\n\n # This process is repeated at each remaining hyphen from end to start.\n while i >= 0 and version.tokens[i].prefix != '-':\n i -= 1\n\n i -= 1\n\n return version\n\n\nclass Maven(DepsDevMixin):\n \"\"\"Maven ecosystem.\"\"\"\n\n _API_PACKAGE_URL = 'https://search.maven.org/solrsearch/select'\n\n def sort_key(self, version):\n \"\"\"Sort key.\"\"\"\n return Version.from_string(version)\n\n @staticmethod\n def _get_versions(package):\n \"\"\"Get versions.\"\"\"\n versions = []\n request_helper = RequestHelper()\n\n group_id, artifact_id = package.split(':', 2)\n start = 0\n\n while True:\n query = {\n 'q': f'g:\"{group_id}\" AND a:\"{artifact_id}\"',\n 'core': 'gav',\n 'rows': '20',\n 'wt': 'json',\n 'start': start\n }\n url = Maven._API_PACKAGE_URL + '?' 
+ urllib.parse.urlencode(query)\n response = request_helper.get(url)\n response = json.loads(response)['response']\n if response['numFound'] == 0:\n raise EnumerateError(f'Package {package} not found')\n\n for result in response['docs']:\n versions.append(result['v'])\n\n if len(versions) >= response['numFound']:\n break\n\n start = len(versions)\n\n return versions\n\n def enumerate_versions(self,\n package,\n introduced,\n fixed=None,\n last_affected=None,\n limits=None):\n \"\"\"Enumerate versions.\"\"\"\n if config.use_deps_dev:\n return self._deps_dev_enumerate(\n package, introduced, fixed, last_affected, limits=limits)\n\n get_versions = self._get_versions\n if config.shared_cache:\n get_versions = cached(config.shared_cache)(get_versions)\n\n versions = get_versions(package)\n self.sort_versions(versions)\n return self._get_affected_versions(versions, introduced, fixed,\n last_affected, limits)\n", "path": "osv/ecosystems/maven.py"}]} | 3,555 | 156 |

problem_id: gh_patches_debug_2520 | source: rasdani/github-patches | task_type: git_diff | in_source_id: cal-itp__benefits-1215

prompt:

We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor Agency dynamic headline into model prop
Right now we are hardcoding the [Agency index headline PO key](https://github.com/cal-itp/benefits/blob/dev/benefits/core/views.py#L62):
```python
page = viewmodels.Page(
title=_("core.pages.agency_index.title"),
headline=_("core.pages.agency_index.mst_cc.headline"),
button=button,
classes="home",
)
```
This is fine for MST. We need to make this a dynamic key coming from an `agency` prop for the future.
## Acceptance Criteria
<!-- Remember to consider edge cases -->
- [ ] `agency_index` gets its headline from the selected `agency`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/views.py`
Content:
```
1 """
2 The core application: view definition for the root of the webapp.
3 """
4 from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
5 from django.template import loader
6 from django.template.response import TemplateResponse
7 from django.urls import reverse
8 from django.utils.translation import pgettext, gettext as _
9
10 from . import models, session, viewmodels
11 from .middleware import pageview_decorator
12
13 ROUTE_INDEX = "core:index"
14 ROUTE_ELIGIBILITY = "eligibility:index"
15 ROUTE_HELP = "core:help"
16 ROUTE_LOGGED_OUT = "core:logged_out"
17
18 TEMPLATE_INDEX = "core/index.html"
19 TEMPLATE_AGENCY = "core/agency_index.html"
20 TEMPLATE_HELP = "core/help.html"
21 TEMPLATE_LOGGED_OUT = "core/logged_out.html"
22
23
24 @pageview_decorator
25 def index(request):
26 """View handler for the main entry page."""
27 session.reset(request)
28
29 page = viewmodels.Page(
30 title=_("core.pages.index.title"),
31 headline=_("core.pages.index.headline"),
32 modal=viewmodels.AgencySelector(
33 id="agency-selector",
34 aria_labelledby_id="agency-selector-modal-label",
35 button_text=_("core.pages.index.button"),
36 ),
37 )
38
39 return TemplateResponse(request, TEMPLATE_INDEX, page.context_dict())
40
41
42 @pageview_decorator
43 def agency_index(request, agency):
44 """View handler for an agency entry page."""
45 session.reset(request)
46 session.update(request, agency=agency, origin=agency.index_url)
47
48 button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse(ROUTE_ELIGIBILITY))
49
50 page = viewmodels.Page(
51 title=_("core.pages.agency_index.title"),
52 headline=_("core.pages.agency_index.mst_cc.headline"),
53 button=button,
54 )
55
56 return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())
57
58
59 @pageview_decorator
60 def agency_public_key(request, agency):
61 """View handler returns an agency's public key as plain text."""
62 return HttpResponse(agency.public_key_data, content_type="text/plain")
63
64
65 @pageview_decorator
66 def help(request):
67 """View handler for the help page."""
68 if session.active_agency(request):
69 agency = session.agency(request)
70 buttons = viewmodels.Button.agency_contact_links(agency)
71 else:
72 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]
73
74 buttons.append(viewmodels.Button.home(request, _("core.buttons.back")))
75
76 page = viewmodels.Page(
77 title=_("core.buttons.help"),
78 headline=_("core.buttons.help"),
79 buttons=buttons,
80 )
81
82 return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())
83
84
85 @pageview_decorator
86 def bad_request(request, exception, template_name="400.html"):
87 """View handler for HTTP 400 Bad Request responses."""
88 if session.active_agency(request):
89 session.update(request, origin=session.agency(request).index_url)
90 else:
91 session.update(request, origin=reverse(ROUTE_INDEX))
92
93 home = viewmodels.Button.home(request)
94 page = viewmodels.ErrorPage.server_error(button=home)
95 t = loader.get_template(template_name)
96
97 return HttpResponseBadRequest(t.render(page.context_dict()))
98
99
100 @pageview_decorator
101 def csrf_failure(request, reason):
102 """
103 View handler for CSRF_FAILURE_VIEW with custom data.
104 """
105 if session.active_agency(request):
106 session.update(request, origin=session.agency(request).index_url)
107 else:
108 session.update(request, origin=reverse(ROUTE_INDEX))
109
110 home = viewmodels.Button.home(request)
111 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
112 t = loader.get_template("400.html")
113
114 return HttpResponseNotFound(t.render(page.context_dict()))
115
116
117 @pageview_decorator
118 def page_not_found(request, exception, template_name="404.html"):
119 """View handler for HTTP 404 Not Found responses."""
120 if session.active_agency(request):
121 session.update(request, origin=session.agency(request).index_url)
122 else:
123 session.update(request, origin=reverse(ROUTE_INDEX))
124
125 home = viewmodels.Button.home(request)
126 # show a more user-friendly message instead of not_found
127 page = viewmodels.ErrorPage.user_error(button=home, path=request.path)
128 t = loader.get_template(template_name)
129
130 return HttpResponseNotFound(t.render(page.context_dict()))
131
132
133 @pageview_decorator
134 def server_error(request, template_name="500.html"):
135 """View handler for HTTP 500 Server Error responses."""
136 if session.active_agency(request):
137 session.update(request, origin=session.agency(request).index_url)
138 else:
139 session.update(request, origin=reverse(ROUTE_INDEX))
140
141 home = viewmodels.Button.home(request)
142 page = viewmodels.ErrorPage.server_error(button=home)
143 t = loader.get_template(template_name)
144
145 return HttpResponseServerError(t.render(page.context_dict()))
146
147
148 def logged_out(request):
149 """View handler for the final log out confirmation message."""
150 page = viewmodels.Page(
151 title=_("core.pages.logged_out.title"),
152 icon=viewmodels.Icon("happybus", pgettext("image alt text", "core.icons.happybus")),
153 )
154
155 return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```

golden_diff:

diff --git a/benefits/core/views.py b/benefits/core/views.py
--- a/benefits/core/views.py
+++ b/benefits/core/views.py
@@ -49,7 +49,8 @@
page = viewmodels.Page(
title=_("core.pages.agency_index.title"),
- headline=_("core.pages.agency_index.mst_cc.headline"),
+ headline=_("core.pages.agency_index.headline%(transit_agency_short_name)s")
+ % {"transit_agency_short_name": agency.short_name},
button=button,
)
| {"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -49,7 +49,8 @@\n \n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n- headline=_(\"core.pages.agency_index.mst_cc.headline\"),\n+ headline=_(\"core.pages.agency_index.headline%(transit_agency_short_name)s\")\n+ % {\"transit_agency_short_name\": agency.short_name},\n button=button,\n )\n", "issue": "Refactor Agency dynamic headline into model prop\nRight now we are hardcoding the [Agency index headline PO key](https://github.com/cal-itp/benefits/blob/dev/benefits/core/views.py#L62):\r\n\r\n```python\r\npage = viewmodels.Page(\r\n title=_(\"core.pages.agency_index.title\"),\r\n headline=_(\"core.pages.agency_index.mst_cc.headline\"),\r\n button=button,\r\n classes=\"home\",\r\n )\r\n```\r\n\r\nThis is fine for MST. We need to make this a dynamic key coming from an `agency` prop for the future.\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [ ] `agency_index` gets its headline from the selected `agency`\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom . import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\nROUTE_LOGGED_OUT = \"core:logged_out\"\n\nTEMPLATE_INDEX = \"core/index.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\nTEMPLATE_LOGGED_OUT = \"core/logged_out.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n headline=_(\"core.pages.index.headline\"),\n modal=viewmodels.AgencySelector(\n id=\"agency-selector\",\n aria_labelledby_id=\"agency-selector-modal-label\",\n button_text=_(\"core.pages.index.button\"),\n ),\n )\n\n return TemplateResponse(request, TEMPLATE_INDEX, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n headline=_(\"core.pages.agency_index.mst_cc.headline\"),\n button=button,\n )\n\n return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n 
buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n headline=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n\n\ndef logged_out(request):\n \"\"\"View handler for the final log out confirmation message.\"\"\"\n page = viewmodels.Page(\n title=_(\"core.pages.logged_out.title\"),\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n )\n\n return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())\n", "path": "benefits/core/views.py"}], "after_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom . 
import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\nROUTE_LOGGED_OUT = \"core:logged_out\"\n\nTEMPLATE_INDEX = \"core/index.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\nTEMPLATE_LOGGED_OUT = \"core/logged_out.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n headline=_(\"core.pages.index.headline\"),\n modal=viewmodels.AgencySelector(\n id=\"agency-selector\",\n aria_labelledby_id=\"agency-selector-modal-label\",\n button_text=_(\"core.pages.index.button\"),\n ),\n )\n\n return TemplateResponse(request, TEMPLATE_INDEX, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n headline=_(\"core.pages.agency_index.headline%(transit_agency_short_name)s\")\n % {\"transit_agency_short_name\": agency.short_name},\n button=button,\n )\n\n return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n headline=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if 
session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n\n\ndef logged_out(request):\n \"\"\"View handler for the final log out confirmation message.\"\"\"\n page = viewmodels.Page(\n title=_(\"core.pages.logged_out.title\"),\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n )\n\n return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())\n", "path": "benefits/core/views.py"}]} | 1,893 | 123 |

problem_id: gh_patches_debug_13285 | source: rasdani/github-patches | task_type: git_diff | in_source_id: alltheplaces__alltheplaces-341

prompt:

We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
La Veneciana (Argentina)
Ice cream shop.
HTML webpage to scrape: http://www.laveneciana.com.ar/sucursales.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/laveneciana.py`
Content:
```
1 import scrapy
2 import re
3 from locations.items import GeojsonPointItem
4 class LavenecianaSpider(scrapy.Spider):
5 name = "laveneciana"
6 allowed_domains = ["www.laveneciana.com.ar"]
7 download_delay = 0.5
8 start_urls = (
9 'http://www.laveneciana.com.ar/sucursales.html',
10 )
11 def parse(self, response):
12 stores = response.xpath('//div[@class="navigation-container"]/div[@id="thumbs"]/ul[@class="thumbs noscript"]/li')
13 for store in stores:
14 addr_full_tel = store.xpath('normalize-space(./div[@class="caption"]/div[@class="image-desc"]/text())').extract_first()
15 location = store.xpath('normalize-space(./div[@class="caption"]/div[@class="ubicacion"]/iframe/@src)').extract_first()
16 position = re.findall(r"ll=[0-9-.,]+" ,location)
17 id = re.findall(r"cid=[0-9]+" ,location)
18 if(len(position)>0):
19 lat =float( position[0][3:].split(',')[0])
20 lon = float(position[0][3:].split(',')[1])
21 id = id[0][4:]
22 else:
23 lat=''
24 lon=''
25 id=''
26 addr_full = re.findall(r"^[^()]{4}[^(.)]+" , addr_full_tel)[0]
27 phone_number = re.findall(r"[0-9]{4}-[0-9]{4}",addr_full_tel)
28 if(len(phone_number)>0):
29 phone_number = phone_number[0]
30 else:
31 phone_number =''
32 if(addr_full!="Direccion"):
33 properties = {
34 'addr_full': addr_full,
35 'phone':phone_number,
36 'city': '',
37 'state': '',
38 'postcode':'',
39 'ref': id,
40 'website': response.url,
41 'lat': lat,
42 'lon': lon,
43 }
44 yield GeojsonPointItem(**properties)
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```

golden_diff:

diff --git a/locations/spiders/laveneciana.py b/locations/spiders/laveneciana.py
--- a/locations/spiders/laveneciana.py
+++ b/locations/spiders/laveneciana.py
@@ -23,13 +23,9 @@
lat=''
lon=''
id=''
- addr_full = re.findall(r"^[^()]{4}[^(.)]+" , addr_full_tel)[0]
- phone_number = re.findall(r"[0-9]{4}-[0-9]{4}",addr_full_tel)
- if(len(phone_number)>0):
- phone_number = phone_number[0]
- else:
- phone_number =''
- if(addr_full!="Direccion"):
+ addr_full = addr_full_tel.split('Tel.: ')[0]
+ phone_number = addr_full_tel.split('Tel.: ')[1]
+ if(addr_full!="Direccion... "):
properties = {
'addr_full': addr_full,
'phone':phone_number,
| {"golden_diff": "diff --git a/locations/spiders/laveneciana.py b/locations/spiders/laveneciana.py\n--- a/locations/spiders/laveneciana.py\n+++ b/locations/spiders/laveneciana.py\n@@ -23,13 +23,9 @@\n lat=''\n lon=''\n id=''\n- addr_full = re.findall(r\"^[^()]{4}[^(.)]+\" , addr_full_tel)[0]\n- phone_number = re.findall(r\"[0-9]{4}-[0-9]{4}\",addr_full_tel)\n- if(len(phone_number)>0):\n- phone_number = phone_number[0]\n- else:\n- phone_number =''\n- if(addr_full!=\"Direccion\"):\n+ addr_full = addr_full_tel.split('Tel.: ')[0]\n+ phone_number = addr_full_tel.split('Tel.: ')[1]\n+ if(addr_full!=\"Direccion... \"):\n properties = {\n 'addr_full': addr_full,\n 'phone':phone_number,\n", "issue": "La Veneciana (Argentina)\nIce cream shop.\r\n\r\nHTML webpage to scrape: http://www.laveneciana.com.ar/sucursales.html\n", "before_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\nclass LavenecianaSpider(scrapy.Spider):\n name = \"laveneciana\"\n allowed_domains = [\"www.laveneciana.com.ar\"]\n download_delay = 0.5\n start_urls = (\n 'http://www.laveneciana.com.ar/sucursales.html',\n )\n def parse(self, response):\n stores = response.xpath('//div[@class=\"navigation-container\"]/div[@id=\"thumbs\"]/ul[@class=\"thumbs noscript\"]/li')\n for store in stores:\n addr_full_tel = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"image-desc\"]/text())').extract_first()\n location = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"ubicacion\"]/iframe/@src)').extract_first()\n position = re.findall(r\"ll=[0-9-.,]+\" ,location)\n id = re.findall(r\"cid=[0-9]+\" ,location)\n if(len(position)>0):\n lat =float( position[0][3:].split(',')[0])\n lon = float(position[0][3:].split(',')[1])\n id = id[0][4:]\n else:\n lat=''\n lon=''\n id=''\n addr_full = re.findall(r\"^[^()]{4}[^(.)]+\" , addr_full_tel)[0]\n phone_number = re.findall(r\"[0-9]{4}-[0-9]{4}\",addr_full_tel)\n if(len(phone_number)>0):\n phone_number = phone_number[0]\n else:\n phone_number =''\n if(addr_full!=\"Direccion\"):\n properties = {\n 'addr_full': addr_full,\n 'phone':phone_number,\n 'city': '',\n 'state': '',\n 'postcode':'',\n 'ref': id,\n 'website': response.url,\n 'lat': lat,\n 'lon': lon,\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/laveneciana.py"}], "after_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\nclass LavenecianaSpider(scrapy.Spider):\n name = \"laveneciana\"\n allowed_domains = [\"www.laveneciana.com.ar\"]\n download_delay = 0.5\n start_urls = (\n 'http://www.laveneciana.com.ar/sucursales.html',\n )\n def parse(self, response):\n stores = response.xpath('//div[@class=\"navigation-container\"]/div[@id=\"thumbs\"]/ul[@class=\"thumbs noscript\"]/li')\n for store in stores:\n addr_full_tel = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"image-desc\"]/text())').extract_first()\n location = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"ubicacion\"]/iframe/@src)').extract_first()\n position = re.findall(r\"ll=[0-9-.,]+\" ,location)\n id = re.findall(r\"cid=[0-9]+\" ,location)\n if(len(position)>0):\n lat =float( position[0][3:].split(',')[0])\n lon = float(position[0][3:].split(',')[1])\n id = id[0][4:]\n else:\n lat=''\n lon=''\n id=''\n addr_full = addr_full_tel.split('Tel.: ')[0]\n phone_number = addr_full_tel.split('Tel.: ')[1]\n if(addr_full!=\"Direccion... 
\"):\n properties = {\n 'addr_full': addr_full,\n 'phone':phone_number,\n 'city': '',\n 'state': '',\n 'postcode':'',\n 'ref': id,\n 'website': response.url,\n 'lat': lat,\n 'lon': lon,\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/laveneciana.py"}]} | 801 | 219 |

problem_id: gh_patches_debug_60373 | source: rasdani/github-patches | task_type: git_diff | in_source_id: UTNkar__moore-151

prompt:

We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Paragraph block alignment
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->
See image:

[Description of the issue]
### Steps to Reproduce
1. [First Step]
2. [Second Step]
3. [and so on...]
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/blocks/models.py`
Content:
```
1 from wagtail.wagtailcore import blocks
2 from wagtail.wagtailimages.blocks import ImageChooserBlock
3
4 from django.utils.translation import ugettext_lazy as _
5
6
7 class CountersBlock(blocks.StructBlock):
8 title = blocks.CharBlock()
9 counters = blocks.ListBlock(blocks.StructBlock([
10 ('icon', blocks.CharBlock(
11 help_text=_('Material icon font icon text, as found on: '
12 'https://material.io/icons'),
13 )),
14 ('value', blocks.CharBlock()),
15 ('description', blocks.CharBlock(required=False))
16 ]))
17 style = blocks.ChoiceBlock(choices=[
18 ('light', _('Light')),
19 ('dark', _('Dark')),
20 ])
21
22 class Meta:
23 label = _('Counters')
24 icon = 'fa-balance-scale'
25 template = 'blocks/counter.html'
26
27
28 class HeadingBlock(blocks.StructBlock):
29 title = blocks.CharBlock(required=True)
30 subtitle = blocks.CharBlock(required=False)
31
32 class Meta:
33 label = _('Heading')
34 icon = 'fa-header'
35 template = 'blocks/title.html'
36
37
38 class ImageDescriptionBlock(blocks.StructBlock):
39 description = blocks.RichTextBlock()
40 image = ImageChooserBlock()
41 image_alignment = blocks.ChoiceBlock(choices=[
42 ('left', _('Left')),
43 ('right', _('Right')),
44 ])
45 hide_on_med = blocks.BooleanBlock(required=False)
46
47 class Meta:
48 label = _('Image + Description')
49 icon = 'fa-file-image-o '
50 template = 'blocks/image_description.html'
51
52
53 class ImageIconsBlock(blocks.StructBlock):
54 title = blocks.CharBlock()
55 image = ImageChooserBlock()
56 image_alignment = blocks.ChoiceBlock(choices=[
57 ('left', _('Left')),
58 ('right', _('Right')),
59 ])
60 icons = blocks.ListBlock(blocks.StructBlock([
61 ('icon', blocks.CharBlock(
62 help_text=_('Material icon font icon text, as found on: '
63 'https://material.io/icons'),
64 )),
65 ('title', blocks.CharBlock()),
66 ('description', blocks.CharBlock())
67 ]))
68 hide_on_med = blocks.BooleanBlock(required=False)
69
70 class Meta:
71 label = _('Image + Icons')
72 icon = 'fa-file-excel-o'
73 template = 'blocks/image_icons.html'
74
75
76 class OverlayBlock(blocks.StructBlock):
77 image = ImageChooserBlock()
78 title = blocks.CharBlock(required=False)
79 description = blocks.CharBlock(required=False)
80
81 link = blocks.URLBlock(required=False)
82 button = blocks.CharBlock(required=False)
83
84 class Meta:
85 label = _('Image overlay')
86 icon = 'fa-clone'
87 template = 'blocks/overlay.html'
88
89
90 WAGTAIL_STATIC_BLOCKTYPES = [
91 ('heading', HeadingBlock()),
92 ('paragraph', blocks.RichTextBlock()),
93 ('image_description', ImageIconsBlock()),
94 ('image_icons', ImageDescriptionBlock()),
95 ('overlay', OverlayBlock()),
96 ('logos', blocks.ListBlock(
97 ImageChooserBlock(),
98 icon='fa-pied-piper',
99 template='blocks/logos.html',
100 label=_('Logos'),
101 )),
102 ('counters', CountersBlock()),
103 ('image', ImageChooserBlock(template='blocks/image.html')),
104 ]
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/blocks/models.py b/website/blocks/models.py
--- a/website/blocks/models.py
+++ b/website/blocks/models.py
@@ -89,7 +89,7 @@
WAGTAIL_STATIC_BLOCKTYPES = [
('heading', HeadingBlock()),
- ('paragraph', blocks.RichTextBlock()),
+ ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')),
('image_description', ImageIconsBlock()),
('image_icons', ImageDescriptionBlock()),
('overlay', OverlayBlock()),
| {"golden_diff": "diff --git a/website/blocks/models.py b/website/blocks/models.py\n--- a/website/blocks/models.py\n+++ b/website/blocks/models.py\n@@ -89,7 +89,7 @@\n \n WAGTAIL_STATIC_BLOCKTYPES = [\n ('heading', HeadingBlock()),\n- ('paragraph', blocks.RichTextBlock()),\n+ ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')),\n ('image_description', ImageIconsBlock()),\n ('image_icons', ImageDescriptionBlock()),\n ('overlay', OverlayBlock()),\n", "issue": "Paragraph block alignment\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\nSee image:\r\n\r\n\r\n\r\n[Description of the issue]\r\n\r\n### Steps to Reproduce\r\n\r\n1. [First Step]\r\n2. [Second Step]\r\n3. [and so on...]\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\r\n\n", "before_files": [{"content": "from wagtail.wagtailcore import blocks\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CountersBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n counters = blocks.ListBlock(blocks.StructBlock([\n ('icon', blocks.CharBlock(\n help_text=_('Material icon font icon text, as found on: '\n 'https://material.io/icons'),\n )),\n ('value', blocks.CharBlock()),\n ('description', blocks.CharBlock(required=False))\n ]))\n style = blocks.ChoiceBlock(choices=[\n ('light', _('Light')),\n ('dark', _('Dark')),\n ])\n\n class Meta:\n label = _('Counters')\n icon = 'fa-balance-scale'\n template = 'blocks/counter.html'\n\n\nclass HeadingBlock(blocks.StructBlock):\n title = blocks.CharBlock(required=True)\n subtitle = blocks.CharBlock(required=False)\n\n class Meta:\n label = _('Heading')\n icon = 'fa-header'\n template = 'blocks/title.html'\n\n\nclass ImageDescriptionBlock(blocks.StructBlock):\n description = blocks.RichTextBlock()\n image = ImageChooserBlock()\n image_alignment = blocks.ChoiceBlock(choices=[\n ('left', _('Left')),\n ('right', _('Right')),\n ])\n hide_on_med = blocks.BooleanBlock(required=False)\n\n class Meta:\n label = _('Image + Description')\n icon = 'fa-file-image-o '\n template = 'blocks/image_description.html'\n\n\nclass ImageIconsBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n image = ImageChooserBlock()\n image_alignment = blocks.ChoiceBlock(choices=[\n ('left', _('Left')),\n ('right', _('Right')),\n ])\n icons = blocks.ListBlock(blocks.StructBlock([\n ('icon', blocks.CharBlock(\n help_text=_('Material icon font icon text, as found on: '\n 'https://material.io/icons'),\n )),\n ('title', blocks.CharBlock()),\n ('description', blocks.CharBlock())\n ]))\n hide_on_med = blocks.BooleanBlock(required=False)\n\n class Meta:\n label = _('Image + Icons')\n icon = 'fa-file-excel-o'\n template = 'blocks/image_icons.html'\n\n\nclass OverlayBlock(blocks.StructBlock):\n image = ImageChooserBlock()\n title = blocks.CharBlock(required=False)\n description = blocks.CharBlock(required=False)\n\n link = blocks.URLBlock(required=False)\n button = blocks.CharBlock(required=False)\n\n class Meta:\n label = _('Image overlay')\n icon = 'fa-clone'\n template = 'blocks/overlay.html'\n\n\nWAGTAIL_STATIC_BLOCKTYPES = [\n ('heading', HeadingBlock()),\n ('paragraph', blocks.RichTextBlock()),\n ('image_description', ImageIconsBlock()),\n ('image_icons', ImageDescriptionBlock()),\n ('overlay', OverlayBlock()),\n ('logos', blocks.ListBlock(\n ImageChooserBlock(),\n icon='fa-pied-piper',\n 
template='blocks/logos.html',\n label=_('Logos'),\n )),\n ('counters', CountersBlock()),\n ('image', ImageChooserBlock(template='blocks/image.html')),\n]\n", "path": "website/blocks/models.py"}], "after_files": [{"content": "from wagtail.wagtailcore import blocks\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CountersBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n counters = blocks.ListBlock(blocks.StructBlock([\n ('icon', blocks.CharBlock(\n help_text=_('Material icon font icon text, as found on: '\n 'https://material.io/icons'),\n )),\n ('value', blocks.CharBlock()),\n ('description', blocks.CharBlock(required=False))\n ]))\n style = blocks.ChoiceBlock(choices=[\n ('light', _('Light')),\n ('dark', _('Dark')),\n ])\n\n class Meta:\n label = _('Counters')\n icon = 'fa-balance-scale'\n template = 'blocks/counter.html'\n\n\nclass HeadingBlock(blocks.StructBlock):\n title = blocks.CharBlock(required=True)\n subtitle = blocks.CharBlock(required=False)\n\n class Meta:\n label = _('Heading')\n icon = 'fa-header'\n template = 'blocks/title.html'\n\n\nclass ImageDescriptionBlock(blocks.StructBlock):\n description = blocks.RichTextBlock()\n image = ImageChooserBlock()\n image_alignment = blocks.ChoiceBlock(choices=[\n ('left', _('Left')),\n ('right', _('Right')),\n ])\n hide_on_med = blocks.BooleanBlock(required=False)\n\n class Meta:\n label = _('Image + Description')\n icon = 'fa-file-image-o '\n template = 'blocks/image_description.html'\n\n\nclass ImageIconsBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n image = ImageChooserBlock()\n image_alignment = blocks.ChoiceBlock(choices=[\n ('left', _('Left')),\n ('right', _('Right')),\n ])\n icons = blocks.ListBlock(blocks.StructBlock([\n ('icon', blocks.CharBlock(\n help_text=_('Material icon font icon text, as found on: '\n 'https://material.io/icons'),\n )),\n ('title', blocks.CharBlock()),\n ('description', blocks.CharBlock())\n ]))\n hide_on_med = blocks.BooleanBlock(required=False)\n\n class Meta:\n label = _('Image + Icons')\n icon = 'fa-file-excel-o'\n template = 'blocks/image_icons.html'\n\n\nclass OverlayBlock(blocks.StructBlock):\n image = ImageChooserBlock()\n title = blocks.CharBlock(required=False)\n description = blocks.CharBlock(required=False)\n\n link = blocks.URLBlock(required=False)\n button = blocks.CharBlock(required=False)\n\n class Meta:\n label = _('Image overlay')\n icon = 'fa-clone'\n template = 'blocks/overlay.html'\n\n\nWAGTAIL_STATIC_BLOCKTYPES = [\n ('heading', HeadingBlock()),\n ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')),\n ('image_description', ImageIconsBlock()),\n ('image_icons', ImageDescriptionBlock()),\n ('overlay', OverlayBlock()),\n ('logos', blocks.ListBlock(\n ImageChooserBlock(),\n icon='fa-pied-piper',\n template='blocks/logos.html',\n label=_('Logos'),\n )),\n ('counters', CountersBlock()),\n ('image', ImageChooserBlock(template='blocks/image.html')),\n]\n", "path": "website/blocks/models.py"}]} | 1,279 | 117 |
gh_patches_debug_16424 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-3520 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
libeay32.dll and ssleay32.dll need to be manually bundled to use PyQt5.QtNetwork with SSL
If you are having errors like:
```
qt.network.ssl: QSslSocket: cannot call unresolved function SSLv23_client_method
qt.network.ssl: QSslSocket: cannot call unresolved function SSL_CTX_new
qt.network.ssl: QSslSocket: cannot call unresolved function SSL_library_init
qt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error
qt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error
```
with PyInstaller and PyQt5 on Windows, you need to manually add libeay32.dll and ssleay32.dll from your PyQt5 site-packages (usually under `PyQt5\Qt\bin\`) to your output directory, or to your frozen binary under a similar relative path.
In my final specfile, it looks like this:
```python
# -*- mode: python -*-
block_cipher = None
a = Analysis(['cddagl\\launcher.py'],
pathex=['C:\\Program Files (x86)\\Windows Kits\\10\\Redist\\ucrt\\DLLs\\x86\\', 'C:\\Users\\remy\\Projects\\CDDA-Game-Launcher'],
binaries=[('C:\\Users\\remy\\VirtualEnvs\\CDDA-Game-Launcher\\lib\\site-packages\\PyQt5\\Qt\\bin\\libeay32.dll', 'PyQt5\\Qt\\bin'), ('C:\\Users\\remy\\VirtualEnvs\\CDDA-Game-Launcher\\lib\\site-packages\\PyQt5\\Qt\\bin\\ssleay32.dll', 'PyQt5\\Qt\\bin')],
datas=[('alembic', 'alembic'), ('bin/updated.bat', '.'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('C:\\Users\\remy\\AppData\\Local\\Programs\\Python\\Python36-32\\unrar.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],
hiddenimports=['lxml.cssselect', 'babel.numbers'],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
name='launcher',
debug=True,
strip=False,
upx=False,
runtime_tmpdir=None,
console=True , icon='cddagl\\resources\\launcher.ico')
```
I'm not sure how this can be improved, but this solution should work if you are having a similar issue. This was tested with **PyInstaller==3.4.dev0+1033a8770**
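For reference, the same two DLLs can also be collected by a PyInstaller hook instead of being listed by hand in the spec file. A minimal sketch, assuming the hook utilities expose the PyQt5 binaries directory; the attribute names and destination path below are assumptions, not verified API:

```python
# Hypothetical hook sketch (e.g. hook-PyQt5.QtNetwork.py): on Windows, bundle
# the OpenSSL DLLs that QtNetwork loads at runtime. The pyqt5_library_info
# attribute and the destination path are assumptions.
import os.path

from PyInstaller.compat import is_win
from PyInstaller.utils.hooks import pyqt5_library_info

binaries = []
if is_win:
    ssl_dlls = ('libeay32.dll', 'ssleay32.dll')
    dest = os.path.join('PyQt5', 'Qt', 'bin')
    bin_dir = pyqt5_library_info.location['BinariesPath']
    binaries += [(os.path.join(bin_dir, dll), dest) for dll in ssl_dlls]
```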
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-PyQt5.QtNetwork.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2018, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 from PyInstaller.utils.hooks import add_qt5_dependencies
10
11 hiddenimports, binaries, datas = add_qt5_dependencies(__file__)
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py
--- a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py
+++ b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py
@@ -6,6 +6,21 @@
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-from PyInstaller.utils.hooks import add_qt5_dependencies
+import os.path
+
+from PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies
+from PyInstaller.compat import is_win
hiddenimports, binaries, datas = add_qt5_dependencies(__file__)
+
+# Add libraries needed for SSL. See issue #3520.
+if is_win:
+ rel_data_path = ['PyQt5', 'Qt', 'bin']
+ binaries += [
+ (os.path.join(pyqt5_library_info.location['BinariesPath'],
+ 'libeay32.dll'),
+ os.path.join(*rel_data_path)),
+ (os.path.join(pyqt5_library_info.location['BinariesPath'],
+ 'ssleay32.dll'),
+ os.path.join(*rel_data_path))
+ ]
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n--- a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n+++ b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n@@ -6,6 +6,21 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n-from PyInstaller.utils.hooks import add_qt5_dependencies\n+import os.path\n+\n+from PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies\n+from PyInstaller.compat import is_win\n \n hiddenimports, binaries, datas = add_qt5_dependencies(__file__)\n+\n+# Add libraries needed for SSL. See issue #3520.\n+if is_win:\n+ rel_data_path = ['PyQt5', 'Qt', 'bin']\n+ binaries += [\n+ (os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ 'libeay32.dll'),\n+ os.path.join(*rel_data_path)),\n+ (os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ 'ssleay32.dll'),\n+ os.path.join(*rel_data_path))\n+ ]\n", "issue": "libeay32.dll and ssleay32.dll needs to be manually bundled to use PyQt5.QNetwork with SSL\nIf you are having errors like:\r\n\r\n```\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSLv23_client_method\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSL_CTX_new\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSL_library_init\r\nqt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error\r\nqt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error\r\n```\r\n\r\nwith PyInstaller and PyQt5 on Windows, you need to manually add libeay32.dll and ssleay32.dll from your PyQt5 site-packages (probably located somewhere in `PyQt5\\Qt\\bin\\`) to your output dir or your frozen binary in a similar path.\r\n\r\nIn my final specfile, it looks like this:\r\n\r\n```python\r\n# -*- mode: python -*-\r\n\r\nblock_cipher = None\r\n\r\n\r\na = Analysis(['cddagl\\\\launcher.py'],\r\n pathex=['C:\\\\Program Files (x86)\\\\Windows Kits\\\\10\\\\Redist\\\\ucrt\\\\DLLs\\\\x86\\\\', 'C:\\\\Users\\\\remy\\\\Projects\\\\CDDA-Game-Launcher'],\r\n binaries=[('C:\\\\Users\\\\remy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\lib\\\\site-packages\\\\PyQt5\\\\Qt\\\\bin\\\\libeay32.dll', 'PyQt5\\\\Qt\\\\bin'), ('C:\\\\Users\\\\remy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\lib\\\\site-packages\\\\PyQt5\\\\Qt\\\\bin\\\\ssleay32.dll', 'PyQt5\\\\Qt\\\\bin')],\r\n datas=[('alembic', 'alembic'), ('bin/updated.bat', '.'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('C:\\\\Users\\\\remy\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python36-32\\\\unrar.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],\r\n hiddenimports=['lxml.cssselect', 'babel.numbers'],\r\n hookspath=[],\r\n runtime_hooks=[],\r\n excludes=[],\r\n win_no_prefer_redirects=False,\r\n win_private_assemblies=False,\r\n cipher=block_cipher)\r\npyz = PYZ(a.pure, a.zipped_data,\r\n cipher=block_cipher)\r\nexe = EXE(pyz,\r\n a.scripts,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas,\r\n name='launcher',\r\n debug=True,\r\n strip=False,\r\n upx=False,\r\n runtime_tmpdir=None,\r\n console=True , icon='cddagl\\\\resources\\\\launcher.ico')\r\n```\r\n\r\nI'm not sure how this can be improved, but this solution 
should work if you are having a similar issue. This was tested with **PyInstaller==3.4.dev0+1033a8770**\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import add_qt5_dependencies\n\nhiddenimports, binaries, datas = add_qt5_dependencies(__file__)\n", "path": "PyInstaller/hooks/hook-PyQt5.QtNetwork.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nimport os.path\n\nfrom PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies\nfrom PyInstaller.compat import is_win\n\nhiddenimports, binaries, datas = add_qt5_dependencies(__file__)\n\n# Add libraries needed for SSL. See issue #3520.\nif is_win:\n rel_data_path = ['PyQt5', 'Qt', 'bin']\n binaries += [\n (os.path.join(pyqt5_library_info.location['BinariesPath'],\n 'libeay32.dll'),\n os.path.join(*rel_data_path)),\n (os.path.join(pyqt5_library_info.location['BinariesPath'],\n 'ssleay32.dll'),\n os.path.join(*rel_data_path))\n ]\n", "path": "PyInstaller/hooks/hook-PyQt5.QtNetwork.py"}]} | 1,121 | 278 |
gh_patches_debug_9014 | rasdani/github-patches | git_diff | stephenmcd__mezzanine-1517 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Duplicate form fields in admin for user profiles
As discussed here:
https://groups.google.com/forum/#!topic/mezzanine-users/3QmiqfNZjUM
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mezzanine/accounts/admin.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.contrib import admin
4 from django.contrib.auth import get_user_model
5 from mezzanine.accounts import get_profile_model, ProfileNotConfigured
6
7 from mezzanine.core.admin import SitePermissionUserAdmin
8 from mezzanine.conf import settings
9 from mezzanine.utils.email import send_approved_mail, send_verification_mail
10
11
12 User = get_user_model()
13
14 user_list_display = SitePermissionUserAdmin.list_display
15 user_list_display += ("is_active", "date_joined", "last_login")
16
17
18 class UserProfileAdmin(SitePermissionUserAdmin):
19
20 list_display = user_list_display
21
22 def save_model(self, request, obj, form, change):
23 """
24 If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``,
25 send a notification email to the user being saved if their
26 ``active`` status has changed to ``True``.
27 If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``,
28 send a verification email instead.
29 """
30 must_send_verification_mail_after_save = False
31 if change and settings.ACCOUNTS_APPROVAL_REQUIRED:
32 if obj.is_active and not User.objects.get(id=obj.id).is_active:
33 if settings.ACCOUNTS_VERIFICATION_REQUIRED:
34 # Accounts verification requires an inactive account
35 obj.is_active = False
36 # The token generated by send_verification_mail()
37 # must match the _saved_ User object,
38 # so postpone send_verification_mail() until later
39 must_send_verification_mail_after_save = True
40 else:
41 send_approved_mail(request, obj)
42 super(UserProfileAdmin, self).save_model(request, obj, form, change)
43 if must_send_verification_mail_after_save:
44 user = User.objects.get(id=obj.id)
45 send_verification_mail(request, user, "signup_verify")
46
47
48 try:
49 class ProfileInline(admin.StackedInline):
50 model = get_profile_model()
51 can_delete = False
52 template = "admin/profile_inline.html"
53 extra = 0
54 UserProfileAdmin.inlines += (ProfileInline,)
55 except ProfileNotConfigured:
56 pass
57
58
59 if User in admin.site._registry:
60 admin.site.unregister(User)
61 admin.site.register(User, UserProfileAdmin)
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mezzanine/accounts/admin.py b/mezzanine/accounts/admin.py
--- a/mezzanine/accounts/admin.py
+++ b/mezzanine/accounts/admin.py
@@ -51,6 +51,13 @@
can_delete = False
template = "admin/profile_inline.html"
extra = 0
+
+ def get_min_num(self, request, obj=None, **kwargs):
+ """This causes profile forms to be shown when editing but hidden
+ when creating. If min_num is fixed at 1, Django's initial user
+ creation form fails if the profile model has a required field."""
+ return 0 if obj is None else 1
+
UserProfileAdmin.inlines += (ProfileInline,)
except ProfileNotConfigured:
pass
| {"golden_diff": "diff --git a/mezzanine/accounts/admin.py b/mezzanine/accounts/admin.py\n--- a/mezzanine/accounts/admin.py\n+++ b/mezzanine/accounts/admin.py\n@@ -51,6 +51,13 @@\n can_delete = False\n template = \"admin/profile_inline.html\"\n extra = 0\n+\n+ def get_min_num(self, request, obj=None, **kwargs):\n+ \"\"\"This causes profile forms to be shown when editing but hidden\n+ when creating. If min_num is fixed at 1, Django's initial user\n+ creation form fails if the profile model has a required field.\"\"\"\n+ return 0 if obj is None else 1\n+\n UserProfileAdmin.inlines += (ProfileInline,)\n except ProfileNotConfigured:\n pass\n", "issue": "Duplicate form fields in admin for user profiles\nAs discussed here:\n\nhttps://groups.google.com/forum/#!topic/mezzanine-users/3QmiqfNZjUM\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom django.contrib.auth import get_user_model\nfrom mezzanine.accounts import get_profile_model, ProfileNotConfigured\n\nfrom mezzanine.core.admin import SitePermissionUserAdmin\nfrom mezzanine.conf import settings\nfrom mezzanine.utils.email import send_approved_mail, send_verification_mail\n\n\nUser = get_user_model()\n\nuser_list_display = SitePermissionUserAdmin.list_display\nuser_list_display += (\"is_active\", \"date_joined\", \"last_login\")\n\n\nclass UserProfileAdmin(SitePermissionUserAdmin):\n\n list_display = user_list_display\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``,\n send a notification email to the user being saved if their\n ``active`` status has changed to ``True``.\n If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``,\n send a verification email instead.\n \"\"\"\n must_send_verification_mail_after_save = False\n if change and settings.ACCOUNTS_APPROVAL_REQUIRED:\n if obj.is_active and not User.objects.get(id=obj.id).is_active:\n if settings.ACCOUNTS_VERIFICATION_REQUIRED:\n # Accounts verification requires an inactive account\n obj.is_active = False\n # The token generated by send_verification_mail()\n # must match the _saved_ User object,\n # so postpone send_verification_mail() until later\n must_send_verification_mail_after_save = True\n else:\n send_approved_mail(request, obj)\n super(UserProfileAdmin, self).save_model(request, obj, form, change)\n if must_send_verification_mail_after_save:\n user = User.objects.get(id=obj.id)\n send_verification_mail(request, user, \"signup_verify\")\n\n\ntry:\n class ProfileInline(admin.StackedInline):\n model = get_profile_model()\n can_delete = False\n template = \"admin/profile_inline.html\"\n extra = 0\n UserProfileAdmin.inlines += (ProfileInline,)\nexcept ProfileNotConfigured:\n pass\n\n\nif User in admin.site._registry:\n admin.site.unregister(User)\nadmin.site.register(User, UserProfileAdmin)\n", "path": "mezzanine/accounts/admin.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom django.contrib.auth import get_user_model\nfrom mezzanine.accounts import get_profile_model, ProfileNotConfigured\n\nfrom mezzanine.core.admin import SitePermissionUserAdmin\nfrom mezzanine.conf import settings\nfrom mezzanine.utils.email import send_approved_mail, send_verification_mail\n\n\nUser = get_user_model()\n\nuser_list_display = SitePermissionUserAdmin.list_display\nuser_list_display += (\"is_active\", \"date_joined\", \"last_login\")\n\n\nclass 
UserProfileAdmin(SitePermissionUserAdmin):\n\n list_display = user_list_display\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``,\n send a notification email to the user being saved if their\n ``active`` status has changed to ``True``.\n If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``,\n send a verification email instead.\n \"\"\"\n must_send_verification_mail_after_save = False\n if change and settings.ACCOUNTS_APPROVAL_REQUIRED:\n if obj.is_active and not User.objects.get(id=obj.id).is_active:\n if settings.ACCOUNTS_VERIFICATION_REQUIRED:\n # Accounts verification requires an inactive account\n obj.is_active = False\n # The token generated by send_verification_mail()\n # must match the _saved_ User object,\n # so postpone send_verification_mail() until later\n must_send_verification_mail_after_save = True\n else:\n send_approved_mail(request, obj)\n super(UserProfileAdmin, self).save_model(request, obj, form, change)\n if must_send_verification_mail_after_save:\n user = User.objects.get(id=obj.id)\n send_verification_mail(request, user, \"signup_verify\")\n\n\ntry:\n class ProfileInline(admin.StackedInline):\n model = get_profile_model()\n can_delete = False\n template = \"admin/profile_inline.html\"\n extra = 0\n\n def get_min_num(self, request, obj=None, **kwargs):\n \"\"\"This causes profile forms to be shown when editing but hidden\n when creating. If min_num is fixed at 1, Django's initial user\n creation form fails if the profile model has a required field.\"\"\"\n return 0 if obj is None else 1\n\n UserProfileAdmin.inlines += (ProfileInline,)\nexcept ProfileNotConfigured:\n pass\n\n\nif User in admin.site._registry:\n admin.site.unregister(User)\nadmin.site.register(User, UserProfileAdmin)\n", "path": "mezzanine/accounts/admin.py"}]} | 879 | 168 |
gh_patches_debug_2536 | rasdani/github-patches | git_diff | optuna__optuna-122 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`TPESampler._sample_categorical` fails with PostgreSQL backend
`TPESampler._sample_categorical` fails with the PostgreSQL backend. This happens because:
- `TPESampler._sample_categorical` returns the sampled index as a `numpy.int32`.
- That value is passed to the storage class without any cast.
- SQLAlchemy with the psycopg2 backend does not accept `numpy.int32` input, while a plain `int` works (a minimal sketch of the needed cast is shown below).
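
A minimal illustration of the mismatch, with the storage call shown only schematically (hypothetical argument names); the essential point is the cast from `numpy.int32` to a built-in `int`:

```python
# Hypothetical illustration of the failing type. The storage call is shown
# only in comments; the point is numpy.int32 vs. built-in int.
import numpy as np

idx = np.int32(2)                      # what the categorical sampler returns

# With a psycopg2-backed storage, binding idx directly fails because
# psycopg2 cannot adapt numpy integer types:
#   storage.set_trial_param(trial_id, param_name, idx)        # fails
# Casting to a built-in int before handing it to the storage layer works:
#   storage.set_trial_param(trial_id, param_name, int(idx))   # ok
assert isinstance(int(idx), int)
```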
**Repro Steps**
With any objective function using categorical sampling (e.g., example one in `chainer_mnist.py`), invoke `minimize` as:
```
study = pfnopt.create_study(storage=SOME_POSTGRES_URL)
pfnopt.minimize(objective, n_trials=100, study=study)
```
It fails once `n_startup_trials` trials have been run.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pfnopt/samplers/tpe.py`
Content:
```
1 import math
2 import numpy
3 from typing import List # NOQA
4 from typing import Optional # NOQA
5
6 from pfnopt import distributions # NOQA
7 from pfnopt.samplers import _hyperopt
8 from pfnopt.samplers import base
9 from pfnopt.samplers import random
10 from pfnopt.storages.base import BaseStorage # NOQA
11
12
13 class TPESampler(base.BaseSampler):
14
15 def __init__(self,
16 prior_weight=_hyperopt.default_prior_weight,
17 n_startup_trials=_hyperopt.default_n_startup_trials,
18 n_ei_candidates=_hyperopt.default_n_ei_candidates,
19 gamma=_hyperopt.default_gamma,
20 seed=None):
21 # type: (float, int, int, float, Optional[int]) -> None
22 self.prior_weight = prior_weight
23 self.n_startup_trials = n_startup_trials
24 self.n_ei_candidates = n_ei_candidates
25 self.gamma = gamma
26 self.seed = seed
27
28 self.rng = numpy.random.RandomState(seed)
29 self.random_sampler = random.RandomSampler(seed=seed)
30
31 def sample(self, storage, study_id, param_name, param_distribution):
32 # type: (BaseStorage, int, str, distributions.BaseDistribution) -> float
33 observation_pairs = storage.get_trial_param_result_pairs(
34 study_id, param_name)
35 n = len(observation_pairs)
36
37 # TODO(Akiba): this behavior is slightly different from hyperopt
38 if n < self.n_startup_trials:
39 return self.random_sampler.sample(storage, study_id, param_name, param_distribution)
40
41 below_param_values, above_param_values = _hyperopt.ap_filter_trials(
42 range(n), [p[0] for p in observation_pairs],
43 range(n), [p[1] for p in observation_pairs],
44 self.gamma)
45
46 if isinstance(param_distribution, distributions.UniformDistribution):
47 return self._sample_uniform(
48 param_distribution, below_param_values, above_param_values)
49 elif isinstance(param_distribution, distributions.LogUniformDistribution):
50 return self._sample_loguniform(
51 param_distribution, below_param_values, above_param_values)
52 elif isinstance(param_distribution, distributions.CategoricalDistribution):
53 return self._sample_categorical(
54 param_distribution, below_param_values, above_param_values)
55 else:
56 raise NotImplementedError
57
58 def _sample_uniform(self, distribution, below, above):
59 # type: (distributions.UniformDistribution, List[float], List[float]) -> float
60 return _hyperopt.sample_uniform(
61 obs_below=below, obs_above=above, prior_weight=self.prior_weight,
62 low=distribution.low, high=distribution.high,
63 size=(self.n_ei_candidates,), rng=self.rng)
64
65 def _sample_loguniform(self, distribution, below, above):
66 # type: (distributions.LogUniformDistribution, List[float], List[float]) -> float
67
68 return _hyperopt.sample_loguniform(
69 obs_below=below, obs_above=above, prior_weight=self.prior_weight,
70 # `sample_loguniform` generates values in [exp(low), exp(high)]
71 low=math.log(distribution.low),
72 high=math.log(distribution.high),
73 size=(self.n_ei_candidates,), rng=self.rng)
74
75 def _sample_categorical(self, distribution, below, above):
76 # type: (distributions.CategoricalDistribution, List[float], List[float]) -> float
77 choices = distribution.choices
78 below = list(map(int, below))
79 above = list(map(int, above))
80 idx = _hyperopt.sample_categorical(
81 obs_below=below, obs_above=above, prior_weight=self.prior_weight,
82 upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)
83 return idx
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pfnopt/samplers/tpe.py b/pfnopt/samplers/tpe.py
--- a/pfnopt/samplers/tpe.py
+++ b/pfnopt/samplers/tpe.py
@@ -80,4 +80,4 @@
idx = _hyperopt.sample_categorical(
obs_below=below, obs_above=above, prior_weight=self.prior_weight,
upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)
- return idx
+ return int(idx)
| {"golden_diff": "diff --git a/pfnopt/samplers/tpe.py b/pfnopt/samplers/tpe.py\n--- a/pfnopt/samplers/tpe.py\n+++ b/pfnopt/samplers/tpe.py\n@@ -80,4 +80,4 @@\n idx = _hyperopt.sample_categorical(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\n- return idx\n+ return int(idx)\n", "issue": "`TPESampler._sample_categorical` fails with PostgreSQL backend\n`TPESampler._sample_categorical` fails with PostgreSQL backend. This happens because:\r\n- `TPESampler._sample_categorical` returns an integer as `numpy.int32`.\r\n- The integer value is input to storage class without any cast.\r\n- SQLAlchemy with psycopg2 backend does not support `numpy.int32` input but does `int` one.\r\n\r\n**Repro Steps**\r\nWith any objective function using categorical sampling (e.g., example one in `chainer_mnist.py`), invoke `minimize` as:\r\n```\r\nstudy = pfnopt.create_study(storage=SOME_POSTGRES_URL)\r\npfnopt.minimize(objective, n_trials=100, study=study)\r\n```\r\n\r\nIt fails after running trials `n_startup_trails` times.\n", "before_files": [{"content": "import math\nimport numpy\nfrom typing import List # NOQA\nfrom typing import Optional # NOQA\n\nfrom pfnopt import distributions # NOQA\nfrom pfnopt.samplers import _hyperopt\nfrom pfnopt.samplers import base\nfrom pfnopt.samplers import random\nfrom pfnopt.storages.base import BaseStorage # NOQA\n\n\nclass TPESampler(base.BaseSampler):\n\n def __init__(self,\n prior_weight=_hyperopt.default_prior_weight,\n n_startup_trials=_hyperopt.default_n_startup_trials,\n n_ei_candidates=_hyperopt.default_n_ei_candidates,\n gamma=_hyperopt.default_gamma,\n seed=None):\n # type: (float, int, int, float, Optional[int]) -> None\n self.prior_weight = prior_weight\n self.n_startup_trials = n_startup_trials\n self.n_ei_candidates = n_ei_candidates\n self.gamma = gamma\n self.seed = seed\n\n self.rng = numpy.random.RandomState(seed)\n self.random_sampler = random.RandomSampler(seed=seed)\n\n def sample(self, storage, study_id, param_name, param_distribution):\n # type: (BaseStorage, int, str, distributions.BaseDistribution) -> float\n observation_pairs = storage.get_trial_param_result_pairs(\n study_id, param_name)\n n = len(observation_pairs)\n\n # TODO(Akiba): this behavior is slightly different from hyperopt\n if n < self.n_startup_trials:\n return self.random_sampler.sample(storage, study_id, param_name, param_distribution)\n\n below_param_values, above_param_values = _hyperopt.ap_filter_trials(\n range(n), [p[0] for p in observation_pairs],\n range(n), [p[1] for p in observation_pairs],\n self.gamma)\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._sample_uniform(\n param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n return self._sample_loguniform(\n param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n return self._sample_categorical(\n param_distribution, below_param_values, above_param_values)\n else:\n raise NotImplementedError\n\n def _sample_uniform(self, distribution, below, above):\n # type: (distributions.UniformDistribution, List[float], List[float]) -> float\n return _hyperopt.sample_uniform(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n low=distribution.low, high=distribution.high,\n size=(self.n_ei_candidates,), rng=self.rng)\n\n def 
_sample_loguniform(self, distribution, below, above):\n # type: (distributions.LogUniformDistribution, List[float], List[float]) -> float\n\n return _hyperopt.sample_loguniform(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n # `sample_loguniform` generates values in [exp(low), exp(high)]\n low=math.log(distribution.low),\n high=math.log(distribution.high),\n size=(self.n_ei_candidates,), rng=self.rng)\n\n def _sample_categorical(self, distribution, below, above):\n # type: (distributions.CategoricalDistribution, List[float], List[float]) -> float\n choices = distribution.choices\n below = list(map(int, below))\n above = list(map(int, above))\n idx = _hyperopt.sample_categorical(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\n return idx\n", "path": "pfnopt/samplers/tpe.py"}], "after_files": [{"content": "import math\nimport numpy\nfrom typing import List # NOQA\nfrom typing import Optional # NOQA\n\nfrom pfnopt import distributions # NOQA\nfrom pfnopt.samplers import _hyperopt\nfrom pfnopt.samplers import base\nfrom pfnopt.samplers import random\nfrom pfnopt.storages.base import BaseStorage # NOQA\n\n\nclass TPESampler(base.BaseSampler):\n\n def __init__(self,\n prior_weight=_hyperopt.default_prior_weight,\n n_startup_trials=_hyperopt.default_n_startup_trials,\n n_ei_candidates=_hyperopt.default_n_ei_candidates,\n gamma=_hyperopt.default_gamma,\n seed=None):\n # type: (float, int, int, float, Optional[int]) -> None\n self.prior_weight = prior_weight\n self.n_startup_trials = n_startup_trials\n self.n_ei_candidates = n_ei_candidates\n self.gamma = gamma\n self.seed = seed\n\n self.rng = numpy.random.RandomState(seed)\n self.random_sampler = random.RandomSampler(seed=seed)\n\n def sample(self, storage, study_id, param_name, param_distribution):\n # type: (BaseStorage, int, str, distributions.BaseDistribution) -> float\n observation_pairs = storage.get_trial_param_result_pairs(\n study_id, param_name)\n n = len(observation_pairs)\n\n # TODO(Akiba): this behavior is slightly different from hyperopt\n if n < self.n_startup_trials:\n return self.random_sampler.sample(storage, study_id, param_name, param_distribution)\n\n below_param_values, above_param_values = _hyperopt.ap_filter_trials(\n range(n), [p[0] for p in observation_pairs],\n range(n), [p[1] for p in observation_pairs],\n self.gamma)\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._sample_uniform(\n param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n return self._sample_loguniform(\n param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n return self._sample_categorical(\n param_distribution, below_param_values, above_param_values)\n else:\n raise NotImplementedError\n\n def _sample_uniform(self, distribution, below, above):\n # type: (distributions.UniformDistribution, List[float], List[float]) -> float\n return _hyperopt.sample_uniform(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n low=distribution.low, high=distribution.high,\n size=(self.n_ei_candidates,), rng=self.rng)\n\n def _sample_loguniform(self, distribution, below, above):\n # type: (distributions.LogUniformDistribution, List[float], List[float]) -> float\n\n return _hyperopt.sample_loguniform(\n obs_below=below, obs_above=above, 
prior_weight=self.prior_weight,\n # `sample_loguniform` generates values in [exp(low), exp(high)]\n low=math.log(distribution.low),\n high=math.log(distribution.high),\n size=(self.n_ei_candidates,), rng=self.rng)\n\n def _sample_categorical(self, distribution, below, above):\n # type: (distributions.CategoricalDistribution, List[float], List[float]) -> float\n choices = distribution.choices\n below = list(map(int, below))\n above = list(map(int, above))\n idx = _hyperopt.sample_categorical(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\n return int(idx)\n", "path": "pfnopt/samplers/tpe.py"}]} | 1,385 | 121 |
gh_patches_debug_34056 | rasdani/github-patches | git_diff | modin-project__modin-2806 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[ASV] use `wait` function to get the right performance times
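
The underlying concern behind this request: with an asynchronous engine, the benchmarked call can return before the distributed work has finished, so the measured time is too short. A minimal sketch of blocking on completion, assuming the Ray engine; the private attribute names are assumptions taken from the fix shown below:

```python
# Hypothetical sketch (Ray engine assumed): block until every partition of a
# Modin DataFrame is materialized before stopping the benchmark timer.
from ray import wait

def wait_for(df):
    partitions = df._query_compiler._modin_frame._partitions
    for part in partitions.flatten():
        part.drain_call_queue()   # submit any deferred work
        wait([part.oid])          # block until the object is computed
```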
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `asv_bench/benchmarks/scalability/scalability_benchmarks.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 import modin.pandas as pd
15 from modin.pandas.utils import from_pandas
16 from modin.utils import to_pandas
17 import pandas
18
19 from ..utils import (
20 gen_data,
21 generate_dataframe,
22 RAND_LOW,
23 RAND_HIGH,
24 ASV_DATASET_SIZE,
25 UNARY_OP_DATA_SIZE,
26 execute,
27 )
28
29
30 class TimeFromPandas:
31 param_names = ["shape", "cpus"]
32 params = [
33 UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],
34 [4, 16, 32],
35 ]
36
37 def setup(self, shape, cpus):
38 self.data = pandas.DataFrame(gen_data("int", *shape, RAND_LOW, RAND_HIGH))
39 from modin.config import NPartitions
40
41 NPartitions.get = lambda: cpus
42 # trigger ray init
43 pd.DataFrame([])
44
45 def time_from_pandas(self, shape, cpus):
46 execute(from_pandas(self.data))
47
48
49 class TimeToPandas:
50 param_names = ["shape", "cpus"]
51 params = [
52 UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],
53 [4, 16, 32],
54 ]
55
56 def setup(self, shape, cpus):
57 from modin.config import NPartitions
58
59 NPartitions.get = lambda: cpus
60 self.data = generate_dataframe("modin", "int", *shape, RAND_LOW, RAND_HIGH)
61
62 def time_to_pandas(self, shape, cpus):
63 execute(to_pandas(self.data))
64
```
Path: `asv_bench/benchmarks/utils.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 import os
15 import logging
16 import modin.pandas as pd
17 import pandas
18 import numpy as np
19 import uuid
20
21 RAND_LOW = 0
22 RAND_HIGH = 100
23 random_state = np.random.RandomState(seed=42)
24
25
26 try:
27 from modin.config import NPartitions
28
29 NPARTITIONS = NPartitions.get()
30 except ImportError:
31 NPARTITIONS = pd.DEFAULT_NPARTITIONS
32
33 try:
34 from modin.config import TestDatasetSize, AsvImplementation
35
36 ASV_USE_IMPL = AsvImplementation.get()
37 ASV_DATASET_SIZE = TestDatasetSize.get() or "Small"
38 except ImportError:
39 # The same benchmarking code can be run for different versions of Modin, so in
40 # case of an error importing important variables, we'll just use predefined values
41 ASV_USE_IMPL = os.environ.get("MODIN_ASV_USE_IMPL", "modin")
42 ASV_DATASET_SIZE = os.environ.get("MODIN_TEST_DATASET_SIZE", "Small")
43
44 assert ASV_USE_IMPL in ("modin", "pandas")
45
46 BINARY_OP_DATA_SIZE = {
47 "Big": [
48 ((5000, 5000), (5000, 5000)),
49 # the case extremely inefficient
50 # ((20, 500_000), (10, 1_000_000)),
51 ((500_000, 20), (1_000_000, 10)),
52 ],
53 "Small": [
54 ((250, 250), (250, 250)),
55 ((20, 10_000), (10, 25_000)),
56 ((10_000, 20), (25_000, 10)),
57 ],
58 }
59
60 UNARY_OP_DATA_SIZE = {
61 "Big": [
62 (5000, 5000),
63 # the case extremely inefficient
64 # (10, 1_000_000),
65 (1_000_000, 10),
66 ],
67 "Small": [
68 (250, 250),
69 (10, 10_000),
70 (10_000, 10),
71 ],
72 }
73
74 GROUPBY_NGROUPS = {
75 "Big": [100, "huge_amount_groups"],
76 "Small": [5],
77 }
78
79 IMPL = {
80 "modin": pd,
81 "pandas": pandas,
82 }
83
84
85 def translator_groupby_ngroups(groupby_ngroups, shape):
86 if ASV_DATASET_SIZE == "Big":
87 if groupby_ngroups == "huge_amount_groups":
88 return min(shape[0] // 2, 5000)
89 return groupby_ngroups
90 else:
91 return groupby_ngroups
92
93
94 class weakdict(dict):
95 __slots__ = ("__weakref__",)
96
97
98 data_cache = dict()
99 dataframes_cache = dict()
100
101
102 def gen_int_data(nrows, ncols, rand_low, rand_high):
103 cache_key = ("int", nrows, ncols, rand_low, rand_high)
104 if cache_key in data_cache:
105 return data_cache[cache_key]
106
107 logging.info(
108 "Generating int data {} rows and {} columns [{}-{}]".format(
109 nrows, ncols, rand_low, rand_high
110 )
111 )
112 data = {
113 "col{}".format(i): random_state.randint(rand_low, rand_high, size=(nrows))
114 for i in range(ncols)
115 }
116 data_cache[cache_key] = weakdict(data)
117 return data
118
119
120 def gen_str_int_data(nrows, ncols, rand_low, rand_high):
121 cache_key = ("str_int", nrows, ncols, rand_low, rand_high)
122 if cache_key in data_cache:
123 return data_cache[cache_key]
124
125 logging.info(
126 "Generating str_int data {} rows and {} columns [{}-{}]".format(
127 nrows, ncols, rand_low, rand_high
128 )
129 )
130 data = gen_int_data(nrows, ncols, rand_low, rand_high).copy()
131 data["gb_col"] = [
132 "str_{}".format(random_state.randint(rand_low, rand_high)) for i in range(nrows)
133 ]
134 data_cache[cache_key] = weakdict(data)
135 return data
136
137
138 def gen_data(data_type, nrows, ncols, rand_low, rand_high):
139 if data_type == "int":
140 return gen_int_data(nrows, ncols, rand_low, rand_high)
141 elif data_type == "str_int":
142 return gen_str_int_data(nrows, ncols, rand_low, rand_high)
143 else:
144 assert False
145
146
147 def generate_dataframe(
148 impl,
149 data_type,
150 nrows,
151 ncols,
152 rand_low,
153 rand_high,
154 groupby_ncols=None,
155 count_groups=None,
156 ):
157 assert not (
158 (groupby_ncols is None) ^ (count_groups is None)
159 ), "You must either specify both parameters 'groupby_ncols' and 'count_groups' or none of them."
160
161 if groupby_ncols and count_groups:
162 ncols -= groupby_ncols
163 cache_key = (
164 impl,
165 data_type,
166 nrows,
167 ncols,
168 rand_low,
169 rand_high,
170 groupby_ncols,
171 count_groups,
172 )
173 else:
174 cache_key = (impl, data_type, nrows, ncols, rand_low, rand_high)
175
176 if cache_key in dataframes_cache:
177 return dataframes_cache[cache_key]
178
179 logging.info(
180 "Allocating {} DataFrame {}: {} rows and {} columns [{}-{}]".format(
181 impl, data_type, nrows, ncols, rand_low, rand_high
182 )
183 )
184 data = gen_data(data_type, nrows, ncols, rand_low, rand_high)
185
186 if groupby_ncols and count_groups:
187 groupby_columns = [f"groupby_col{x}" for x in range(groupby_ncols)]
188 for groupby_col in groupby_columns:
189 data[groupby_col] = np.tile(np.arange(count_groups), nrows // count_groups)
190
191 if impl == "modin":
192 df = pd.DataFrame(data)
193 elif impl == "pandas":
194 df = pandas.DataFrame(data)
195 else:
196 assert False
197
198 if groupby_ncols and count_groups:
199 dataframes_cache[cache_key] = df, groupby_columns
200 return df, groupby_columns
201
202 dataframes_cache[cache_key] = df
203 return df
204
205
206 def random_string():
207 return str(uuid.uuid1())
208
209
210 def random_columns(df_columns, columns_number):
211 return list(random_state.choice(df_columns, size=columns_number))
212
213
214 def random_booleans(number):
215 return list(random_state.choice([True, False], size=number))
216
217
218 def execute(df):
219 "Make sure the calculations are done."
220 return df.shape, df.dtypes
221
222
223 def get_shape_id(array):
224 return "_".join([str(element) for element in array])
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/asv_bench/benchmarks/scalability/scalability_benchmarks.py b/asv_bench/benchmarks/scalability/scalability_benchmarks.py
--- a/asv_bench/benchmarks/scalability/scalability_benchmarks.py
+++ b/asv_bench/benchmarks/scalability/scalability_benchmarks.py
@@ -60,4 +60,5 @@
self.data = generate_dataframe("modin", "int", *shape, RAND_LOW, RAND_HIGH)
def time_to_pandas(self, shape, cpus):
- execute(to_pandas(self.data))
+ # to_pandas is already synchronous
+ to_pandas(self.data)
diff --git a/asv_bench/benchmarks/utils.py b/asv_bench/benchmarks/utils.py
--- a/asv_bench/benchmarks/utils.py
+++ b/asv_bench/benchmarks/utils.py
@@ -31,15 +31,17 @@
NPARTITIONS = pd.DEFAULT_NPARTITIONS
try:
- from modin.config import TestDatasetSize, AsvImplementation
+ from modin.config import TestDatasetSize, AsvImplementation, Engine
ASV_USE_IMPL = AsvImplementation.get()
ASV_DATASET_SIZE = TestDatasetSize.get() or "Small"
+ ASV_USE_ENGINE = Engine.get()
except ImportError:
# The same benchmarking code can be run for different versions of Modin, so in
# case of an error importing important variables, we'll just use predefined values
ASV_USE_IMPL = os.environ.get("MODIN_ASV_USE_IMPL", "modin")
ASV_DATASET_SIZE = os.environ.get("MODIN_TEST_DATASET_SIZE", "Small")
+ ASV_USE_ENGINE = os.environ.get("MODIN_ENGINE", "Ray")
assert ASV_USE_IMPL in ("modin", "pandas")
@@ -217,7 +219,24 @@
def execute(df):
"Make sure the calculations are done."
- return df.shape, df.dtypes
+ if ASV_USE_IMPL == "modin":
+ partitions = df._query_compiler._modin_frame._partitions
+ map(lambda partition: partition.drain_call_queue(), partitions)
+ if ASV_USE_ENGINE == "Ray":
+ from ray import wait
+
+ map(lambda partition: wait(partition.oid), partitions)
+ elif ASV_USE_ENGINE == "Dask":
+ from dask.distributed import wait
+
+ map(lambda partition: wait(partition.future), partitions)
+ elif ASV_USE_ENGINE == "Python":
+ pass
+
+ elif ASV_USE_IMPL == "pandas":
+ pass
+ else:
+ raise ValueError(f"wrong value of {ASV_USE_IMPL}")
def get_shape_id(array):
| {"golden_diff": "diff --git a/asv_bench/benchmarks/scalability/scalability_benchmarks.py b/asv_bench/benchmarks/scalability/scalability_benchmarks.py\n--- a/asv_bench/benchmarks/scalability/scalability_benchmarks.py\n+++ b/asv_bench/benchmarks/scalability/scalability_benchmarks.py\n@@ -60,4 +60,5 @@\n self.data = generate_dataframe(\"modin\", \"int\", *shape, RAND_LOW, RAND_HIGH)\n \n def time_to_pandas(self, shape, cpus):\n- execute(to_pandas(self.data))\n+ # to_pandas is already synchronous\n+ to_pandas(self.data)\ndiff --git a/asv_bench/benchmarks/utils.py b/asv_bench/benchmarks/utils.py\n--- a/asv_bench/benchmarks/utils.py\n+++ b/asv_bench/benchmarks/utils.py\n@@ -31,15 +31,17 @@\n NPARTITIONS = pd.DEFAULT_NPARTITIONS\n \n try:\n- from modin.config import TestDatasetSize, AsvImplementation\n+ from modin.config import TestDatasetSize, AsvImplementation, Engine\n \n ASV_USE_IMPL = AsvImplementation.get()\n ASV_DATASET_SIZE = TestDatasetSize.get() or \"Small\"\n+ ASV_USE_ENGINE = Engine.get()\n except ImportError:\n # The same benchmarking code can be run for different versions of Modin, so in\n # case of an error importing important variables, we'll just use predefined values\n ASV_USE_IMPL = os.environ.get(\"MODIN_ASV_USE_IMPL\", \"modin\")\n ASV_DATASET_SIZE = os.environ.get(\"MODIN_TEST_DATASET_SIZE\", \"Small\")\n+ ASV_USE_ENGINE = os.environ.get(\"MODIN_ENGINE\", \"Ray\")\n \n assert ASV_USE_IMPL in (\"modin\", \"pandas\")\n \n@@ -217,7 +219,24 @@\n \n def execute(df):\n \"Make sure the calculations are done.\"\n- return df.shape, df.dtypes\n+ if ASV_USE_IMPL == \"modin\":\n+ partitions = df._query_compiler._modin_frame._partitions\n+ map(lambda partition: partition.drain_call_queue(), partitions)\n+ if ASV_USE_ENGINE == \"Ray\":\n+ from ray import wait\n+\n+ map(lambda partition: wait(partition.oid), partitions)\n+ elif ASV_USE_ENGINE == \"Dask\":\n+ from dask.distributed import wait\n+\n+ map(lambda partition: wait(partition.future), partitions)\n+ elif ASV_USE_ENGINE == \"Python\":\n+ pass\n+\n+ elif ASV_USE_IMPL == \"pandas\":\n+ pass\n+ else:\n+ raise ValueError(f\"wrong value of {ASV_USE_IMPL}\")\n \n \n def get_shape_id(array):\n", "issue": "[ASV] use `wait` function to get the right performance times\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport modin.pandas as pd\nfrom modin.pandas.utils import from_pandas\nfrom modin.utils import to_pandas\nimport pandas\n\nfrom ..utils import (\n gen_data,\n generate_dataframe,\n RAND_LOW,\n RAND_HIGH,\n ASV_DATASET_SIZE,\n UNARY_OP_DATA_SIZE,\n execute,\n)\n\n\nclass TimeFromPandas:\n param_names = [\"shape\", \"cpus\"]\n params = [\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n [4, 16, 32],\n ]\n\n def setup(self, shape, cpus):\n self.data = pandas.DataFrame(gen_data(\"int\", *shape, RAND_LOW, RAND_HIGH))\n from modin.config import NPartitions\n\n NPartitions.get = lambda: cpus\n # trigger ray init\n pd.DataFrame([])\n\n def time_from_pandas(self, shape, cpus):\n execute(from_pandas(self.data))\n\n\nclass TimeToPandas:\n param_names = [\"shape\", \"cpus\"]\n params = [\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n [4, 16, 32],\n ]\n\n def setup(self, shape, cpus):\n from modin.config import NPartitions\n\n NPartitions.get = lambda: cpus\n self.data = generate_dataframe(\"modin\", \"int\", *shape, RAND_LOW, RAND_HIGH)\n\n def time_to_pandas(self, shape, cpus):\n execute(to_pandas(self.data))\n", "path": "asv_bench/benchmarks/scalability/scalability_benchmarks.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport os\nimport logging\nimport modin.pandas as pd\nimport pandas\nimport numpy as np\nimport uuid\n\nRAND_LOW = 0\nRAND_HIGH = 100\nrandom_state = np.random.RandomState(seed=42)\n\n\ntry:\n from modin.config import NPartitions\n\n NPARTITIONS = NPartitions.get()\nexcept ImportError:\n NPARTITIONS = pd.DEFAULT_NPARTITIONS\n\ntry:\n from modin.config import TestDatasetSize, AsvImplementation\n\n ASV_USE_IMPL = AsvImplementation.get()\n ASV_DATASET_SIZE = TestDatasetSize.get() or \"Small\"\nexcept ImportError:\n # The same benchmarking code can be run for different versions of Modin, so in\n # case of an error importing important variables, we'll just use predefined values\n ASV_USE_IMPL = os.environ.get(\"MODIN_ASV_USE_IMPL\", \"modin\")\n ASV_DATASET_SIZE = os.environ.get(\"MODIN_TEST_DATASET_SIZE\", \"Small\")\n\nassert ASV_USE_IMPL in (\"modin\", \"pandas\")\n\nBINARY_OP_DATA_SIZE = {\n \"Big\": [\n ((5000, 5000), (5000, 5000)),\n # the case extremely inefficient\n # ((20, 500_000), (10, 1_000_000)),\n ((500_000, 20), (1_000_000, 10)),\n ],\n \"Small\": [\n ((250, 250), (250, 250)),\n ((20, 10_000), (10, 25_000)),\n ((10_000, 20), (25_000, 10)),\n ],\n}\n\nUNARY_OP_DATA_SIZE = {\n \"Big\": [\n (5000, 5000),\n # the case extremely inefficient\n # (10, 1_000_000),\n (1_000_000, 10),\n ],\n \"Small\": [\n (250, 250),\n (10, 10_000),\n (10_000, 10),\n ],\n}\n\nGROUPBY_NGROUPS = {\n \"Big\": [100, \"huge_amount_groups\"],\n \"Small\": [5],\n}\n\nIMPL = {\n \"modin\": pd,\n \"pandas\": pandas,\n}\n\n\ndef translator_groupby_ngroups(groupby_ngroups, shape):\n if ASV_DATASET_SIZE == \"Big\":\n if groupby_ngroups == \"huge_amount_groups\":\n return min(shape[0] // 2, 5000)\n return groupby_ngroups\n else:\n return groupby_ngroups\n\n\nclass weakdict(dict):\n __slots__ = (\"__weakref__\",)\n\n\ndata_cache = dict()\ndataframes_cache = dict()\n\n\ndef gen_int_data(nrows, ncols, rand_low, rand_high):\n cache_key = (\"int\", nrows, ncols, rand_low, rand_high)\n if cache_key in data_cache:\n return data_cache[cache_key]\n\n logging.info(\n \"Generating int data {} rows and {} columns [{}-{}]\".format(\n nrows, ncols, rand_low, rand_high\n )\n )\n data = {\n \"col{}\".format(i): random_state.randint(rand_low, rand_high, size=(nrows))\n for i in range(ncols)\n }\n data_cache[cache_key] = weakdict(data)\n return data\n\n\ndef gen_str_int_data(nrows, ncols, rand_low, rand_high):\n cache_key = (\"str_int\", nrows, ncols, rand_low, rand_high)\n if cache_key in data_cache:\n return data_cache[cache_key]\n\n logging.info(\n \"Generating str_int data {} rows and {} columns [{}-{}]\".format(\n nrows, ncols, rand_low, rand_high\n )\n )\n data = gen_int_data(nrows, ncols, rand_low, rand_high).copy()\n data[\"gb_col\"] = [\n \"str_{}\".format(random_state.randint(rand_low, rand_high)) for i in range(nrows)\n ]\n data_cache[cache_key] = weakdict(data)\n return data\n\n\ndef gen_data(data_type, nrows, ncols, rand_low, rand_high):\n if data_type == \"int\":\n return gen_int_data(nrows, ncols, rand_low, rand_high)\n elif data_type == \"str_int\":\n return gen_str_int_data(nrows, ncols, rand_low, rand_high)\n else:\n assert False\n\n\ndef generate_dataframe(\n impl,\n data_type,\n nrows,\n ncols,\n rand_low,\n rand_high,\n groupby_ncols=None,\n count_groups=None,\n):\n assert not (\n (groupby_ncols is None) ^ (count_groups is None)\n ), \"You must either specify both parameters 'groupby_ncols' and 'count_groups' or 
none of them.\"\n\n if groupby_ncols and count_groups:\n ncols -= groupby_ncols\n cache_key = (\n impl,\n data_type,\n nrows,\n ncols,\n rand_low,\n rand_high,\n groupby_ncols,\n count_groups,\n )\n else:\n cache_key = (impl, data_type, nrows, ncols, rand_low, rand_high)\n\n if cache_key in dataframes_cache:\n return dataframes_cache[cache_key]\n\n logging.info(\n \"Allocating {} DataFrame {}: {} rows and {} columns [{}-{}]\".format(\n impl, data_type, nrows, ncols, rand_low, rand_high\n )\n )\n data = gen_data(data_type, nrows, ncols, rand_low, rand_high)\n\n if groupby_ncols and count_groups:\n groupby_columns = [f\"groupby_col{x}\" for x in range(groupby_ncols)]\n for groupby_col in groupby_columns:\n data[groupby_col] = np.tile(np.arange(count_groups), nrows // count_groups)\n\n if impl == \"modin\":\n df = pd.DataFrame(data)\n elif impl == \"pandas\":\n df = pandas.DataFrame(data)\n else:\n assert False\n\n if groupby_ncols and count_groups:\n dataframes_cache[cache_key] = df, groupby_columns\n return df, groupby_columns\n\n dataframes_cache[cache_key] = df\n return df\n\n\ndef random_string():\n return str(uuid.uuid1())\n\n\ndef random_columns(df_columns, columns_number):\n return list(random_state.choice(df_columns, size=columns_number))\n\n\ndef random_booleans(number):\n return list(random_state.choice([True, False], size=number))\n\n\ndef execute(df):\n \"Make sure the calculations are done.\"\n return df.shape, df.dtypes\n\n\ndef get_shape_id(array):\n return \"_\".join([str(element) for element in array])\n", "path": "asv_bench/benchmarks/utils.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport modin.pandas as pd\nfrom modin.pandas.utils import from_pandas\nfrom modin.utils import to_pandas\nimport pandas\n\nfrom ..utils import (\n gen_data,\n generate_dataframe,\n RAND_LOW,\n RAND_HIGH,\n ASV_DATASET_SIZE,\n UNARY_OP_DATA_SIZE,\n execute,\n)\n\n\nclass TimeFromPandas:\n param_names = [\"shape\", \"cpus\"]\n params = [\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n [4, 16, 32],\n ]\n\n def setup(self, shape, cpus):\n self.data = pandas.DataFrame(gen_data(\"int\", *shape, RAND_LOW, RAND_HIGH))\n from modin.config import NPartitions\n\n NPartitions.get = lambda: cpus\n # trigger ray init\n pd.DataFrame([])\n\n def time_from_pandas(self, shape, cpus):\n execute(from_pandas(self.data))\n\n\nclass TimeToPandas:\n param_names = [\"shape\", \"cpus\"]\n params = [\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n [4, 16, 32],\n ]\n\n def setup(self, shape, cpus):\n from modin.config import NPartitions\n\n NPartitions.get = lambda: cpus\n self.data = generate_dataframe(\"modin\", \"int\", *shape, RAND_LOW, RAND_HIGH)\n\n def time_to_pandas(self, shape, cpus):\n # to_pandas is already synchronous\n to_pandas(self.data)\n", "path": "asv_bench/benchmarks/scalability/scalability_benchmarks.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport os\nimport logging\nimport modin.pandas as pd\nimport pandas\nimport numpy as np\nimport uuid\n\nRAND_LOW = 0\nRAND_HIGH = 100\nrandom_state = np.random.RandomState(seed=42)\n\n\ntry:\n from modin.config import NPartitions\n\n NPARTITIONS = NPartitions.get()\nexcept ImportError:\n NPARTITIONS = pd.DEFAULT_NPARTITIONS\n\ntry:\n from modin.config import TestDatasetSize, AsvImplementation, Engine\n\n ASV_USE_IMPL = AsvImplementation.get()\n ASV_DATASET_SIZE = TestDatasetSize.get() or \"Small\"\n ASV_USE_ENGINE = Engine.get()\nexcept ImportError:\n # The same benchmarking code can be run for different versions of Modin, so in\n # case of an error importing important variables, we'll just use predefined values\n ASV_USE_IMPL = os.environ.get(\"MODIN_ASV_USE_IMPL\", \"modin\")\n ASV_DATASET_SIZE = os.environ.get(\"MODIN_TEST_DATASET_SIZE\", \"Small\")\n ASV_USE_ENGINE = os.environ.get(\"MODIN_ENGINE\", \"Ray\")\n\nassert ASV_USE_IMPL in (\"modin\", \"pandas\")\n\nBINARY_OP_DATA_SIZE = {\n \"Big\": [\n ((5000, 5000), (5000, 5000)),\n # the case extremely inefficient\n # ((20, 500_000), (10, 1_000_000)),\n ((500_000, 20), (1_000_000, 10)),\n ],\n \"Small\": [\n ((250, 250), (250, 250)),\n ((20, 10_000), (10, 25_000)),\n ((10_000, 20), (25_000, 10)),\n ],\n}\n\nUNARY_OP_DATA_SIZE = {\n \"Big\": [\n (5000, 5000),\n # the case extremely inefficient\n # (10, 1_000_000),\n (1_000_000, 10),\n ],\n \"Small\": [\n (250, 250),\n (10, 10_000),\n (10_000, 10),\n ],\n}\n\nGROUPBY_NGROUPS = {\n \"Big\": [100, \"huge_amount_groups\"],\n \"Small\": [5],\n}\n\nIMPL = {\n \"modin\": pd,\n \"pandas\": pandas,\n}\n\n\ndef translator_groupby_ngroups(groupby_ngroups, shape):\n if ASV_DATASET_SIZE == \"Big\":\n if groupby_ngroups == \"huge_amount_groups\":\n return min(shape[0] // 2, 5000)\n return groupby_ngroups\n else:\n return groupby_ngroups\n\n\nclass weakdict(dict):\n __slots__ = (\"__weakref__\",)\n\n\ndata_cache = dict()\ndataframes_cache = dict()\n\n\ndef gen_int_data(nrows, ncols, rand_low, rand_high):\n cache_key = (\"int\", nrows, ncols, rand_low, rand_high)\n if cache_key in data_cache:\n return data_cache[cache_key]\n\n logging.info(\n \"Generating int data {} rows and {} columns [{}-{}]\".format(\n nrows, ncols, rand_low, rand_high\n )\n )\n data = {\n \"col{}\".format(i): random_state.randint(rand_low, rand_high, size=(nrows))\n for i in range(ncols)\n }\n data_cache[cache_key] = weakdict(data)\n return data\n\n\ndef gen_str_int_data(nrows, ncols, rand_low, rand_high):\n cache_key = (\"str_int\", nrows, ncols, rand_low, rand_high)\n if cache_key in data_cache:\n return data_cache[cache_key]\n\n logging.info(\n \"Generating str_int data {} rows and {} columns [{}-{}]\".format(\n nrows, ncols, rand_low, rand_high\n )\n )\n data = gen_int_data(nrows, ncols, rand_low, rand_high).copy()\n data[\"gb_col\"] = [\n \"str_{}\".format(random_state.randint(rand_low, rand_high)) for i in range(nrows)\n ]\n data_cache[cache_key] = weakdict(data)\n return data\n\n\ndef gen_data(data_type, nrows, ncols, rand_low, rand_high):\n if data_type == \"int\":\n return gen_int_data(nrows, ncols, rand_low, rand_high)\n elif data_type == \"str_int\":\n return gen_str_int_data(nrows, ncols, rand_low, rand_high)\n else:\n assert False\n\n\ndef generate_dataframe(\n impl,\n data_type,\n nrows,\n ncols,\n rand_low,\n rand_high,\n groupby_ncols=None,\n count_groups=None,\n):\n assert not (\n (groupby_ncols is None) ^ 
(count_groups is None)\n ), \"You must either specify both parameters 'groupby_ncols' and 'count_groups' or none of them.\"\n\n if groupby_ncols and count_groups:\n ncols -= groupby_ncols\n cache_key = (\n impl,\n data_type,\n nrows,\n ncols,\n rand_low,\n rand_high,\n groupby_ncols,\n count_groups,\n )\n else:\n cache_key = (impl, data_type, nrows, ncols, rand_low, rand_high)\n\n if cache_key in dataframes_cache:\n return dataframes_cache[cache_key]\n\n logging.info(\n \"Allocating {} DataFrame {}: {} rows and {} columns [{}-{}]\".format(\n impl, data_type, nrows, ncols, rand_low, rand_high\n )\n )\n data = gen_data(data_type, nrows, ncols, rand_low, rand_high)\n\n if groupby_ncols and count_groups:\n groupby_columns = [f\"groupby_col{x}\" for x in range(groupby_ncols)]\n for groupby_col in groupby_columns:\n data[groupby_col] = np.tile(np.arange(count_groups), nrows // count_groups)\n\n if impl == \"modin\":\n df = pd.DataFrame(data)\n elif impl == \"pandas\":\n df = pandas.DataFrame(data)\n else:\n assert False\n\n if groupby_ncols and count_groups:\n dataframes_cache[cache_key] = df, groupby_columns\n return df, groupby_columns\n\n dataframes_cache[cache_key] = df\n return df\n\n\ndef random_string():\n return str(uuid.uuid1())\n\n\ndef random_columns(df_columns, columns_number):\n return list(random_state.choice(df_columns, size=columns_number))\n\n\ndef random_booleans(number):\n return list(random_state.choice([True, False], size=number))\n\n\ndef execute(df):\n \"Make sure the calculations are done.\"\n if ASV_USE_IMPL == \"modin\":\n partitions = df._query_compiler._modin_frame._partitions\n map(lambda partition: partition.drain_call_queue(), partitions)\n if ASV_USE_ENGINE == \"Ray\":\n from ray import wait\n\n map(lambda partition: wait(partition.oid), partitions)\n elif ASV_USE_ENGINE == \"Dask\":\n from dask.distributed import wait\n\n map(lambda partition: wait(partition.future), partitions)\n elif ASV_USE_ENGINE == \"Python\":\n pass\n\n elif ASV_USE_IMPL == \"pandas\":\n pass\n else:\n raise ValueError(f\"wrong value of {ASV_USE_IMPL}\")\n\n\ndef get_shape_id(array):\n return \"_\".join([str(element) for element in array])\n", "path": "asv_bench/benchmarks/utils.py"}]} | 3,213 | 623 |
gh_patches_debug_13862 | rasdani/github-patches | git_diff | pulp__pulpcore-2665 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve proxy connection failure error message
Raise a more informative error message when the proxy rejects requests from Pulp
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485
--- END ISSUE ---
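For orientation, here is a minimal sketch (not taken from the Pulp code base) of how a proxy rejection surfaces from aiohttp and how a clearer message can be logged before re-raising; the function name, URL, and proxy address are illustrative assumptions.

```python
# Minimal sketch (not part of the original report): catching a proxy
# rejection from aiohttp and logging a more informative message.
# `fetch_via_proxy` and the addresses below are hypothetical.
import asyncio
import logging

import aiohttp

log = logging.getLogger(__name__)


async def fetch_via_proxy(url, proxy):
    async with aiohttp.ClientSession() as session:
        try:
            async with session.get(url, proxy=proxy) as response:
                return await response.read()
        except aiohttp.ClientHttpProxyError as e:
            # The proxy answered the request with an error status; report
            # which URL was being fetched and what the proxy returned.
            log.error(
                "Proxy rejected connection request during a request to %r, "
                "status=%s, message=%r",
                e.request_info.url, e.status, e.message,
            )
            raise


# Example usage (requires a running proxy):
# asyncio.run(fetch_via_proxy("https://example.org/", "http://127.0.0.1:3128"))
```

aiohttp raises `ClientHttpProxyError` when the proxy itself answers with an error status, so catching it separately is what allows the proxy failure to be named explicitly in the log line.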
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/download/http.py`
Content:
```
1 import logging
2
3 import aiohttp
4 import asyncio
5 import backoff
6
7 from .base import BaseDownloader, DownloadResult
8 from pulpcore.exceptions import (
9 DigestValidationError,
10 SizeValidationError,
11 TimeoutException,
12 )
13
14
15 log = logging.getLogger(__name__)
16
17
18 logging.getLogger("backoff").addHandler(logging.StreamHandler())
19
20
21 def http_giveup_handler(exc):
22 """
23 Inspect a raised exception and determine if we should give up.
24
25 Do not give up when the error is one of the following:
26
27 HTTP 429 - Too Many Requests
28 HTTP 5xx - Server errors
29 Socket timeout
30 TCP disconnect
31 Client SSL Error
32
33 Based on the AWS and Google Cloud guidelines:
34 https://docs.aws.amazon.com/general/latest/gr/api-retries.html
35 https://cloud.google.com/storage/docs/retry-strategy
36
37 Args:
38 exc (Exception): The exception to inspect
39
40 Returns:
41 True if the download should give up, False otherwise
42 """
43 if isinstance(exc, aiohttp.ClientResponseError):
44 server_error = 500 <= exc.code < 600
45 too_many_requests = exc.code == 429
46 return not server_error and not too_many_requests
47
48 # any other type of error (pre-filtered by the backoff decorator) shouldn't be fatal
49 return False
50
51
52 class HttpDownloader(BaseDownloader):
53 """
54 An HTTP/HTTPS Downloader built on `aiohttp`.
55
56 This downloader downloads data from one `url` and is not reused.
57
58 The downloader optionally takes a session argument, which is an `aiohttp.ClientSession`. This
59 allows many downloaders to share one `aiohttp.ClientSession` which provides a connection pool,
60 connection reuse, and keep-alives across multiple downloaders. When creating many downloaders,
61 have one session shared by all of your `HttpDownloader` objects.
62
63 A session is optional; if omitted, one session will be created, used for this downloader, and
64 then closed when the download is complete. A session that is passed in will not be closed when
65 the download is complete.
66
67 If a session is not provided, the one created by HttpDownloader uses non-default timing values.
68 Specifically, the "total" timeout is set to None and the "sock_connect" and "sock_read" are both
69 5 minutes. For more info on these settings, see the aiohttp docs:
70 http://aiohttp.readthedocs.io/en/stable/client_quickstart.html#timeouts Behaviorally, it should
71 allow for an active download to be arbitrarily long, while still detecting dead or closed
72 sessions even when TCPKeepAlive is disabled.
73
74 If a session is not provided, the one created will force TCP connection closure after each
75 request. This is done for compatibility reasons due to various issues related to session
76 continuation implementation in various servers.
77
78 `aiohttp.ClientSession` objects allows you to configure options that will apply to all
79 downloaders using that session such as auth, timeouts, headers, etc. For more info on these
80 options see the `aiohttp.ClientSession` docs for more information:
81 http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientSession
82
83 The `aiohttp.ClientSession` can additionally be configured for SSL configuration by passing in a
84 `aiohttp.TCPConnector`. For information on configuring either server or client certificate based
85 identity verification, see the aiohttp documentation:
86 http://aiohttp.readthedocs.io/en/stable/client.html#ssl-control-for-tcp-sockets
87
88 For more information on `aiohttp.BasicAuth` objects, see their docs:
89 http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.BasicAuth
90
91 Synchronous Download::
92
93 downloader = HttpDownloader('http://example.com/')
94 result = downloader.fetch()
95
96 Parallel Download::
97
98 download_coroutines = [
99 HttpDownloader('http://example.com/').run(),
100 HttpDownloader('http://pulpproject.org/').run(),
101 ]
102
103 loop = asyncio.get_event_loop()
104 done, not_done = loop.run_until_complete(asyncio.wait(download_coroutines))
105
106 for task in done:
107 try:
108 task.result() # This is a DownloadResult
109 except Exception as error:
110 pass # fatal exceptions are raised by result()
111
112 The HTTPDownloaders contain automatic retry logic if the server responds with HTTP 429 response.
113 The coroutine will automatically retry 10 times with exponential backoff before allowing a
114 final exception to be raised.
115
116 Attributes:
117 session (aiohttp.ClientSession): The session to be used by the downloader.
118 auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization or None
119 proxy (str): An optional proxy URL or None
120 proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic
121 Authorization or None
122 headers_ready_callback (callable): An optional callback that accepts a single dictionary
123 as its argument. The callback will be called when the response headers are
124 available. The dictionary passed has the header names as the keys and header values
125 as its values. e.g. `{'Transfer-Encoding': 'chunked'}`. This can also be None.
126
127 This downloader also has all of the attributes of
128 :class:`~pulpcore.plugin.download.BaseDownloader`
129 """
130
131 def __init__(
132 self,
133 url,
134 session=None,
135 auth=None,
136 proxy=None,
137 proxy_auth=None,
138 headers_ready_callback=None,
139 headers=None,
140 throttler=None,
141 max_retries=0,
142 **kwargs,
143 ):
144 """
145 Args:
146 url (str): The url to download.
147 session (aiohttp.ClientSession): The session to be used by the downloader. (optional) If
148 not specified it will open the session and close it
149 auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization (optional)
150 proxy (str): An optional proxy URL.
151 proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic
152 Authorization.
153 headers_ready_callback (callable): An optional callback that accepts a single dictionary
154 as its argument. The callback will be called when the response headers are
155 available. The dictionary passed has the header names as the keys and header values
156 as its values. e.g. `{'Transfer-Encoding': 'chunked'}`
157 headers (dict): Headers to be submitted with the request.
158 throttler (asyncio_throttle.Throttler): Throttler for asyncio.
159 max_retries (int): The maximum number of times to retry a download upon failure.
160 kwargs (dict): This accepts the parameters of
161 :class:`~pulpcore.plugin.download.BaseDownloader`.
162 """
163 if session:
164 self.session = session
165 self._close_session_on_finalize = False
166 else:
167 timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)
168 conn = aiohttp.TCPConnector({"force_close": True})
169 self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
170 self._close_session_on_finalize = True
171 self.auth = auth
172 self.proxy = proxy
173 self.proxy_auth = proxy_auth
174 self.headers_ready_callback = headers_ready_callback
175 self.download_throttler = throttler
176 self.max_retries = max_retries
177 super().__init__(url, **kwargs)
178
179 def raise_for_status(self, response):
180 """
181 Raise error if aiohttp response status is >= 400 and not silenced.
182
183 Args:
184 response (aiohttp.ClientResponse): The response to handle.
185
186 Raises:
187 aiohttp.ClientResponseError: When the response status is >= 400.
188 """
189 response.raise_for_status()
190
191 async def _handle_response(self, response):
192 """
193 Handle the aiohttp response by writing it to disk and calculating digests
194
195 Args:
196 response (aiohttp.ClientResponse): The response to handle.
197
198 Returns:
199 DownloadResult: Contains information about the result. See the DownloadResult docs for
200 more information.
201 """
202 if self.headers_ready_callback:
203 await self.headers_ready_callback(response.headers)
204 while True:
205 chunk = await response.content.read(1048576) # 1 megabyte
206 if not chunk:
207 await self.finalize()
208 break # the download is done
209 await self.handle_data(chunk)
210 return DownloadResult(
211 path=self.path,
212 artifact_attributes=self.artifact_attributes,
213 url=self.url,
214 headers=response.headers,
215 )
216
217 async def run(self, extra_data=None):
218 """
219 Run the downloader with concurrency restriction and retry logic.
220
221 This method acquires `self.semaphore` before calling the actual download implementation
222 contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`
223 wrapper around `_run()`, handles backoff-and-retry logic.
224
225 Args:
226 extra_data (dict): Extra data passed to the downloader.
227
228 Returns:
229 :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.
230
231 """
232 retryable_errors = (
233 aiohttp.ClientConnectorSSLError,
234 aiohttp.ClientConnectorError,
235 aiohttp.ClientOSError,
236 aiohttp.ClientPayloadError,
237 aiohttp.ClientResponseError,
238 aiohttp.ServerDisconnectedError,
239 TimeoutError,
240 TimeoutException,
241 DigestValidationError,
242 SizeValidationError,
243 )
244
245 async with self.semaphore:
246
247 @backoff.on_exception(
248 backoff.expo,
249 retryable_errors,
250 max_tries=self.max_retries + 1,
251 giveup=http_giveup_handler,
252 )
253 async def download_wrapper():
254 self._ensure_no_broken_file()
255 try:
256 return await self._run(extra_data=extra_data)
257 except asyncio.TimeoutError:
258 raise TimeoutException(self.url)
259
260 return await download_wrapper()
261
262 async def _run(self, extra_data=None):
263 """
264 Download, validate, and compute digests on the `url`. This is a coroutine.
265
266 This method is externally wrapped with backoff-and-retry behavior for some errors.
267 It retries with exponential backoff some number of times before allowing a final
268 exception to be raised.
269
270 This method provides the same return object type and documented in
271 :meth:`~pulpcore.plugin.download.BaseDownloader._run`.
272
273 Args:
274 extra_data (dict): Extra data passed by the downloader.
275 """
276 if self.download_throttler:
277 await self.download_throttler.acquire()
278 async with self.session.get(
279 self.url, proxy=self.proxy, proxy_auth=self.proxy_auth, auth=self.auth
280 ) as response:
281 self.raise_for_status(response)
282 to_return = await self._handle_response(response)
283 await response.release()
284 if self._close_session_on_finalize:
285 await self.session.close()
286 return to_return
287
288 def _ensure_no_broken_file(self):
289 """Upon retry reset writer back to None to get a fresh file."""
290 if self._writer is not None:
291 self._writer.delete = True
292 self._writer.close()
293 self._writer = None
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -256,6 +256,17 @@
return await self._run(extra_data=extra_data)
except asyncio.TimeoutError:
raise TimeoutException(self.url)
+ except aiohttp.ClientHttpProxyError as e:
+ log.error(
+ "Proxy {!r} rejected connection request during a request to "
+ "{!r}, status={}, message={!r}".format(
+ e.request_info.real_url,
+ e.request_info.url,
+ e.status,
+ e.message,
+ )
+ )
+ raise e
return await download_wrapper()
| {"golden_diff": "diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py\n--- a/pulpcore/download/http.py\n+++ b/pulpcore/download/http.py\n@@ -256,6 +256,17 @@\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n+ except aiohttp.ClientHttpProxyError as e:\n+ log.error(\n+ \"Proxy {!r} rejected connection request during a request to \"\n+ \"{!r}, status={}, message={!r}\".format(\n+ e.request_info.real_url,\n+ e.request_info.url,\n+ e.status,\n+ e.message,\n+ )\n+ )\n+ raise e\n \n return await download_wrapper()\n", "issue": "Improve proxy connection failure error message\nRaise a more informative error message when the proxy rejects requests from Pulp\r\n\r\nBZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485\n", "before_files": [{"content": "import logging\n\nimport aiohttp\nimport asyncio\nimport backoff\n\nfrom .base import BaseDownloader, DownloadResult\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nlogging.getLogger(\"backoff\").addHandler(logging.StreamHandler())\n\n\ndef http_giveup_handler(exc):\n \"\"\"\n Inspect a raised exception and determine if we should give up.\n\n Do not give up when the error is one of the following:\n\n HTTP 429 - Too Many Requests\n HTTP 5xx - Server errors\n Socket timeout\n TCP disconnect\n Client SSL Error\n\n Based on the AWS and Google Cloud guidelines:\n https://docs.aws.amazon.com/general/latest/gr/api-retries.html\n https://cloud.google.com/storage/docs/retry-strategy\n\n Args:\n exc (Exception): The exception to inspect\n\n Returns:\n True if the download should give up, False otherwise\n \"\"\"\n if isinstance(exc, aiohttp.ClientResponseError):\n server_error = 500 <= exc.code < 600\n too_many_requests = exc.code == 429\n return not server_error and not too_many_requests\n\n # any other type of error (pre-filtered by the backoff decorator) shouldn't be fatal\n return False\n\n\nclass HttpDownloader(BaseDownloader):\n \"\"\"\n An HTTP/HTTPS Downloader built on `aiohttp`.\n\n This downloader downloads data from one `url` and is not reused.\n\n The downloader optionally takes a session argument, which is an `aiohttp.ClientSession`. This\n allows many downloaders to share one `aiohttp.ClientSession` which provides a connection pool,\n connection reuse, and keep-alives across multiple downloaders. When creating many downloaders,\n have one session shared by all of your `HttpDownloader` objects.\n\n A session is optional; if omitted, one session will be created, used for this downloader, and\n then closed when the download is complete. A session that is passed in will not be closed when\n the download is complete.\n\n If a session is not provided, the one created by HttpDownloader uses non-default timing values.\n Specifically, the \"total\" timeout is set to None and the \"sock_connect\" and \"sock_read\" are both\n 5 minutes. For more info on these settings, see the aiohttp docs:\n http://aiohttp.readthedocs.io/en/stable/client_quickstart.html#timeouts Behaviorally, it should\n allow for an active download to be arbitrarily long, while still detecting dead or closed\n sessions even when TCPKeepAlive is disabled.\n\n If a session is not provided, the one created will force TCP connection closure after each\n request. 
This is done for compatibility reasons due to various issues related to session\n continuation implementation in various servers.\n\n `aiohttp.ClientSession` objects allows you to configure options that will apply to all\n downloaders using that session such as auth, timeouts, headers, etc. For more info on these\n options see the `aiohttp.ClientSession` docs for more information:\n http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientSession\n\n The `aiohttp.ClientSession` can additionally be configured for SSL configuration by passing in a\n `aiohttp.TCPConnector`. For information on configuring either server or client certificate based\n identity verification, see the aiohttp documentation:\n http://aiohttp.readthedocs.io/en/stable/client.html#ssl-control-for-tcp-sockets\n\n For more information on `aiohttp.BasicAuth` objects, see their docs:\n http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.BasicAuth\n\n Synchronous Download::\n\n downloader = HttpDownloader('http://example.com/')\n result = downloader.fetch()\n\n Parallel Download::\n\n download_coroutines = [\n HttpDownloader('http://example.com/').run(),\n HttpDownloader('http://pulpproject.org/').run(),\n ]\n\n loop = asyncio.get_event_loop()\n done, not_done = loop.run_until_complete(asyncio.wait(download_coroutines))\n\n for task in done:\n try:\n task.result() # This is a DownloadResult\n except Exception as error:\n pass # fatal exceptions are raised by result()\n\n The HTTPDownloaders contain automatic retry logic if the server responds with HTTP 429 response.\n The coroutine will automatically retry 10 times with exponential backoff before allowing a\n final exception to be raised.\n\n Attributes:\n session (aiohttp.ClientSession): The session to be used by the downloader.\n auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization or None\n proxy (str): An optional proxy URL or None\n proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic\n Authorization or None\n headers_ready_callback (callable): An optional callback that accepts a single dictionary\n as its argument. The callback will be called when the response headers are\n available. The dictionary passed has the header names as the keys and header values\n as its values. e.g. `{'Transfer-Encoding': 'chunked'}`. This can also be None.\n\n This downloader also has all of the attributes of\n :class:`~pulpcore.plugin.download.BaseDownloader`\n \"\"\"\n\n def __init__(\n self,\n url,\n session=None,\n auth=None,\n proxy=None,\n proxy_auth=None,\n headers_ready_callback=None,\n headers=None,\n throttler=None,\n max_retries=0,\n **kwargs,\n ):\n \"\"\"\n Args:\n url (str): The url to download.\n session (aiohttp.ClientSession): The session to be used by the downloader. (optional) If\n not specified it will open the session and close it\n auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization (optional)\n proxy (str): An optional proxy URL.\n proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic\n Authorization.\n headers_ready_callback (callable): An optional callback that accepts a single dictionary\n as its argument. The callback will be called when the response headers are\n available. The dictionary passed has the header names as the keys and header values\n as its values. e.g. 
`{'Transfer-Encoding': 'chunked'}`\n headers (dict): Headers to be submitted with the request.\n throttler (asyncio_throttle.Throttler): Throttler for asyncio.\n max_retries (int): The maximum number of times to retry a download upon failure.\n kwargs (dict): This accepts the parameters of\n :class:`~pulpcore.plugin.download.BaseDownloader`.\n \"\"\"\n if session:\n self.session = session\n self._close_session_on_finalize = False\n else:\n timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)\n conn = aiohttp.TCPConnector({\"force_close\": True})\n self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)\n self._close_session_on_finalize = True\n self.auth = auth\n self.proxy = proxy\n self.proxy_auth = proxy_auth\n self.headers_ready_callback = headers_ready_callback\n self.download_throttler = throttler\n self.max_retries = max_retries\n super().__init__(url, **kwargs)\n\n def raise_for_status(self, response):\n \"\"\"\n Raise error if aiohttp response status is >= 400 and not silenced.\n\n Args:\n response (aiohttp.ClientResponse): The response to handle.\n\n Raises:\n aiohttp.ClientResponseError: When the response status is >= 400.\n \"\"\"\n response.raise_for_status()\n\n async def _handle_response(self, response):\n \"\"\"\n Handle the aiohttp response by writing it to disk and calculating digests\n\n Args:\n response (aiohttp.ClientResponse): The response to handle.\n\n Returns:\n DownloadResult: Contains information about the result. See the DownloadResult docs for\n more information.\n \"\"\"\n if self.headers_ready_callback:\n await self.headers_ready_callback(response.headers)\n while True:\n chunk = await response.content.read(1048576) # 1 megabyte\n if not chunk:\n await self.finalize()\n break # the download is done\n await self.handle_data(chunk)\n return DownloadResult(\n path=self.path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=response.headers,\n )\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction and retry logic.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\n wrapper around `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n retryable_errors = (\n aiohttp.ClientConnectorSSLError,\n aiohttp.ClientConnectorError,\n aiohttp.ClientOSError,\n aiohttp.ClientPayloadError,\n aiohttp.ClientResponseError,\n aiohttp.ServerDisconnectedError,\n TimeoutError,\n TimeoutException,\n DigestValidationError,\n SizeValidationError,\n )\n\n async with self.semaphore:\n\n @backoff.on_exception(\n backoff.expo,\n retryable_errors,\n max_tries=self.max_retries + 1,\n giveup=http_giveup_handler,\n )\n async def download_wrapper():\n self._ensure_no_broken_file()\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n\n return await download_wrapper()\n\n async def _run(self, extra_data=None):\n \"\"\"\n Download, validate, and compute digests on the `url`. 
This is a coroutine.\n\n This method is externally wrapped with backoff-and-retry behavior for some errors.\n It retries with exponential backoff some number of times before allowing a final\n exception to be raised.\n\n This method provides the same return object type and documented in\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n\n Args:\n extra_data (dict): Extra data passed by the downloader.\n \"\"\"\n if self.download_throttler:\n await self.download_throttler.acquire()\n async with self.session.get(\n self.url, proxy=self.proxy, proxy_auth=self.proxy_auth, auth=self.auth\n ) as response:\n self.raise_for_status(response)\n to_return = await self._handle_response(response)\n await response.release()\n if self._close_session_on_finalize:\n await self.session.close()\n return to_return\n\n def _ensure_no_broken_file(self):\n \"\"\"Upon retry reset writer back to None to get a fresh file.\"\"\"\n if self._writer is not None:\n self._writer.delete = True\n self._writer.close()\n self._writer = None\n", "path": "pulpcore/download/http.py"}], "after_files": [{"content": "import logging\n\nimport aiohttp\nimport asyncio\nimport backoff\n\nfrom .base import BaseDownloader, DownloadResult\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nlogging.getLogger(\"backoff\").addHandler(logging.StreamHandler())\n\n\ndef http_giveup_handler(exc):\n \"\"\"\n Inspect a raised exception and determine if we should give up.\n\n Do not give up when the error is one of the following:\n\n HTTP 429 - Too Many Requests\n HTTP 5xx - Server errors\n Socket timeout\n TCP disconnect\n Client SSL Error\n\n Based on the AWS and Google Cloud guidelines:\n https://docs.aws.amazon.com/general/latest/gr/api-retries.html\n https://cloud.google.com/storage/docs/retry-strategy\n\n Args:\n exc (Exception): The exception to inspect\n\n Returns:\n True if the download should give up, False otherwise\n \"\"\"\n if isinstance(exc, aiohttp.ClientResponseError):\n server_error = 500 <= exc.code < 600\n too_many_requests = exc.code == 429\n return not server_error and not too_many_requests\n\n # any other type of error (pre-filtered by the backoff decorator) shouldn't be fatal\n return False\n\n\nclass HttpDownloader(BaseDownloader):\n \"\"\"\n An HTTP/HTTPS Downloader built on `aiohttp`.\n\n This downloader downloads data from one `url` and is not reused.\n\n The downloader optionally takes a session argument, which is an `aiohttp.ClientSession`. This\n allows many downloaders to share one `aiohttp.ClientSession` which provides a connection pool,\n connection reuse, and keep-alives across multiple downloaders. When creating many downloaders,\n have one session shared by all of your `HttpDownloader` objects.\n\n A session is optional; if omitted, one session will be created, used for this downloader, and\n then closed when the download is complete. A session that is passed in will not be closed when\n the download is complete.\n\n If a session is not provided, the one created by HttpDownloader uses non-default timing values.\n Specifically, the \"total\" timeout is set to None and the \"sock_connect\" and \"sock_read\" are both\n 5 minutes. 
For more info on these settings, see the aiohttp docs:\n http://aiohttp.readthedocs.io/en/stable/client_quickstart.html#timeouts Behaviorally, it should\n allow for an active download to be arbitrarily long, while still detecting dead or closed\n sessions even when TCPKeepAlive is disabled.\n\n If a session is not provided, the one created will force TCP connection closure after each\n request. This is done for compatibility reasons due to various issues related to session\n continuation implementation in various servers.\n\n `aiohttp.ClientSession` objects allows you to configure options that will apply to all\n downloaders using that session such as auth, timeouts, headers, etc. For more info on these\n options see the `aiohttp.ClientSession` docs for more information:\n http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientSession\n\n The `aiohttp.ClientSession` can additionally be configured for SSL configuration by passing in a\n `aiohttp.TCPConnector`. For information on configuring either server or client certificate based\n identity verification, see the aiohttp documentation:\n http://aiohttp.readthedocs.io/en/stable/client.html#ssl-control-for-tcp-sockets\n\n For more information on `aiohttp.BasicAuth` objects, see their docs:\n http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.BasicAuth\n\n Synchronous Download::\n\n downloader = HttpDownloader('http://example.com/')\n result = downloader.fetch()\n\n Parallel Download::\n\n download_coroutines = [\n HttpDownloader('http://example.com/').run(),\n HttpDownloader('http://pulpproject.org/').run(),\n ]\n\n loop = asyncio.get_event_loop()\n done, not_done = loop.run_until_complete(asyncio.wait(download_coroutines))\n\n for task in done:\n try:\n task.result() # This is a DownloadResult\n except Exception as error:\n pass # fatal exceptions are raised by result()\n\n The HTTPDownloaders contain automatic retry logic if the server responds with HTTP 429 response.\n The coroutine will automatically retry 10 times with exponential backoff before allowing a\n final exception to be raised.\n\n Attributes:\n session (aiohttp.ClientSession): The session to be used by the downloader.\n auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization or None\n proxy (str): An optional proxy URL or None\n proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic\n Authorization or None\n headers_ready_callback (callable): An optional callback that accepts a single dictionary\n as its argument. The callback will be called when the response headers are\n available. The dictionary passed has the header names as the keys and header values\n as its values. e.g. `{'Transfer-Encoding': 'chunked'}`. This can also be None.\n\n This downloader also has all of the attributes of\n :class:`~pulpcore.plugin.download.BaseDownloader`\n \"\"\"\n\n def __init__(\n self,\n url,\n session=None,\n auth=None,\n proxy=None,\n proxy_auth=None,\n headers_ready_callback=None,\n headers=None,\n throttler=None,\n max_retries=0,\n **kwargs,\n ):\n \"\"\"\n Args:\n url (str): The url to download.\n session (aiohttp.ClientSession): The session to be used by the downloader. 
(optional) If\n not specified it will open the session and close it\n auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization (optional)\n proxy (str): An optional proxy URL.\n proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic\n Authorization.\n headers_ready_callback (callable): An optional callback that accepts a single dictionary\n as its argument. The callback will be called when the response headers are\n available. The dictionary passed has the header names as the keys and header values\n as its values. e.g. `{'Transfer-Encoding': 'chunked'}`\n headers (dict): Headers to be submitted with the request.\n throttler (asyncio_throttle.Throttler): Throttler for asyncio.\n max_retries (int): The maximum number of times to retry a download upon failure.\n kwargs (dict): This accepts the parameters of\n :class:`~pulpcore.plugin.download.BaseDownloader`.\n \"\"\"\n if session:\n self.session = session\n self._close_session_on_finalize = False\n else:\n timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)\n conn = aiohttp.TCPConnector({\"force_close\": True})\n self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)\n self._close_session_on_finalize = True\n self.auth = auth\n self.proxy = proxy\n self.proxy_auth = proxy_auth\n self.headers_ready_callback = headers_ready_callback\n self.download_throttler = throttler\n self.max_retries = max_retries\n super().__init__(url, **kwargs)\n\n def raise_for_status(self, response):\n \"\"\"\n Raise error if aiohttp response status is >= 400 and not silenced.\n\n Args:\n response (aiohttp.ClientResponse): The response to handle.\n\n Raises:\n aiohttp.ClientResponseError: When the response status is >= 400.\n \"\"\"\n response.raise_for_status()\n\n async def _handle_response(self, response):\n \"\"\"\n Handle the aiohttp response by writing it to disk and calculating digests\n\n Args:\n response (aiohttp.ClientResponse): The response to handle.\n\n Returns:\n DownloadResult: Contains information about the result. See the DownloadResult docs for\n more information.\n \"\"\"\n if self.headers_ready_callback:\n await self.headers_ready_callback(response.headers)\n while True:\n chunk = await response.content.read(1048576) # 1 megabyte\n if not chunk:\n await self.finalize()\n break # the download is done\n await self.handle_data(chunk)\n return DownloadResult(\n path=self.path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=response.headers,\n )\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction and retry logic.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. 
This ensures that the semaphore stays acquired even as the `backoff`\n wrapper around `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n retryable_errors = (\n aiohttp.ClientConnectorSSLError,\n aiohttp.ClientConnectorError,\n aiohttp.ClientOSError,\n aiohttp.ClientPayloadError,\n aiohttp.ClientResponseError,\n aiohttp.ServerDisconnectedError,\n TimeoutError,\n TimeoutException,\n DigestValidationError,\n SizeValidationError,\n )\n\n async with self.semaphore:\n\n @backoff.on_exception(\n backoff.expo,\n retryable_errors,\n max_tries=self.max_retries + 1,\n giveup=http_giveup_handler,\n )\n async def download_wrapper():\n self._ensure_no_broken_file()\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n except aiohttp.ClientHttpProxyError as e:\n log.error(\n \"Proxy {!r} rejected connection request during a request to \"\n \"{!r}, status={}, message={!r}\".format(\n e.request_info.real_url,\n e.request_info.url,\n e.status,\n e.message,\n )\n )\n raise e\n\n return await download_wrapper()\n\n async def _run(self, extra_data=None):\n \"\"\"\n Download, validate, and compute digests on the `url`. This is a coroutine.\n\n This method is externally wrapped with backoff-and-retry behavior for some errors.\n It retries with exponential backoff some number of times before allowing a final\n exception to be raised.\n\n This method provides the same return object type and documented in\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n\n Args:\n extra_data (dict): Extra data passed by the downloader.\n \"\"\"\n if self.download_throttler:\n await self.download_throttler.acquire()\n async with self.session.get(\n self.url, proxy=self.proxy, proxy_auth=self.proxy_auth, auth=self.auth\n ) as response:\n self.raise_for_status(response)\n to_return = await self._handle_response(response)\n await response.release()\n if self._close_session_on_finalize:\n await self.session.close()\n return to_return\n\n def _ensure_no_broken_file(self):\n \"\"\"Upon retry reset writer back to None to get a fresh file.\"\"\"\n if self._writer is not None:\n self._writer.delete = True\n self._writer.close()\n self._writer = None\n", "path": "pulpcore/download/http.py"}]} | 3,511 | 164 |
gh_patches_debug_599 | rasdani/github-patches | git_diff | pex-tool__pex-1834 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.95
On the docket:
+ [x] Lock creation should skip Windows-only requirements and / or allow selecting target platforms (OS classes). #1821
+ [x] Feature request: "universal" lock mode can reject unsupported platforms #1595
+ [x] Avoid ENOEXEC for --venv shebangs. #1828
+ [x] pex3 lock export doesn't seem to respect the platform flag. #1826
+ [x] Clarify pex3 lock export command. #1645
+ [x] Support exporting PYTHONPATH before running user code #1825
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.94"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.94"
+__version__ = "2.1.95"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.94\"\n+__version__ = \"2.1.95\"\n", "issue": "Release 2.1.95\nOn the docket:\r\n+ [x] Lock creation should skip Windows-only requirements and / or allow selecting target platforms (OS classes). #1821\r\n+ [x] Feature request: \"universal\" lock mode can reject unsupported platforms #1595\r\n+ [x] Avoid ENOEXEC for --venv shebangs. #1828 \r\n+ [x] pex3 lock export does't seem to respect the platform flag. #1826\r\n+ [x] Clarify pex3 lock export command. #1645\r\n+ [x] Support exporting PYTHONPATH before running user code #1825\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.94\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.95\"\n", "path": "pex/version.py"}]} | 454 | 96 |
gh_patches_debug_17669 | rasdani/github-patches | git_diff | gammapy__gammapy-1690 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IndexError from SpectrumEnergyGroupMaker
In this example CTA DC1 analysis, the SpectrumEnergyGroupMaker gives an IndexError if e.g. `emax=100 TeV` is chosen (see comment in cell 23):
https://github.com/gammasky/cta-analyses/blob/bf571038b389b3eb13ce8ba81b35384ebd4b6750/dc-1-checks/hess_j1702/spectrum.ipynb
--- END ISSUE ---
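As a rough illustration of the suspected failure mode (an assumption, not code from gammapy): if the requested `emax` lies above the highest edge of the observation's energy binning, the lookup for a matching bin index comes back empty and indexing it raises the IndexError.

```python
# Plain-numpy sketch of the suspected failure mode (an assumption, not code
# from gammapy): with bin edges that stop below the requested emax, the
# index lookup returns an empty array and indexing it raises IndexError.
import numpy as np

ebounds = np.logspace(-2, 1.5, 31)  # hypothetical edges, ~0.01 to ~31.6 TeV
emax = 100.0  # TeV, beyond the last edge

candidates = np.where(ebounds >= emax)[0]
print(candidates.size)  # 0 -> no edge at or above emax
try:
    bin_idx_max = candidates[0]
except IndexError as exc:
    print("IndexError:", exc)
```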
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gammapy/spectrum/energy_group.py`
Content:
```
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 """Spectrum energy bin grouping.
3
4 There are three classes:
5
6 * SpectrumEnergyGroup - one group
7 * SpectrumEnergyGroups - one grouping, i.e. collection of groups
8 * SpectrumEnergyGroupMaker - algorithms to compute groupings.
9
10 Algorithms to compute groupings are both on SpectrumEnergyGroups and SpectrumEnergyGroupMaker.
11 The difference is that SpectrumEnergyGroups contains the algorithms and book-keeping that
12 just have to do with the groups, whereas SpectrumEnergyGroupMaker also accesses
13 information from SpectrumObservation (e.g. safe energy range or counts data) and
14 implements higher-level algorithms.
15 """
16 from __future__ import absolute_import, division, print_function, unicode_literals
17 from collections import OrderedDict
18 from copy import deepcopy
19 import numpy as np
20 import logging
21 from ..extern.six.moves import UserList
22 from astropy.units import Quantity
23 from astropy.table import Table
24 from astropy.table import vstack as table_vstack
25 from ..utils.table import table_from_row_data, table_row_to_dict
26
27 __all__ = [
28 'SpectrumEnergyGroup',
29 'SpectrumEnergyGroups',
30 'SpectrumEnergyGroupMaker',
31 ]
32
33 log = logging.getLogger(__name__)
34
35
36 class SpectrumEnergyGroup(object):
37 """Spectrum energy group.
38
39 Represents a consecutive range of bin indices (both ends inclusive).
40 """
41 fields = [
42 'energy_group_idx', 'bin_idx_min', 'bin_idx_max',
43 'bin_type', 'energy_min', 'energy_max',
44 ]
45 """List of data members of this class."""
46
47 valid_bin_types = ['normal', 'underflow', 'overflow']
48 """Valid values for ``bin_types`` attribute."""
49
50 def __init__(self, energy_group_idx, bin_idx_min, bin_idx_max, bin_type,
51 energy_min, energy_max):
52 self.energy_group_idx = energy_group_idx
53 self.bin_idx_min = bin_idx_min
54 self.bin_idx_max = bin_idx_max
55 if bin_type not in self.valid_bin_types:
56 raise ValueError('Invalid bin type: {}'.format(bin_type))
57 self.bin_type = bin_type
58 self.energy_min = Quantity(energy_min)
59 self.energy_max = Quantity(energy_max)
60
61 @classmethod
62 def from_dict(cls, data):
63 data = dict((_, data[_]) for _ in cls.fields)
64 return cls(**data)
65
66 @property
67 def _data(self):
68 return [(_, getattr(self, _)) for _ in self.fields]
69
70 def __repr__(self):
71 txt = ['{}={!r}'.format(k, v) for k, v in self._data]
72 return '{}({})'.format(self.__class__.__name__, ', '.join(txt))
73
74 def __eq__(self, other):
75 return self.to_dict() == other.to_dict()
76
77 def to_dict(self):
78 return OrderedDict(self._data)
79
80 @property
81 def bin_idx_array(self):
82 """Numpy array of bin indices in the group."""
83 return np.arange(self.bin_idx_min, self.bin_idx_max + 1)
84
85 @property
86 def bin_table(self):
87 """Create `~astropy.table.Table` with bins in the group.
88
89 Columns are: ``energy_group_idx``, ``bin_idx``, ``bin_type``
90 """
91 table = Table()
92 table['bin_idx'] = self.bin_idx_array
93 table['energy_group_idx'] = self.energy_group_idx
94 table['bin_type'] = self.bin_type
95 table['energy_min'] = self.energy_min
96 table['energy_max'] = self.energy_max
97 return table
98
99
100 class SpectrumEnergyGroups(UserList):
101 """List of `~gammapy.spectrum.SpectrumEnergyGroup` objects.
102
103 A helper class used by the `gammapy.spectrum.SpectrumEnergyGroupsMaker`.
104 """
105
106 def __repr__(self):
107 return '{}(len={})'.format(self.__class__.__name__, len(self))
108
109 def __str__(self):
110 ss = '{}:\n'.format(self.__class__.__name__)
111 lines = self.to_group_table().pformat(max_width=-1, max_lines=-1)
112 ss += '\n'.join(lines)
113 return ss + '\n'
114
115 def copy(self):
116 """Deep copy"""
117 return deepcopy(self)
118
119 @classmethod
120 def from_total_table(cls, table):
121 """Create list of SpectrumEnergyGroup objects from table."""
122 groups = cls()
123
124 for energy_group_idx in np.unique(table['energy_group_idx']):
125 mask = table['energy_group_idx'] == energy_group_idx
126 group_table = table[mask]
127 bin_idx_min = group_table['bin_idx'][0]
128 bin_idx_max = group_table['bin_idx'][-1]
129 if len(set(group_table['bin_type'])) > 1:
130 raise ValueError('Inconsistent bin_type within group.')
131 bin_type = group_table['bin_type'][0]
132 energy_min = group_table['energy_min'].quantity[0]
133 energy_max = group_table['energy_max'].quantity[-1]
134
135 group = SpectrumEnergyGroup(
136 energy_group_idx=energy_group_idx,
137 bin_idx_min=bin_idx_min,
138 bin_idx_max=bin_idx_max,
139 bin_type=bin_type,
140 energy_min=energy_min,
141 energy_max=energy_max,
142 )
143 groups.append(group)
144
145 return groups
146
147 @classmethod
148 def from_group_table(cls, table):
149 """Create from energy groups in `~astropy.table.Table` format."""
150 return cls([
151 SpectrumEnergyGroup.from_dict(table_row_to_dict(row))
152 for row in table
153 ])
154
155 def to_total_table(self):
156 """Table with one energy bin per row (`~astropy.table.Table`).
157
158 Columns:
159
160 * ``energy_group_idx`` - Energy group index (int)
161 * ``bin_idx`` - Energy bin index (int)
162 * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)
163
164 There are no energy columns, because the per-bin energy info
165 was lost during grouping.
166 """
167 tables = [group.bin_table for group in self]
168 return table_vstack(tables)
169
170 def to_group_table(self):
171 """Table with one energy group per row (`~astropy.table.Table`).
172
173 Columns:
174
175 * ``energy_group_idx`` - Energy group index (int)
176 * ``energy_group_n_bins`` - Number of bins in the energy group (int)
177 * ``bin_idx_min`` - First bin index in the energy group (int)
178 * ``bin_idx_max`` - Last bin index in the energy group (int)
179 * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)
180 * ``energy_min`` - Energy group start energy (Quantity)
181 * ``energy_max`` - Energy group end energy (Quantity)
182 """
183 rows = [group.to_dict() for group in self]
184 table = table_from_row_data(rows)
185 return table
186
187 @property
188 def energy_range(self):
189 """Total energy range (`~astropy.units.Quantity` of length 2)."""
190 return Quantity([self[0].energy_min, self[-1].energy_max])
191
192 @property
193 def energy_bounds(self):
194 """Energy group bounds (`~astropy.units.Quantity`)."""
195 energy = [_.energy_min for _ in self]
196 energy.append(self[-1].energy_max)
197 return Quantity(energy)
198
199
200 class SpectrumEnergyGroupMaker(object):
201 """Energy bin groups for spectral analysis.
202
203 This class contains both methods that run algorithms
204 that compute groupings as well as the results as data members
205 and methods to debug and assess the results.
206
207 The input ``obs`` is used read-only, to access the counts energy
208 binning, as well as some other info that is used for energy bin grouping.
209
210 Parameters
211 ----------
212 obs : `~gammapy.spectrum.SpectrumObservation`
213 Spectrum observation
214
215 Attributes
216 ----------
217 obs : `~gammapy.spectrum.SpectrumObservation`
218 Spectrum observation data
219 groups : `~gammapy.spectrum.SpectrumEnergyGroups`
220 List of energy groups
221
222 See also
223 --------
224 SpectrumEnergyGroups, SpectrumEnergyGroup, FluxPointEstimator
225 """
226
227 def __init__(self, obs):
228 self.obs = obs
229 self.groups = None
230
231 def groups_from_obs(self):
232 """Compute energy groups with one group per energy bin."""
233 ebounds_obs = self.obs.e_reco
234 size = ebounds_obs.nbins
235 table = Table()
236 table['bin_idx'] = np.arange(size)
237 table['energy_group_idx'] = np.arange(size)
238 table['bin_type'] = ['normal'] * size
239 table['energy_min'] = ebounds_obs.lower_bounds
240 table['energy_max'] = ebounds_obs.upper_bounds
241 self.groups = SpectrumEnergyGroups.from_total_table(table)
242
243 def compute_groups_fixed(self, ebounds):
244 """Apply grouping for a given fixed energy binning.
245
246 This groups the observation ``obs.e_reco`` binning and
247 ``ebounds`` using a nearest neighbor match on the bin edges.
248
249 Parameters
250 ----------
251 ebounds : `~astropy.units.Quantity`
252 Energy bounds array
253 """
254 ebounds_src = self.obs.e_reco
255 bin_edges_src = np.arange(len(ebounds_src))
256
257 temp = np.interp(ebounds, ebounds_src, bin_edges_src)
258 bin_edges = np.round(temp, decimals=0).astype(np.int)
259
260 # Check for duplicates
261 duplicates_removed = set(bin_edges)
262 if len(duplicates_removed) != len(bin_edges):
263 warn_str = "Input binning\n{}\n contains bins that are finer than the"
264 warn_str += " target binning\n{}\n or outside the valid range"
265 log.warning(warn_str.format(ebounds, ebounds_src))
266 bin_edges = sorted(duplicates_removed)
267
268 # Create normal bins
269 groups = []
270 for idx in np.arange(len(bin_edges) - 1):
271 group = SpectrumEnergyGroup(
272 energy_group_idx=-1,
273 bin_idx_min=bin_edges[idx],
274 bin_idx_max=bin_edges[idx + 1] - 1,
275 bin_type='normal',
276 energy_min=ebounds_src[bin_edges[idx]],
277 energy_max=ebounds_src[bin_edges[idx + 1]],
278 )
279 groups.append(group)
280
281 # Add underflow bin
282 start_edge = groups[0].bin_idx_min
283 if start_edge != 0:
284 underflow = SpectrumEnergyGroup(
285 energy_group_idx=-1,
286 bin_idx_min=0,
287 bin_idx_max=start_edge - 1,
288 bin_type='underflow',
289 energy_min=ebounds_src[0],
290 energy_max=ebounds_src[start_edge],
291 )
292 groups.insert(0, underflow)
293
294 # Add overflow bin
295 end_edge = groups[-1].bin_idx_max
296 if end_edge != ebounds_src.nbins - 1:
297 overflow = SpectrumEnergyGroup(
298 energy_group_idx=-1,
299 bin_idx_min=end_edge + 1,
300 bin_idx_max=ebounds_src.nbins - 1,
301 bin_type='overflow',
302 energy_min=ebounds_src[end_edge + 1],
303 energy_max=ebounds_src[-1],
304 )
305 groups.append(overflow)
306
307 # Set energy_group_idx
308 for group_idx, group in enumerate(groups):
309 group.energy_group_idx = group_idx
310
311 self.groups = SpectrumEnergyGroups(groups)
312
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gammapy/spectrum/energy_group.py b/gammapy/spectrum/energy_group.py
--- a/gammapy/spectrum/energy_group.py
+++ b/gammapy/spectrum/energy_group.py
@@ -251,7 +251,7 @@
ebounds : `~astropy.units.Quantity`
Energy bounds array
"""
- ebounds_src = self.obs.e_reco
+ ebounds_src = self.obs.e_reco.to(ebounds.unit)
bin_edges_src = np.arange(len(ebounds_src))
temp = np.interp(ebounds, ebounds_src, bin_edges_src)
@@ -278,6 +278,11 @@
)
groups.append(group)
+ if groups == []:
+ err_str = "Input binning\n{}\n has no overlap with"
+ err_str += " target binning\n{}"
+ raise ValueError(err_str.format(ebounds, ebounds_src))
+
# Add underflow bin
start_edge = groups[0].bin_idx_min
if start_edge != 0:
| {"golden_diff": "diff --git a/gammapy/spectrum/energy_group.py b/gammapy/spectrum/energy_group.py\n--- a/gammapy/spectrum/energy_group.py\n+++ b/gammapy/spectrum/energy_group.py\n@@ -251,7 +251,7 @@\n ebounds : `~astropy.units.Quantity`\n Energy bounds array\n \"\"\"\n- ebounds_src = self.obs.e_reco\n+ ebounds_src = self.obs.e_reco.to(ebounds.unit)\n bin_edges_src = np.arange(len(ebounds_src))\n \n temp = np.interp(ebounds, ebounds_src, bin_edges_src)\n@@ -278,6 +278,11 @@\n )\n groups.append(group)\n \n+ if groups == []:\n+ err_str = \"Input binning\\n{}\\n has no overlap with\"\n+ err_str += \" target binning\\n{}\"\n+ raise ValueError(err_str.format(ebounds, ebounds_src))\n+\n # Add underflow bin\n start_edge = groups[0].bin_idx_min\n if start_edge != 0:\n", "issue": "IndexError from SpectrumEnergyGroupMaker\nIn this example CTA DC1 analysis the SpectrumEnergyGroupMaker give an IndexError if e.g. `emax=100 TeV` is chosen (see comment in cell 23):\r\n\r\nhttps://github.com/gammasky/cta-analyses/blob/bf571038b389b3eb13ce8ba81b35384ebd4b6750/dc-1-checks/hess_j1702/spectrum.ipynb\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Spectrum energy bin grouping.\n\nThere are three classes:\n\n* SpectrumEnergyGroup - one group\n* SpectrumEnergyGroups - one grouping, i.e. collection of groups\n* SpectrumEnergyGroupMaker - algorithms to compute groupings.\n\nAlgorithms to compute groupings are both on SpectrumEnergyGroups and SpectrumEnergyGroupMaker.\nThe difference is that SpectrumEnergyGroups contains the algorithms and book-keeping that\njust have to do with the groups, whereas SpectrumEnergyGroupMaker also accesses\ninformation from SpectrumObservation (e.g. safe energy range or counts data) and\nimplements higher-level algorithms.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom collections import OrderedDict\nfrom copy import deepcopy\nimport numpy as np\nimport logging\nfrom ..extern.six.moves import UserList\nfrom astropy.units import Quantity\nfrom astropy.table import Table\nfrom astropy.table import vstack as table_vstack\nfrom ..utils.table import table_from_row_data, table_row_to_dict\n\n__all__ = [\n 'SpectrumEnergyGroup',\n 'SpectrumEnergyGroups',\n 'SpectrumEnergyGroupMaker',\n]\n\nlog = logging.getLogger(__name__)\n\n\nclass SpectrumEnergyGroup(object):\n \"\"\"Spectrum energy group.\n\n Represents a consecutive range of bin indices (both ends inclusive).\n \"\"\"\n fields = [\n 'energy_group_idx', 'bin_idx_min', 'bin_idx_max',\n 'bin_type', 'energy_min', 'energy_max',\n ]\n \"\"\"List of data members of this class.\"\"\"\n\n valid_bin_types = ['normal', 'underflow', 'overflow']\n \"\"\"Valid values for ``bin_types`` attribute.\"\"\"\n\n def __init__(self, energy_group_idx, bin_idx_min, bin_idx_max, bin_type,\n energy_min, energy_max):\n self.energy_group_idx = energy_group_idx\n self.bin_idx_min = bin_idx_min\n self.bin_idx_max = bin_idx_max\n if bin_type not in self.valid_bin_types:\n raise ValueError('Invalid bin type: {}'.format(bin_type))\n self.bin_type = bin_type\n self.energy_min = Quantity(energy_min)\n self.energy_max = Quantity(energy_max)\n\n @classmethod\n def from_dict(cls, data):\n data = dict((_, data[_]) for _ in cls.fields)\n return cls(**data)\n\n @property\n def _data(self):\n return [(_, getattr(self, _)) for _ in self.fields]\n\n def __repr__(self):\n txt = ['{}={!r}'.format(k, v) for k, v in self._data]\n return '{}({})'.format(self.__class__.__name__, 
', '.join(txt))\n\n def __eq__(self, other):\n return self.to_dict() == other.to_dict()\n\n def to_dict(self):\n return OrderedDict(self._data)\n\n @property\n def bin_idx_array(self):\n \"\"\"Numpy array of bin indices in the group.\"\"\"\n return np.arange(self.bin_idx_min, self.bin_idx_max + 1)\n\n @property\n def bin_table(self):\n \"\"\"Create `~astropy.table.Table` with bins in the group.\n\n Columns are: ``energy_group_idx``, ``bin_idx``, ``bin_type``\n \"\"\"\n table = Table()\n table['bin_idx'] = self.bin_idx_array\n table['energy_group_idx'] = self.energy_group_idx\n table['bin_type'] = self.bin_type\n table['energy_min'] = self.energy_min\n table['energy_max'] = self.energy_max\n return table\n\n\nclass SpectrumEnergyGroups(UserList):\n \"\"\"List of `~gammapy.spectrum.SpectrumEnergyGroup` objects.\n\n A helper class used by the `gammapy.spectrum.SpectrumEnergyGroupsMaker`.\n \"\"\"\n\n def __repr__(self):\n return '{}(len={})'.format(self.__class__.__name__, len(self))\n\n def __str__(self):\n ss = '{}:\\n'.format(self.__class__.__name__)\n lines = self.to_group_table().pformat(max_width=-1, max_lines=-1)\n ss += '\\n'.join(lines)\n return ss + '\\n'\n\n def copy(self):\n \"\"\"Deep copy\"\"\"\n return deepcopy(self)\n\n @classmethod\n def from_total_table(cls, table):\n \"\"\"Create list of SpectrumEnergyGroup objects from table.\"\"\"\n groups = cls()\n\n for energy_group_idx in np.unique(table['energy_group_idx']):\n mask = table['energy_group_idx'] == energy_group_idx\n group_table = table[mask]\n bin_idx_min = group_table['bin_idx'][0]\n bin_idx_max = group_table['bin_idx'][-1]\n if len(set(group_table['bin_type'])) > 1:\n raise ValueError('Inconsistent bin_type within group.')\n bin_type = group_table['bin_type'][0]\n energy_min = group_table['energy_min'].quantity[0]\n energy_max = group_table['energy_max'].quantity[-1]\n\n group = SpectrumEnergyGroup(\n energy_group_idx=energy_group_idx,\n bin_idx_min=bin_idx_min,\n bin_idx_max=bin_idx_max,\n bin_type=bin_type,\n energy_min=energy_min,\n energy_max=energy_max,\n )\n groups.append(group)\n\n return groups\n\n @classmethod\n def from_group_table(cls, table):\n \"\"\"Create from energy groups in `~astropy.table.Table` format.\"\"\"\n return cls([\n SpectrumEnergyGroup.from_dict(table_row_to_dict(row))\n for row in table\n ])\n\n def to_total_table(self):\n \"\"\"Table with one energy bin per row (`~astropy.table.Table`).\n\n Columns:\n\n * ``energy_group_idx`` - Energy group index (int)\n * ``bin_idx`` - Energy bin index (int)\n * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)\n\n There are no energy columns, because the per-bin energy info\n was lost during grouping.\n \"\"\"\n tables = [group.bin_table for group in self]\n return table_vstack(tables)\n\n def to_group_table(self):\n \"\"\"Table with one energy group per row (`~astropy.table.Table`).\n\n Columns:\n\n * ``energy_group_idx`` - Energy group index (int)\n * ``energy_group_n_bins`` - Number of bins in the energy group (int)\n * ``bin_idx_min`` - First bin index in the energy group (int)\n * ``bin_idx_max`` - Last bin index in the energy group (int)\n * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)\n * ``energy_min`` - Energy group start energy (Quantity)\n * ``energy_max`` - Energy group end energy (Quantity)\n \"\"\"\n rows = [group.to_dict() for group in self]\n table = table_from_row_data(rows)\n return table\n\n @property\n def energy_range(self):\n \"\"\"Total energy range (`~astropy.units.Quantity` of length 
2).\"\"\"\n return Quantity([self[0].energy_min, self[-1].energy_max])\n\n @property\n def energy_bounds(self):\n \"\"\"Energy group bounds (`~astropy.units.Quantity`).\"\"\"\n energy = [_.energy_min for _ in self]\n energy.append(self[-1].energy_max)\n return Quantity(energy)\n\n\nclass SpectrumEnergyGroupMaker(object):\n \"\"\"Energy bin groups for spectral analysis.\n\n This class contains both methods that run algorithms\n that compute groupings as well as the results as data members\n and methods to debug and assess the results.\n\n The input ``obs`` is used read-only, to access the counts energy\n binning, as well as some other info that is used for energy bin grouping.\n\n Parameters\n ----------\n obs : `~gammapy.spectrum.SpectrumObservation`\n Spectrum observation\n\n Attributes\n ----------\n obs : `~gammapy.spectrum.SpectrumObservation`\n Spectrum observation data\n groups : `~gammapy.spectrum.SpectrumEnergyGroups`\n List of energy groups\n\n See also\n --------\n SpectrumEnergyGroups, SpectrumEnergyGroup, FluxPointEstimator\n \"\"\"\n\n def __init__(self, obs):\n self.obs = obs\n self.groups = None\n\n def groups_from_obs(self):\n \"\"\"Compute energy groups with one group per energy bin.\"\"\"\n ebounds_obs = self.obs.e_reco\n size = ebounds_obs.nbins\n table = Table()\n table['bin_idx'] = np.arange(size)\n table['energy_group_idx'] = np.arange(size)\n table['bin_type'] = ['normal'] * size\n table['energy_min'] = ebounds_obs.lower_bounds\n table['energy_max'] = ebounds_obs.upper_bounds\n self.groups = SpectrumEnergyGroups.from_total_table(table)\n\n def compute_groups_fixed(self, ebounds):\n \"\"\"Apply grouping for a given fixed energy binning.\n\n This groups the observation ``obs.e_reco`` binning and\n ``ebounds`` using a nearest neighbor match on the bin edges.\n\n Parameters\n ----------\n ebounds : `~astropy.units.Quantity`\n Energy bounds array\n \"\"\"\n ebounds_src = self.obs.e_reco\n bin_edges_src = np.arange(len(ebounds_src))\n\n temp = np.interp(ebounds, ebounds_src, bin_edges_src)\n bin_edges = np.round(temp, decimals=0).astype(np.int)\n\n # Check for duplicates\n duplicates_removed = set(bin_edges)\n if len(duplicates_removed) != len(bin_edges):\n warn_str = \"Input binning\\n{}\\n contains bins that are finer than the\"\n warn_str += \" target binning\\n{}\\n or outside the valid range\"\n log.warning(warn_str.format(ebounds, ebounds_src))\n bin_edges = sorted(duplicates_removed)\n\n # Create normal bins\n groups = []\n for idx in np.arange(len(bin_edges) - 1):\n group = SpectrumEnergyGroup(\n energy_group_idx=-1,\n bin_idx_min=bin_edges[idx],\n bin_idx_max=bin_edges[idx + 1] - 1,\n bin_type='normal',\n energy_min=ebounds_src[bin_edges[idx]],\n energy_max=ebounds_src[bin_edges[idx + 1]],\n )\n groups.append(group)\n\n # Add underflow bin\n start_edge = groups[0].bin_idx_min\n if start_edge != 0:\n underflow = SpectrumEnergyGroup(\n energy_group_idx=-1,\n bin_idx_min=0,\n bin_idx_max=start_edge - 1,\n bin_type='underflow',\n energy_min=ebounds_src[0],\n energy_max=ebounds_src[start_edge],\n )\n groups.insert(0, underflow)\n\n # Add overflow bin\n end_edge = groups[-1].bin_idx_max\n if end_edge != ebounds_src.nbins - 1:\n overflow = SpectrumEnergyGroup(\n energy_group_idx=-1,\n bin_idx_min=end_edge + 1,\n bin_idx_max=ebounds_src.nbins - 1,\n bin_type='overflow',\n energy_min=ebounds_src[end_edge + 1],\n energy_max=ebounds_src[-1],\n )\n groups.append(overflow)\n\n # Set energy_group_idx\n for group_idx, group in enumerate(groups):\n group.energy_group_idx = 
group_idx\n\n self.groups = SpectrumEnergyGroups(groups)\n", "path": "gammapy/spectrum/energy_group.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Spectrum energy bin grouping.\n\nThere are three classes:\n\n* SpectrumEnergyGroup - one group\n* SpectrumEnergyGroups - one grouping, i.e. collection of groups\n* SpectrumEnergyGroupMaker - algorithms to compute groupings.\n\nAlgorithms to compute groupings are both on SpectrumEnergyGroups and SpectrumEnergyGroupMaker.\nThe difference is that SpectrumEnergyGroups contains the algorithms and book-keeping that\njust have to do with the groups, whereas SpectrumEnergyGroupMaker also accesses\ninformation from SpectrumObservation (e.g. safe energy range or counts data) and\nimplements higher-level algorithms.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom collections import OrderedDict\nfrom copy import deepcopy\nimport numpy as np\nimport logging\nfrom ..extern.six.moves import UserList\nfrom astropy.units import Quantity\nfrom astropy.table import Table\nfrom astropy.table import vstack as table_vstack\nfrom ..utils.table import table_from_row_data, table_row_to_dict\n\n__all__ = [\n 'SpectrumEnergyGroup',\n 'SpectrumEnergyGroups',\n 'SpectrumEnergyGroupMaker',\n]\n\nlog = logging.getLogger(__name__)\n\n\nclass SpectrumEnergyGroup(object):\n \"\"\"Spectrum energy group.\n\n Represents a consecutive range of bin indices (both ends inclusive).\n \"\"\"\n fields = [\n 'energy_group_idx', 'bin_idx_min', 'bin_idx_max',\n 'bin_type', 'energy_min', 'energy_max',\n ]\n \"\"\"List of data members of this class.\"\"\"\n\n valid_bin_types = ['normal', 'underflow', 'overflow']\n \"\"\"Valid values for ``bin_types`` attribute.\"\"\"\n\n def __init__(self, energy_group_idx, bin_idx_min, bin_idx_max, bin_type,\n energy_min, energy_max):\n self.energy_group_idx = energy_group_idx\n self.bin_idx_min = bin_idx_min\n self.bin_idx_max = bin_idx_max\n if bin_type not in self.valid_bin_types:\n raise ValueError('Invalid bin type: {}'.format(bin_type))\n self.bin_type = bin_type\n self.energy_min = Quantity(energy_min)\n self.energy_max = Quantity(energy_max)\n\n @classmethod\n def from_dict(cls, data):\n data = dict((_, data[_]) for _ in cls.fields)\n return cls(**data)\n\n @property\n def _data(self):\n return [(_, getattr(self, _)) for _ in self.fields]\n\n def __repr__(self):\n txt = ['{}={!r}'.format(k, v) for k, v in self._data]\n return '{}({})'.format(self.__class__.__name__, ', '.join(txt))\n\n def __eq__(self, other):\n return self.to_dict() == other.to_dict()\n\n def to_dict(self):\n return OrderedDict(self._data)\n\n @property\n def bin_idx_array(self):\n \"\"\"Numpy array of bin indices in the group.\"\"\"\n return np.arange(self.bin_idx_min, self.bin_idx_max + 1)\n\n @property\n def bin_table(self):\n \"\"\"Create `~astropy.table.Table` with bins in the group.\n\n Columns are: ``energy_group_idx``, ``bin_idx``, ``bin_type``\n \"\"\"\n table = Table()\n table['bin_idx'] = self.bin_idx_array\n table['energy_group_idx'] = self.energy_group_idx\n table['bin_type'] = self.bin_type\n table['energy_min'] = self.energy_min\n table['energy_max'] = self.energy_max\n return table\n\n\nclass SpectrumEnergyGroups(UserList):\n \"\"\"List of `~gammapy.spectrum.SpectrumEnergyGroup` objects.\n\n A helper class used by the `gammapy.spectrum.SpectrumEnergyGroupsMaker`.\n \"\"\"\n\n def __repr__(self):\n return '{}(len={})'.format(self.__class__.__name__, 
len(self))\n\n def __str__(self):\n ss = '{}:\\n'.format(self.__class__.__name__)\n lines = self.to_group_table().pformat(max_width=-1, max_lines=-1)\n ss += '\\n'.join(lines)\n return ss + '\\n'\n\n def copy(self):\n \"\"\"Deep copy\"\"\"\n return deepcopy(self)\n\n @classmethod\n def from_total_table(cls, table):\n \"\"\"Create list of SpectrumEnergyGroup objects from table.\"\"\"\n groups = cls()\n\n for energy_group_idx in np.unique(table['energy_group_idx']):\n mask = table['energy_group_idx'] == energy_group_idx\n group_table = table[mask]\n bin_idx_min = group_table['bin_idx'][0]\n bin_idx_max = group_table['bin_idx'][-1]\n if len(set(group_table['bin_type'])) > 1:\n raise ValueError('Inconsistent bin_type within group.')\n bin_type = group_table['bin_type'][0]\n energy_min = group_table['energy_min'].quantity[0]\n energy_max = group_table['energy_max'].quantity[-1]\n\n group = SpectrumEnergyGroup(\n energy_group_idx=energy_group_idx,\n bin_idx_min=bin_idx_min,\n bin_idx_max=bin_idx_max,\n bin_type=bin_type,\n energy_min=energy_min,\n energy_max=energy_max,\n )\n groups.append(group)\n\n return groups\n\n @classmethod\n def from_group_table(cls, table):\n \"\"\"Create from energy groups in `~astropy.table.Table` format.\"\"\"\n return cls([\n SpectrumEnergyGroup.from_dict(table_row_to_dict(row))\n for row in table\n ])\n\n def to_total_table(self):\n \"\"\"Table with one energy bin per row (`~astropy.table.Table`).\n\n Columns:\n\n * ``energy_group_idx`` - Energy group index (int)\n * ``bin_idx`` - Energy bin index (int)\n * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)\n\n There are no energy columns, because the per-bin energy info\n was lost during grouping.\n \"\"\"\n tables = [group.bin_table for group in self]\n return table_vstack(tables)\n\n def to_group_table(self):\n \"\"\"Table with one energy group per row (`~astropy.table.Table`).\n\n Columns:\n\n * ``energy_group_idx`` - Energy group index (int)\n * ``energy_group_n_bins`` - Number of bins in the energy group (int)\n * ``bin_idx_min`` - First bin index in the energy group (int)\n * ``bin_idx_max`` - Last bin index in the energy group (int)\n * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)\n * ``energy_min`` - Energy group start energy (Quantity)\n * ``energy_max`` - Energy group end energy (Quantity)\n \"\"\"\n rows = [group.to_dict() for group in self]\n table = table_from_row_data(rows)\n return table\n\n @property\n def energy_range(self):\n \"\"\"Total energy range (`~astropy.units.Quantity` of length 2).\"\"\"\n return Quantity([self[0].energy_min, self[-1].energy_max])\n\n @property\n def energy_bounds(self):\n \"\"\"Energy group bounds (`~astropy.units.Quantity`).\"\"\"\n energy = [_.energy_min for _ in self]\n energy.append(self[-1].energy_max)\n return Quantity(energy)\n\n\nclass SpectrumEnergyGroupMaker(object):\n \"\"\"Energy bin groups for spectral analysis.\n\n This class contains both methods that run algorithms\n that compute groupings as well as the results as data members\n and methods to debug and assess the results.\n\n The input ``obs`` is used read-only, to access the counts energy\n binning, as well as some other info that is used for energy bin grouping.\n\n Parameters\n ----------\n obs : `~gammapy.spectrum.SpectrumObservation`\n Spectrum observation\n\n Attributes\n ----------\n obs : `~gammapy.spectrum.SpectrumObservation`\n Spectrum observation data\n groups : `~gammapy.spectrum.SpectrumEnergyGroups`\n List of energy groups\n\n See also\n --------\n 
SpectrumEnergyGroups, SpectrumEnergyGroup, FluxPointEstimator\n \"\"\"\n\n def __init__(self, obs):\n self.obs = obs\n self.groups = None\n\n def groups_from_obs(self):\n \"\"\"Compute energy groups with one group per energy bin.\"\"\"\n ebounds_obs = self.obs.e_reco\n size = ebounds_obs.nbins\n table = Table()\n table['bin_idx'] = np.arange(size)\n table['energy_group_idx'] = np.arange(size)\n table['bin_type'] = ['normal'] * size\n table['energy_min'] = ebounds_obs.lower_bounds\n table['energy_max'] = ebounds_obs.upper_bounds\n self.groups = SpectrumEnergyGroups.from_total_table(table)\n\n def compute_groups_fixed(self, ebounds):\n \"\"\"Apply grouping for a given fixed energy binning.\n\n This groups the observation ``obs.e_reco`` binning and\n ``ebounds`` using a nearest neighbor match on the bin edges.\n\n Parameters\n ----------\n ebounds : `~astropy.units.Quantity`\n Energy bounds array\n \"\"\"\n ebounds_src = self.obs.e_reco.to(ebounds.unit)\n bin_edges_src = np.arange(len(ebounds_src))\n\n temp = np.interp(ebounds, ebounds_src, bin_edges_src)\n bin_edges = np.round(temp, decimals=0).astype(np.int)\n\n # Check for duplicates\n duplicates_removed = set(bin_edges)\n if len(duplicates_removed) != len(bin_edges):\n warn_str = \"Input binning\\n{}\\n contains bins that are finer than the\"\n warn_str += \" target binning\\n{}\\n or outside the valid range\"\n log.warning(warn_str.format(ebounds, ebounds_src))\n bin_edges = sorted(duplicates_removed)\n\n # Create normal bins\n groups = []\n for idx in np.arange(len(bin_edges) - 1):\n group = SpectrumEnergyGroup(\n energy_group_idx=-1,\n bin_idx_min=bin_edges[idx],\n bin_idx_max=bin_edges[idx + 1] - 1,\n bin_type='normal',\n energy_min=ebounds_src[bin_edges[idx]],\n energy_max=ebounds_src[bin_edges[idx + 1]],\n )\n groups.append(group)\n\n if groups == []:\n err_str = \"Input binning\\n{}\\n has no overlap with\"\n err_str += \" target binning\\n{}\"\n raise ValueError(err_str.format(ebounds, ebounds_src))\n\n # Add underflow bin\n start_edge = groups[0].bin_idx_min\n if start_edge != 0:\n underflow = SpectrumEnergyGroup(\n energy_group_idx=-1,\n bin_idx_min=0,\n bin_idx_max=start_edge - 1,\n bin_type='underflow',\n energy_min=ebounds_src[0],\n energy_max=ebounds_src[start_edge],\n )\n groups.insert(0, underflow)\n\n # Add overflow bin\n end_edge = groups[-1].bin_idx_max\n if end_edge != ebounds_src.nbins - 1:\n overflow = SpectrumEnergyGroup(\n energy_group_idx=-1,\n bin_idx_min=end_edge + 1,\n bin_idx_max=ebounds_src.nbins - 1,\n bin_type='overflow',\n energy_min=ebounds_src[end_edge + 1],\n energy_max=ebounds_src[-1],\n )\n groups.append(overflow)\n\n # Set energy_group_idx\n for group_idx, group in enumerate(groups):\n group.energy_group_idx = group_idx\n\n self.groups = SpectrumEnergyGroups(groups)\n", "path": "gammapy/spectrum/energy_group.py"}]} | 3,703 | 242 |
gh_patches_debug_16730 | rasdani/github-patches | git_diff | encode__httpx-861 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redundant `boundary` if files/data is empty
If you try to send a POST request with empty data and files, you will create a body containing a single `--boundary--` string, which can break the server.
It's because httpx creates a MultipartStream for `data={}, files={}`:
https://github.com/encode/httpx/blob/a82adcc933345c6b8cb1623b031eb85723e7665b/httpx/_content_streams.py#L332-L334
And this MultipartStream doesn't have special behavior for empty data/files:
https://github.com/encode/httpx/blob/a82adcc933345c6b8cb1623b031eb85723e7665b/httpx/_content_streams.py#L277-L283
Reproducible example:
```
from httpx._content_streams import encode
stream = encode(data={}, files={}, boundary=b"+++")
print(b"".join([part for part in stream]))
# b'--+++--\r\n'
```
I see two ways to solve it:
1. Check files not only for `None` but also for emptiness in `_content_streams.encode`
2. Check the count of fields in `MultipartStream.__init__` and don't write anything if the fields are empty.
I already opened PR (#861) with a test example. If someone tells me which of these options is better, I will be happy to implement it 🙌
--- END ISSUE ---
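For reference, a small sketch of the behaviour one would expect once empty `data`/`files` no longer produce a multipart body (this assumes a fix along the lines of option 1; `encode`, `ByteStream`, and the private import path come from the module listed below):
```python
from httpx._content_streams import ByteStream, encode

# With empty dicts, no multipart body should be produced at all:
stream = encode(data={}, files={}, boundary=b"+++")
assert isinstance(stream, ByteStream)
assert b"".join(part for part in stream) == b""   # instead of b'--+++--\r\n'
assert stream.get_headers() == {}                 # no Content-Type / Content-Length implied
```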
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/_content_streams.py`
Content:
```
1 import binascii
2 import mimetypes
3 import os
4 import typing
5 from io import BytesIO
6 from json import dumps as json_dumps
7 from pathlib import Path
8 from urllib.parse import urlencode
9
10 from ._exceptions import StreamConsumed
11 from ._utils import format_form_param
12
13 RequestData = typing.Union[
14 dict, str, bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]
15 ]
16
17 RequestFiles = typing.Dict[
18 str,
19 typing.Union[
20 # file (or str)
21 typing.Union[typing.IO[typing.AnyStr], typing.AnyStr],
22 # (filename, file (or str))
23 typing.Tuple[
24 typing.Optional[str], typing.Union[typing.IO[typing.AnyStr], typing.AnyStr],
25 ],
26 # (filename, file (or str), content_type)
27 typing.Tuple[
28 typing.Optional[str],
29 typing.Union[typing.IO[typing.AnyStr], typing.AnyStr],
30 typing.Optional[str],
31 ],
32 ],
33 ]
34
35
36 class ContentStream:
37 def get_headers(self) -> typing.Dict[str, str]:
38 """
39 Return a dictionary of headers that are implied by the encoding.
40 """
41 return {}
42
43 def can_replay(self) -> bool:
44 """
45 Return `True` if `__aiter__` can be called multiple times.
46
47 We need this in cases such determining if we can re-issue a request
48 body when we receive a redirect response.
49 """
50 return True
51
52 def __iter__(self) -> typing.Iterator[bytes]:
53 yield b""
54
55 def close(self) -> None:
56 pass
57
58 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
59 yield b""
60
61 async def aclose(self) -> None:
62 pass
63
64
65 class ByteStream(ContentStream):
66 """
67 Request content encoded as plain bytes.
68 """
69
70 def __init__(self, body: typing.Union[str, bytes]) -> None:
71 self.body = body.encode("utf-8") if isinstance(body, str) else body
72
73 def get_headers(self) -> typing.Dict[str, str]:
74 if not self.body:
75 return {}
76 content_length = str(len(self.body))
77 return {"Content-Length": content_length}
78
79 def __iter__(self) -> typing.Iterator[bytes]:
80 yield self.body
81
82 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
83 yield self.body
84
85
86 class IteratorStream(ContentStream):
87 """
88 Request content encoded as plain bytes, using an byte iterator.
89 """
90
91 def __init__(
92 self, iterator: typing.Iterator[bytes], close_func: typing.Callable = None
93 ) -> None:
94 self.iterator = iterator
95 self.close_func = close_func
96 self.is_stream_consumed = False
97
98 def can_replay(self) -> bool:
99 return False
100
101 def get_headers(self) -> typing.Dict[str, str]:
102 return {"Transfer-Encoding": "chunked"}
103
104 def __iter__(self) -> typing.Iterator[bytes]:
105 if self.is_stream_consumed:
106 raise StreamConsumed()
107 self.is_stream_consumed = True
108 for part in self.iterator:
109 yield part
110
111 def __aiter__(self) -> typing.AsyncIterator[bytes]:
112 raise RuntimeError("Attempted to call a async iterator on an sync stream.")
113
114 def close(self) -> None:
115 if self.close_func is not None:
116 self.close_func()
117
118
119 class AsyncIteratorStream(ContentStream):
120 """
121 Request content encoded as plain bytes, using an async byte iterator.
122 """
123
124 def __init__(
125 self, aiterator: typing.AsyncIterator[bytes], close_func: typing.Callable = None
126 ) -> None:
127 self.aiterator = aiterator
128 self.close_func = close_func
129 self.is_stream_consumed = False
130
131 def can_replay(self) -> bool:
132 return False
133
134 def get_headers(self) -> typing.Dict[str, str]:
135 return {"Transfer-Encoding": "chunked"}
136
137 def __iter__(self) -> typing.Iterator[bytes]:
138 raise RuntimeError("Attempted to call a sync iterator on an async stream.")
139
140 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
141 if self.is_stream_consumed:
142 raise StreamConsumed()
143 self.is_stream_consumed = True
144 async for part in self.aiterator:
145 yield part
146
147 async def aclose(self) -> None:
148 if self.close_func is not None:
149 await self.close_func()
150
151
152 class JSONStream(ContentStream):
153 """
154 Request content encoded as JSON.
155 """
156
157 def __init__(self, json: typing.Any) -> None:
158 self.body = json_dumps(json).encode("utf-8")
159
160 def get_headers(self) -> typing.Dict[str, str]:
161 content_length = str(len(self.body))
162 content_type = "application/json"
163 return {"Content-Length": content_length, "Content-Type": content_type}
164
165 def __iter__(self) -> typing.Iterator[bytes]:
166 yield self.body
167
168 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
169 yield self.body
170
171
172 class URLEncodedStream(ContentStream):
173 """
174 Request content as URL encoded form data.
175 """
176
177 def __init__(self, data: dict) -> None:
178 self.body = urlencode(data, doseq=True).encode("utf-8")
179
180 def get_headers(self) -> typing.Dict[str, str]:
181 content_length = str(len(self.body))
182 content_type = "application/x-www-form-urlencoded"
183 return {"Content-Length": content_length, "Content-Type": content_type}
184
185 def __iter__(self) -> typing.Iterator[bytes]:
186 yield self.body
187
188 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
189 yield self.body
190
191
192 class MultipartStream(ContentStream):
193 """
194 Request content as multipart encoded form data.
195 """
196
197 class DataField:
198 """
199 A single form field item, within a multipart form field.
200 """
201
202 def __init__(self, name: str, value: typing.Union[str, bytes]) -> None:
203 if not isinstance(name, str):
204 raise TypeError("Invalid type for name. Expected str.")
205 if not isinstance(value, (str, bytes)):
206 raise TypeError("Invalid type for value. Expected str or bytes.")
207 self.name = name
208 self.value = value
209
210 def render_headers(self) -> bytes:
211 name = format_form_param("name", self.name)
212 return b"".join([b"Content-Disposition: form-data; ", name, b"\r\n\r\n"])
213
214 def render_data(self) -> bytes:
215 return (
216 self.value
217 if isinstance(self.value, bytes)
218 else self.value.encode("utf-8")
219 )
220
221 class FileField:
222 """
223 A single file field item, within a multipart form field.
224 """
225
226 def __init__(
227 self, name: str, value: typing.Union[typing.IO[typing.AnyStr], tuple]
228 ) -> None:
229 self.name = name
230 if not isinstance(value, tuple):
231 self.filename = Path(str(getattr(value, "name", "upload"))).name
232 self.file = (
233 value
234 ) # type: typing.Union[typing.IO[str], typing.IO[bytes]]
235 self.content_type = self.guess_content_type()
236 else:
237 self.filename = value[0]
238 self.file = value[1]
239 self.content_type = (
240 value[2] if len(value) > 2 else self.guess_content_type()
241 )
242
243 def guess_content_type(self) -> typing.Optional[str]:
244 if self.filename:
245 return (
246 mimetypes.guess_type(self.filename)[0] or "application/octet-stream"
247 )
248 else:
249 return None
250
251 def render_headers(self) -> bytes:
252 parts = [
253 b"Content-Disposition: form-data; ",
254 format_form_param("name", self.name),
255 ]
256 if self.filename:
257 filename = format_form_param("filename", self.filename)
258 parts.extend([b"; ", filename])
259 if self.content_type is not None:
260 content_type = self.content_type.encode()
261 parts.extend([b"\r\nContent-Type: ", content_type])
262 parts.append(b"\r\n\r\n")
263 return b"".join(parts)
264
265 def render_data(self) -> bytes:
266 content: typing.Union[str, bytes]
267 if isinstance(self.file, str):
268 content = self.file
269 else:
270 content = self.file.read()
271 return content.encode("utf-8") if isinstance(content, str) else content
272
273 def __init__(self, data: dict, files: dict, boundary: bytes = None) -> None:
274 body = BytesIO()
275 if boundary is None:
276 boundary = binascii.hexlify(os.urandom(16))
277
278 for field in self.iter_fields(data, files):
279 body.write(b"--%s\r\n" % boundary)
280 body.write(field.render_headers())
281 body.write(field.render_data())
282 body.write(b"\r\n")
283
284 body.write(b"--%s--\r\n" % boundary)
285
286 self.content_type = "multipart/form-data; boundary=%s" % boundary.decode(
287 "ascii"
288 )
289 self.body = body.getvalue()
290
291 def iter_fields(
292 self, data: dict, files: dict
293 ) -> typing.Iterator[typing.Union["FileField", "DataField"]]:
294 for name, value in data.items():
295 if isinstance(value, list):
296 for item in value:
297 yield self.DataField(name=name, value=item)
298 else:
299 yield self.DataField(name=name, value=value)
300
301 for name, value in files.items():
302 yield self.FileField(name=name, value=value)
303
304 def get_headers(self) -> typing.Dict[str, str]:
305 content_length = str(len(self.body))
306 content_type = self.content_type
307 return {"Content-Length": content_length, "Content-Type": content_type}
308
309 def __iter__(self) -> typing.Iterator[bytes]:
310 yield self.body
311
312 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
313 yield self.body
314
315
316 def encode(
317 data: RequestData = None,
318 files: RequestFiles = None,
319 json: typing.Any = None,
320 boundary: bytes = None,
321 ) -> ContentStream:
322 """
323 Handles encoding the given `data`, `files`, and `json`, returning
324 a `ContentStream` implementation.
325 """
326 if data is None:
327 if json is not None:
328 return JSONStream(json=json)
329 elif files:
330 return MultipartStream(data={}, files=files, boundary=boundary)
331 else:
332 return ByteStream(body=b"")
333 elif isinstance(data, dict):
334 if files is not None:
335 return MultipartStream(data=data, files=files, boundary=boundary)
336 else:
337 return URLEncodedStream(data=data)
338 elif isinstance(data, (str, bytes)):
339 return ByteStream(body=data)
340 elif hasattr(data, "__aiter__"):
341 data = typing.cast(typing.AsyncIterator[bytes], data)
342 return AsyncIteratorStream(aiterator=data)
343 elif hasattr(data, "__iter__"):
344 data = typing.cast(typing.Iterator[bytes], data)
345 return IteratorStream(iterator=data)
346
347 raise TypeError(f"Unexpected type for 'data', {type(data)!r}")
348
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/_content_streams.py b/httpx/_content_streams.py
--- a/httpx/_content_streams.py
+++ b/httpx/_content_streams.py
@@ -323,7 +323,7 @@
Handles encoding the given `data`, `files`, and `json`, returning
a `ContentStream` implementation.
"""
- if data is None:
+ if not data:
if json is not None:
return JSONStream(json=json)
elif files:
@@ -331,7 +331,7 @@
else:
return ByteStream(body=b"")
elif isinstance(data, dict):
- if files is not None:
+ if files:
return MultipartStream(data=data, files=files, boundary=boundary)
else:
return URLEncodedStream(data=data)
| {"golden_diff": "diff --git a/httpx/_content_streams.py b/httpx/_content_streams.py\n--- a/httpx/_content_streams.py\n+++ b/httpx/_content_streams.py\n@@ -323,7 +323,7 @@\n Handles encoding the given `data`, `files`, and `json`, returning\n a `ContentStream` implementation.\n \"\"\"\n- if data is None:\n+ if not data:\n if json is not None:\n return JSONStream(json=json)\n elif files:\n@@ -331,7 +331,7 @@\n else:\n return ByteStream(body=b\"\")\n elif isinstance(data, dict):\n- if files is not None:\n+ if files:\n return MultipartStream(data=data, files=files, boundary=boundary)\n else:\n return URLEncodedStream(data=data)\n", "issue": "Redundant `boundary` if files/data is empty\nIf you try to send a post request with empty data and files \u2014 you will create a body with one `--boundary--` string, which can break the server.\r\n\r\nIt's because httpx create MultipartStream for `data={}, files={}`:\r\nhttps://github.com/encode/httpx/blob/a82adcc933345c6b8cb1623b031eb85723e7665b/httpx/_content_streams.py#L332-L334\r\n\r\nAnd this MultipartStream doesn't have special behavior for empty data/files:\r\nhttps://github.com/encode/httpx/blob/a82adcc933345c6b8cb1623b031eb85723e7665b/httpx/_content_streams.py#L277-L283\r\n\r\nReproducible example:\r\n\r\n```\r\nfrom httpx.content_streams import encode\r\n\r\nstream = encode(data={}, files={}, boundary=b\"+++\")\r\nprint(b\"\".join([part for part in stream]))\r\n# b'--+++--\\r\\n'\r\n```\r\n\r\nI see two ways to solve it:\r\n1. Check files not only for None but also for emptiness in `_content_streams.decode`\r\n2. Check the count of fields in `MultipartStream.__init__` and doesn't write anything if fields are empty.\r\n\r\nI already opened PR (#861) with a test example. If someone tells me which of these options is better, I will be happy to implement it \ud83d\ude4c\n", "before_files": [{"content": "import binascii\nimport mimetypes\nimport os\nimport typing\nfrom io import BytesIO\nfrom json import dumps as json_dumps\nfrom pathlib import Path\nfrom urllib.parse import urlencode\n\nfrom ._exceptions import StreamConsumed\nfrom ._utils import format_form_param\n\nRequestData = typing.Union[\n dict, str, bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]\n]\n\nRequestFiles = typing.Dict[\n str,\n typing.Union[\n # file (or str)\n typing.Union[typing.IO[typing.AnyStr], typing.AnyStr],\n # (filename, file (or str))\n typing.Tuple[\n typing.Optional[str], typing.Union[typing.IO[typing.AnyStr], typing.AnyStr],\n ],\n # (filename, file (or str), content_type)\n typing.Tuple[\n typing.Optional[str],\n typing.Union[typing.IO[typing.AnyStr], typing.AnyStr],\n typing.Optional[str],\n ],\n ],\n]\n\n\nclass ContentStream:\n def get_headers(self) -> typing.Dict[str, str]:\n \"\"\"\n Return a dictionary of headers that are implied by the encoding.\n \"\"\"\n return {}\n\n def can_replay(self) -> bool:\n \"\"\"\n Return `True` if `__aiter__` can be called multiple times.\n\n We need this in cases such determining if we can re-issue a request\n body when we receive a redirect response.\n \"\"\"\n return True\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield b\"\"\n\n def close(self) -> None:\n pass\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield b\"\"\n\n async def aclose(self) -> None:\n pass\n\n\nclass ByteStream(ContentStream):\n \"\"\"\n Request content encoded as plain bytes.\n \"\"\"\n\n def __init__(self, body: typing.Union[str, bytes]) -> None:\n self.body = body.encode(\"utf-8\") if isinstance(body, str) else 
body\n\n def get_headers(self) -> typing.Dict[str, str]:\n if not self.body:\n return {}\n content_length = str(len(self.body))\n return {\"Content-Length\": content_length}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield self.body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield self.body\n\n\nclass IteratorStream(ContentStream):\n \"\"\"\n Request content encoded as plain bytes, using an byte iterator.\n \"\"\"\n\n def __init__(\n self, iterator: typing.Iterator[bytes], close_func: typing.Callable = None\n ) -> None:\n self.iterator = iterator\n self.close_func = close_func\n self.is_stream_consumed = False\n\n def can_replay(self) -> bool:\n return False\n\n def get_headers(self) -> typing.Dict[str, str]:\n return {\"Transfer-Encoding\": \"chunked\"}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n if self.is_stream_consumed:\n raise StreamConsumed()\n self.is_stream_consumed = True\n for part in self.iterator:\n yield part\n\n def __aiter__(self) -> typing.AsyncIterator[bytes]:\n raise RuntimeError(\"Attempted to call a async iterator on an sync stream.\")\n\n def close(self) -> None:\n if self.close_func is not None:\n self.close_func()\n\n\nclass AsyncIteratorStream(ContentStream):\n \"\"\"\n Request content encoded as plain bytes, using an async byte iterator.\n \"\"\"\n\n def __init__(\n self, aiterator: typing.AsyncIterator[bytes], close_func: typing.Callable = None\n ) -> None:\n self.aiterator = aiterator\n self.close_func = close_func\n self.is_stream_consumed = False\n\n def can_replay(self) -> bool:\n return False\n\n def get_headers(self) -> typing.Dict[str, str]:\n return {\"Transfer-Encoding\": \"chunked\"}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n raise RuntimeError(\"Attempted to call a sync iterator on an async stream.\")\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n if self.is_stream_consumed:\n raise StreamConsumed()\n self.is_stream_consumed = True\n async for part in self.aiterator:\n yield part\n\n async def aclose(self) -> None:\n if self.close_func is not None:\n await self.close_func()\n\n\nclass JSONStream(ContentStream):\n \"\"\"\n Request content encoded as JSON.\n \"\"\"\n\n def __init__(self, json: typing.Any) -> None:\n self.body = json_dumps(json).encode(\"utf-8\")\n\n def get_headers(self) -> typing.Dict[str, str]:\n content_length = str(len(self.body))\n content_type = \"application/json\"\n return {\"Content-Length\": content_length, \"Content-Type\": content_type}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield self.body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield self.body\n\n\nclass URLEncodedStream(ContentStream):\n \"\"\"\n Request content as URL encoded form data.\n \"\"\"\n\n def __init__(self, data: dict) -> None:\n self.body = urlencode(data, doseq=True).encode(\"utf-8\")\n\n def get_headers(self) -> typing.Dict[str, str]:\n content_length = str(len(self.body))\n content_type = \"application/x-www-form-urlencoded\"\n return {\"Content-Length\": content_length, \"Content-Type\": content_type}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield self.body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield self.body\n\n\nclass MultipartStream(ContentStream):\n \"\"\"\n Request content as multipart encoded form data.\n \"\"\"\n\n class DataField:\n \"\"\"\n A single form field item, within a multipart form field.\n \"\"\"\n\n def __init__(self, name: str, value: typing.Union[str, bytes]) -> None:\n if not isinstance(name, 
str):\n raise TypeError(\"Invalid type for name. Expected str.\")\n if not isinstance(value, (str, bytes)):\n raise TypeError(\"Invalid type for value. Expected str or bytes.\")\n self.name = name\n self.value = value\n\n def render_headers(self) -> bytes:\n name = format_form_param(\"name\", self.name)\n return b\"\".join([b\"Content-Disposition: form-data; \", name, b\"\\r\\n\\r\\n\"])\n\n def render_data(self) -> bytes:\n return (\n self.value\n if isinstance(self.value, bytes)\n else self.value.encode(\"utf-8\")\n )\n\n class FileField:\n \"\"\"\n A single file field item, within a multipart form field.\n \"\"\"\n\n def __init__(\n self, name: str, value: typing.Union[typing.IO[typing.AnyStr], tuple]\n ) -> None:\n self.name = name\n if not isinstance(value, tuple):\n self.filename = Path(str(getattr(value, \"name\", \"upload\"))).name\n self.file = (\n value\n ) # type: typing.Union[typing.IO[str], typing.IO[bytes]]\n self.content_type = self.guess_content_type()\n else:\n self.filename = value[0]\n self.file = value[1]\n self.content_type = (\n value[2] if len(value) > 2 else self.guess_content_type()\n )\n\n def guess_content_type(self) -> typing.Optional[str]:\n if self.filename:\n return (\n mimetypes.guess_type(self.filename)[0] or \"application/octet-stream\"\n )\n else:\n return None\n\n def render_headers(self) -> bytes:\n parts = [\n b\"Content-Disposition: form-data; \",\n format_form_param(\"name\", self.name),\n ]\n if self.filename:\n filename = format_form_param(\"filename\", self.filename)\n parts.extend([b\"; \", filename])\n if self.content_type is not None:\n content_type = self.content_type.encode()\n parts.extend([b\"\\r\\nContent-Type: \", content_type])\n parts.append(b\"\\r\\n\\r\\n\")\n return b\"\".join(parts)\n\n def render_data(self) -> bytes:\n content: typing.Union[str, bytes]\n if isinstance(self.file, str):\n content = self.file\n else:\n content = self.file.read()\n return content.encode(\"utf-8\") if isinstance(content, str) else content\n\n def __init__(self, data: dict, files: dict, boundary: bytes = None) -> None:\n body = BytesIO()\n if boundary is None:\n boundary = binascii.hexlify(os.urandom(16))\n\n for field in self.iter_fields(data, files):\n body.write(b\"--%s\\r\\n\" % boundary)\n body.write(field.render_headers())\n body.write(field.render_data())\n body.write(b\"\\r\\n\")\n\n body.write(b\"--%s--\\r\\n\" % boundary)\n\n self.content_type = \"multipart/form-data; boundary=%s\" % boundary.decode(\n \"ascii\"\n )\n self.body = body.getvalue()\n\n def iter_fields(\n self, data: dict, files: dict\n ) -> typing.Iterator[typing.Union[\"FileField\", \"DataField\"]]:\n for name, value in data.items():\n if isinstance(value, list):\n for item in value:\n yield self.DataField(name=name, value=item)\n else:\n yield self.DataField(name=name, value=value)\n\n for name, value in files.items():\n yield self.FileField(name=name, value=value)\n\n def get_headers(self) -> typing.Dict[str, str]:\n content_length = str(len(self.body))\n content_type = self.content_type\n return {\"Content-Length\": content_length, \"Content-Type\": content_type}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield self.body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield self.body\n\n\ndef encode(\n data: RequestData = None,\n files: RequestFiles = None,\n json: typing.Any = None,\n boundary: bytes = None,\n) -> ContentStream:\n \"\"\"\n Handles encoding the given `data`, `files`, and `json`, returning\n a `ContentStream` implementation.\n \"\"\"\n 
if data is None:\n if json is not None:\n return JSONStream(json=json)\n elif files:\n return MultipartStream(data={}, files=files, boundary=boundary)\n else:\n return ByteStream(body=b\"\")\n elif isinstance(data, dict):\n if files is not None:\n return MultipartStream(data=data, files=files, boundary=boundary)\n else:\n return URLEncodedStream(data=data)\n elif isinstance(data, (str, bytes)):\n return ByteStream(body=data)\n elif hasattr(data, \"__aiter__\"):\n data = typing.cast(typing.AsyncIterator[bytes], data)\n return AsyncIteratorStream(aiterator=data)\n elif hasattr(data, \"__iter__\"):\n data = typing.cast(typing.Iterator[bytes], data)\n return IteratorStream(iterator=data)\n\n raise TypeError(f\"Unexpected type for 'data', {type(data)!r}\")\n", "path": "httpx/_content_streams.py"}], "after_files": [{"content": "import binascii\nimport mimetypes\nimport os\nimport typing\nfrom io import BytesIO\nfrom json import dumps as json_dumps\nfrom pathlib import Path\nfrom urllib.parse import urlencode\n\nfrom ._exceptions import StreamConsumed\nfrom ._utils import format_form_param\n\nRequestData = typing.Union[\n dict, str, bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]\n]\n\nRequestFiles = typing.Dict[\n str,\n typing.Union[\n # file (or str)\n typing.Union[typing.IO[typing.AnyStr], typing.AnyStr],\n # (filename, file (or str))\n typing.Tuple[\n typing.Optional[str], typing.Union[typing.IO[typing.AnyStr], typing.AnyStr],\n ],\n # (filename, file (or str), content_type)\n typing.Tuple[\n typing.Optional[str],\n typing.Union[typing.IO[typing.AnyStr], typing.AnyStr],\n typing.Optional[str],\n ],\n ],\n]\n\n\nclass ContentStream:\n def get_headers(self) -> typing.Dict[str, str]:\n \"\"\"\n Return a dictionary of headers that are implied by the encoding.\n \"\"\"\n return {}\n\n def can_replay(self) -> bool:\n \"\"\"\n Return `True` if `__aiter__` can be called multiple times.\n\n We need this in cases such determining if we can re-issue a request\n body when we receive a redirect response.\n \"\"\"\n return True\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield b\"\"\n\n def close(self) -> None:\n pass\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield b\"\"\n\n async def aclose(self) -> None:\n pass\n\n\nclass ByteStream(ContentStream):\n \"\"\"\n Request content encoded as plain bytes.\n \"\"\"\n\n def __init__(self, body: typing.Union[str, bytes]) -> None:\n self.body = body.encode(\"utf-8\") if isinstance(body, str) else body\n\n def get_headers(self) -> typing.Dict[str, str]:\n if not self.body:\n return {}\n content_length = str(len(self.body))\n return {\"Content-Length\": content_length}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield self.body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield self.body\n\n\nclass IteratorStream(ContentStream):\n \"\"\"\n Request content encoded as plain bytes, using an byte iterator.\n \"\"\"\n\n def __init__(\n self, iterator: typing.Iterator[bytes], close_func: typing.Callable = None\n ) -> None:\n self.iterator = iterator\n self.close_func = close_func\n self.is_stream_consumed = False\n\n def can_replay(self) -> bool:\n return False\n\n def get_headers(self) -> typing.Dict[str, str]:\n return {\"Transfer-Encoding\": \"chunked\"}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n if self.is_stream_consumed:\n raise StreamConsumed()\n self.is_stream_consumed = True\n for part in self.iterator:\n yield part\n\n def __aiter__(self) -> typing.AsyncIterator[bytes]:\n raise 
RuntimeError(\"Attempted to call a async iterator on an sync stream.\")\n\n def close(self) -> None:\n if self.close_func is not None:\n self.close_func()\n\n\nclass AsyncIteratorStream(ContentStream):\n \"\"\"\n Request content encoded as plain bytes, using an async byte iterator.\n \"\"\"\n\n def __init__(\n self, aiterator: typing.AsyncIterator[bytes], close_func: typing.Callable = None\n ) -> None:\n self.aiterator = aiterator\n self.close_func = close_func\n self.is_stream_consumed = False\n\n def can_replay(self) -> bool:\n return False\n\n def get_headers(self) -> typing.Dict[str, str]:\n return {\"Transfer-Encoding\": \"chunked\"}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n raise RuntimeError(\"Attempted to call a sync iterator on an async stream.\")\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n if self.is_stream_consumed:\n raise StreamConsumed()\n self.is_stream_consumed = True\n async for part in self.aiterator:\n yield part\n\n async def aclose(self) -> None:\n if self.close_func is not None:\n await self.close_func()\n\n\nclass JSONStream(ContentStream):\n \"\"\"\n Request content encoded as JSON.\n \"\"\"\n\n def __init__(self, json: typing.Any) -> None:\n self.body = json_dumps(json).encode(\"utf-8\")\n\n def get_headers(self) -> typing.Dict[str, str]:\n content_length = str(len(self.body))\n content_type = \"application/json\"\n return {\"Content-Length\": content_length, \"Content-Type\": content_type}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield self.body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield self.body\n\n\nclass URLEncodedStream(ContentStream):\n \"\"\"\n Request content as URL encoded form data.\n \"\"\"\n\n def __init__(self, data: dict) -> None:\n self.body = urlencode(data, doseq=True).encode(\"utf-8\")\n\n def get_headers(self) -> typing.Dict[str, str]:\n content_length = str(len(self.body))\n content_type = \"application/x-www-form-urlencoded\"\n return {\"Content-Length\": content_length, \"Content-Type\": content_type}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield self.body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield self.body\n\n\nclass MultipartStream(ContentStream):\n \"\"\"\n Request content as multipart encoded form data.\n \"\"\"\n\n class DataField:\n \"\"\"\n A single form field item, within a multipart form field.\n \"\"\"\n\n def __init__(self, name: str, value: typing.Union[str, bytes]) -> None:\n if not isinstance(name, str):\n raise TypeError(\"Invalid type for name. Expected str.\")\n if not isinstance(value, (str, bytes)):\n raise TypeError(\"Invalid type for value. 
Expected str or bytes.\")\n self.name = name\n self.value = value\n\n def render_headers(self) -> bytes:\n name = format_form_param(\"name\", self.name)\n return b\"\".join([b\"Content-Disposition: form-data; \", name, b\"\\r\\n\\r\\n\"])\n\n def render_data(self) -> bytes:\n return (\n self.value\n if isinstance(self.value, bytes)\n else self.value.encode(\"utf-8\")\n )\n\n class FileField:\n \"\"\"\n A single file field item, within a multipart form field.\n \"\"\"\n\n def __init__(\n self, name: str, value: typing.Union[typing.IO[typing.AnyStr], tuple]\n ) -> None:\n self.name = name\n if not isinstance(value, tuple):\n self.filename = Path(str(getattr(value, \"name\", \"upload\"))).name\n self.file = (\n value\n ) # type: typing.Union[typing.IO[str], typing.IO[bytes]]\n self.content_type = self.guess_content_type()\n else:\n self.filename = value[0]\n self.file = value[1]\n self.content_type = (\n value[2] if len(value) > 2 else self.guess_content_type()\n )\n\n def guess_content_type(self) -> typing.Optional[str]:\n if self.filename:\n return (\n mimetypes.guess_type(self.filename)[0] or \"application/octet-stream\"\n )\n else:\n return None\n\n def render_headers(self) -> bytes:\n parts = [\n b\"Content-Disposition: form-data; \",\n format_form_param(\"name\", self.name),\n ]\n if self.filename:\n filename = format_form_param(\"filename\", self.filename)\n parts.extend([b\"; \", filename])\n if self.content_type is not None:\n content_type = self.content_type.encode()\n parts.extend([b\"\\r\\nContent-Type: \", content_type])\n parts.append(b\"\\r\\n\\r\\n\")\n return b\"\".join(parts)\n\n def render_data(self) -> bytes:\n content: typing.Union[str, bytes]\n if isinstance(self.file, str):\n content = self.file\n else:\n content = self.file.read()\n return content.encode(\"utf-8\") if isinstance(content, str) else content\n\n def __init__(self, data: dict, files: dict, boundary: bytes = None) -> None:\n body = BytesIO()\n if boundary is None:\n boundary = binascii.hexlify(os.urandom(16))\n\n for field in self.iter_fields(data, files):\n body.write(b\"--%s\\r\\n\" % boundary)\n body.write(field.render_headers())\n body.write(field.render_data())\n body.write(b\"\\r\\n\")\n\n body.write(b\"--%s--\\r\\n\" % boundary)\n\n self.content_type = \"multipart/form-data; boundary=%s\" % boundary.decode(\n \"ascii\"\n )\n self.body = body.getvalue()\n\n def iter_fields(\n self, data: dict, files: dict\n ) -> typing.Iterator[typing.Union[\"FileField\", \"DataField\"]]:\n for name, value in data.items():\n if isinstance(value, list):\n for item in value:\n yield self.DataField(name=name, value=item)\n else:\n yield self.DataField(name=name, value=value)\n\n for name, value in files.items():\n yield self.FileField(name=name, value=value)\n\n def get_headers(self) -> typing.Dict[str, str]:\n content_length = str(len(self.body))\n content_type = self.content_type\n return {\"Content-Length\": content_length, \"Content-Type\": content_type}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield self.body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield self.body\n\n\ndef encode(\n data: RequestData = None,\n files: RequestFiles = None,\n json: typing.Any = None,\n boundary: bytes = None,\n) -> ContentStream:\n \"\"\"\n Handles encoding the given `data`, `files`, and `json`, returning\n a `ContentStream` implementation.\n \"\"\"\n if not data:\n if json is not None:\n return JSONStream(json=json)\n elif files:\n return MultipartStream(data={}, files=files, boundary=boundary)\n 
else:\n return ByteStream(body=b\"\")\n elif isinstance(data, dict):\n if files:\n return MultipartStream(data=data, files=files, boundary=boundary)\n else:\n return URLEncodedStream(data=data)\n elif isinstance(data, (str, bytes)):\n return ByteStream(body=data)\n elif hasattr(data, \"__aiter__\"):\n data = typing.cast(typing.AsyncIterator[bytes], data)\n return AsyncIteratorStream(aiterator=data)\n elif hasattr(data, \"__iter__\"):\n data = typing.cast(typing.Iterator[bytes], data)\n return IteratorStream(iterator=data)\n\n raise TypeError(f\"Unexpected type for 'data', {type(data)!r}\")\n", "path": "httpx/_content_streams.py"}]} | 3,993 | 182 |
gh_patches_debug_38843 | rasdani/github-patches | git_diff | lutris__lutris-1049 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Restore monitor refresh rate
My monitor's default refresh rate is 60Hz, but I set it to 72Hz. When the game exits, Lutris does not restore the monitor refresh rate to 72Hz. Is this feature available? If not, can it be added?
--- END ISSUE ---
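As background for the issue above: when a mode is re-applied without an explicit rate, xrandr typically falls back to that mode's default refresh rate, which is why the 72Hz setting is lost. A minimal sketch of re-applying a mode together with its rate — the output name and values here are illustrative placeholders, not taken from the repository:

```python
import subprocess

# Hypothetical illustration: re-apply a display mode with an explicit refresh rate.
# "DP-1", "1920x1080" and "72.00" are placeholder values for this sketch.
def restore_mode(output="DP-1", mode="1920x1080", rate="72.00"):
    # Without "--rate", xrandr would pick the mode's default rate (e.g. 60Hz).
    subprocess.Popen([
        "xrandr",
        "--output", output,
        "--mode", mode,
        "--rate", rate,
    ]).communicate()
```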
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/util/display.py`
Content:
```
1 import re
2 import time
3 import subprocess
4
5 from lutris.util import system
6 from lutris.util.log import logger
7
8 XRANDR_CACHE = None
9 XRANDR_CACHE_SET_AT = None
10 XGAMMA_FOUND = None
11
12
13 def cached(function):
14 def wrapper():
15 global XRANDR_CACHE
16 global XRANDR_CACHE_SET_AT
17
18 if XRANDR_CACHE and time.time() - XRANDR_CACHE_SET_AT < 60:
19 return XRANDR_CACHE
20 XRANDR_CACHE = function()
21 XRANDR_CACHE_SET_AT = time.time()
22 return XRANDR_CACHE
23 return wrapper
24
25
26 @cached
27 def get_vidmodes():
28 xrandr_output = subprocess.Popen(["xrandr"],
29 stdout=subprocess.PIPE).communicate()[0]
30 return list([line for line in xrandr_output.decode().split("\n")])
31
32
33 def get_outputs():
34 """Return list of tuples containing output name and geometry."""
35 outputs = []
36 vid_modes = get_vidmodes()
37 if not vid_modes:
38 logger.error("xrandr didn't return anything")
39 return []
40 for line in vid_modes:
41 parts = line.split()
42 if len(parts) < 2:
43 continue
44 if parts[1] == 'connected':
45 if len(parts) == 2:
46 continue
47 if parts[2] != 'primary':
48 geom = parts[2]
49 rotate = parts[3]
50 else:
51 geom = parts[3]
52 rotate = parts[4]
53 if geom.startswith('('): # Screen turned off, no geometry
54 continue
55 if rotate.startswith('('): # Screen not rotated, no need to include
56 outputs.append((parts[0], geom, "normal"))
57 else:
58 if rotate in ("left", "right"):
59 geom_parts = geom.split('+')
60 x_y = geom_parts[0].split('x')
61 geom = "{}x{}+{}+{}".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])
62 outputs.append((parts[0], geom, rotate))
63 return outputs
64
65
66 def get_output_names():
67 return [output[0] for output in get_outputs()]
68
69
70 def turn_off_except(display):
71 for output in get_outputs():
72 if output[0] != display:
73 subprocess.Popen(["xrandr", "--output", output[0], "--off"])
74
75
76 def get_resolutions():
77 """Return the list of supported screen resolutions."""
78 resolution_list = []
79 for line in get_vidmodes():
80 if line.startswith(" "):
81 resolution_match = re.match('.*?(\d+x\d+).*', line)
82 if resolution_match:
83 resolution_list.append(resolution_match.groups()[0])
84 return resolution_list
85
86
87 def get_unique_resolutions():
88 """Return available resolutions, without duplicates and ordered with highest resolution first"""
89 return sorted(set(get_resolutions()), key=lambda x: int(x.split('x')[0]), reverse=True)
90
91
92 def get_current_resolution(monitor=0):
93 """Return the current resolution for the desktop."""
94 resolution = list()
95 for line in get_vidmodes():
96 if line.startswith(" ") and "*" in line:
97 resolution_match = re.match('.*?(\d+x\d+).*', line)
98 if resolution_match:
99 resolution.append(resolution_match.groups()[0])
100 if monitor == 'all':
101 return resolution
102 else:
103 return resolution[monitor]
104
105
106 def change_resolution(resolution):
107 """Change display resolution.
108
109 Takes a string for single monitors or a list of displays as returned
110 by get_outputs().
111 """
112 if not resolution:
113 logger.warning("No resolution provided")
114 return
115 if isinstance(resolution, str):
116 logger.debug("Switching resolution to %s", resolution)
117
118 if resolution not in get_resolutions():
119 logger.warning("Resolution %s doesn't exist." % resolution)
120 else:
121 subprocess.Popen(["xrandr", "-s", resolution])
122 else:
123 for display in resolution:
124 display_name = display[0]
125 logger.debug("Switching to %s on %s", display[1], display[0])
126 display_geom = display[1].split('+')
127 display_resolution = display_geom[0]
128 position = (display_geom[1], display_geom[2])
129
130 if (
131 len(display) > 2 and
132 display[2] in ('normal', 'left', 'right', 'inverted')
133 ):
134 rotation = display[2]
135 else:
136 rotation = "normal"
137
138 subprocess.Popen([
139 "xrandr",
140 "--output", display_name,
141 "--mode", display_resolution,
142 "--pos", "{}x{}".format(position[0], position[1]),
143 "--rotate", rotation
144 ]).communicate()
145
146
147 def restore_gamma():
148 """Restores gamma to a normal level."""
149 global XGAMMA_FOUND
150 if XGAMMA_FOUND is None:
151 XGAMMA_FOUND = bool(system.find_executable('xgamma'))
152 if XGAMMA_FOUND is True:
153 subprocess.Popen(["xgamma", "-gamma", "1.0"])
154 else:
155 logger.warning('xgamma is not available on your system')
156
157
158 def get_xrandr_version():
159 """Return the major and minor version of XRandR utility"""
160 pattern = "version"
161 xrandr_output = subprocess.Popen(["xrandr", "--version"],
162 stdout=subprocess.PIPE).communicate()[0].decode()
163 position = xrandr_output.find(pattern) + len(pattern)
164 version_str = xrandr_output[position:].strip().split(".")
165 try:
166 return {"major": int(version_str[0]), "minor": int(version_str[1])}
167 except ValueError:
168 logger.error("Can't find version in: %s", xrandr_output)
169 return {"major": 0, "minor": 0}
170
171
172 def get_providers():
173 """Return the list of available graphic cards"""
174 pattern = "name:"
175 providers = list()
176 version = get_xrandr_version()
177
178 if version["major"] == 1 and version["minor"] >= 4:
179 xrandr_output = subprocess.Popen(["xrandr", "--listproviders"],
180 stdout=subprocess.PIPE).communicate()[0].decode()
181 for line in xrandr_output.split("\n"):
182 if line.find("Provider ") != 0:
183 continue
184 position = line.find(pattern) + len(pattern)
185 providers.append(line[position:].strip())
186
187 return providers
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/util/display.py b/lutris/util/display.py
--- a/lutris/util/display.py
+++ b/lutris/util/display.py
@@ -34,6 +34,11 @@
"""Return list of tuples containing output name and geometry."""
outputs = []
vid_modes = get_vidmodes()
+ display=None
+ mode=None
+ position=None
+ rotate=None
+ refresh_rate=None
if not vid_modes:
logger.error("xrandr didn't return anything")
return []
@@ -53,16 +58,18 @@
if geom.startswith('('): # Screen turned off, no geometry
continue
if rotate.startswith('('): # Screen not rotated, no need to include
- outputs.append((parts[0], geom, "normal"))
- else:
- if rotate in ("left", "right"):
- geom_parts = geom.split('+')
- x_y = geom_parts[0].split('x')
- geom = "{}x{}+{}+{}".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])
- outputs.append((parts[0], geom, rotate))
+ rotate="normal"
+ geo_split=geom.split('+')
+ position=geo_split[1] + "x" + geo_split[2]
+ display=parts[0]
+ elif '*' in line:
+ mode=parts[0]
+ for number in parts:
+ if '*' in number:
+ refresh_rate=number[:5]
+ outputs.append((display, mode, position, rotate, refresh_rate))
return outputs
-
def get_output_names():
return [output[0] for output in get_outputs()]
@@ -123,24 +130,25 @@
for display in resolution:
display_name = display[0]
logger.debug("Switching to %s on %s", display[1], display[0])
- display_geom = display[1].split('+')
- display_resolution = display_geom[0]
- position = (display_geom[1], display_geom[2])
+ display_mode=display[1]
+ position=display[2]
+ refresh_rate=display[4]
if (
len(display) > 2 and
- display[2] in ('normal', 'left', 'right', 'inverted')
+ display[3] in ('normal', 'left', 'right', 'inverted')
):
- rotation = display[2]
+ rotation = display[3]
else:
rotation = "normal"
subprocess.Popen([
"xrandr",
"--output", display_name,
- "--mode", display_resolution,
- "--pos", "{}x{}".format(position[0], position[1]),
- "--rotate", rotation
+ "--mode", display_mode,
+ "--pos", position,
+ "--rotate", rotation,
+ "--rate", refresh_rate
]).communicate()
| {"golden_diff": "diff --git a/lutris/util/display.py b/lutris/util/display.py\n--- a/lutris/util/display.py\n+++ b/lutris/util/display.py\n@@ -34,6 +34,11 @@\n \"\"\"Return list of tuples containing output name and geometry.\"\"\"\n outputs = []\n vid_modes = get_vidmodes()\n+ display=None\n+ mode=None\n+ position=None\n+ rotate=None\n+ refresh_rate=None\n if not vid_modes:\n logger.error(\"xrandr didn't return anything\")\n return []\n@@ -53,16 +58,18 @@\n if geom.startswith('('): # Screen turned off, no geometry\n continue\n if rotate.startswith('('): # Screen not rotated, no need to include\n- outputs.append((parts[0], geom, \"normal\"))\n- else:\n- if rotate in (\"left\", \"right\"):\n- geom_parts = geom.split('+')\n- x_y = geom_parts[0].split('x')\n- geom = \"{}x{}+{}+{}\".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])\n- outputs.append((parts[0], geom, rotate))\n+ rotate=\"normal\"\n+ geo_split=geom.split('+')\n+ position=geo_split[1] + \"x\" + geo_split[2]\n+ display=parts[0]\n+ elif '*' in line:\n+ mode=parts[0]\n+ for number in parts:\n+ if '*' in number:\n+ refresh_rate=number[:5]\n+ outputs.append((display, mode, position, rotate, refresh_rate))\n return outputs\n \n-\n def get_output_names():\n return [output[0] for output in get_outputs()]\n \n@@ -123,24 +130,25 @@\n for display in resolution:\n display_name = display[0]\n logger.debug(\"Switching to %s on %s\", display[1], display[0])\n- display_geom = display[1].split('+')\n- display_resolution = display_geom[0]\n- position = (display_geom[1], display_geom[2])\n+ display_mode=display[1]\n+ position=display[2]\n+ refresh_rate=display[4]\n \n if (\n len(display) > 2 and\n- display[2] in ('normal', 'left', 'right', 'inverted')\n+ display[3] in ('normal', 'left', 'right', 'inverted')\n ):\n- rotation = display[2]\n+ rotation = display[3]\n else:\n rotation = \"normal\"\n \n subprocess.Popen([\n \"xrandr\",\n \"--output\", display_name,\n- \"--mode\", display_resolution,\n- \"--pos\", \"{}x{}\".format(position[0], position[1]),\n- \"--rotate\", rotation\n+ \"--mode\", display_mode,\n+ \"--pos\", position,\n+ \"--rotate\", rotation,\n+ \"--rate\", refresh_rate\n ]).communicate()\n", "issue": "Restore monitor refresh rate\nMy monitor's default refresh rate is 60Hz, but I set it to 72Hz. When game exits, lutris does not restore monitor refresh rate to 72Hz. Is this feature available? 
If not can it be added?\n", "before_files": [{"content": "import re\nimport time\nimport subprocess\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\n\nXRANDR_CACHE = None\nXRANDR_CACHE_SET_AT = None\nXGAMMA_FOUND = None\n\n\ndef cached(function):\n def wrapper():\n global XRANDR_CACHE\n global XRANDR_CACHE_SET_AT\n\n if XRANDR_CACHE and time.time() - XRANDR_CACHE_SET_AT < 60:\n return XRANDR_CACHE\n XRANDR_CACHE = function()\n XRANDR_CACHE_SET_AT = time.time()\n return XRANDR_CACHE\n return wrapper\n\n\n@cached\ndef get_vidmodes():\n xrandr_output = subprocess.Popen([\"xrandr\"],\n stdout=subprocess.PIPE).communicate()[0]\n return list([line for line in xrandr_output.decode().split(\"\\n\")])\n\n\ndef get_outputs():\n \"\"\"Return list of tuples containing output name and geometry.\"\"\"\n outputs = []\n vid_modes = get_vidmodes()\n if not vid_modes:\n logger.error(\"xrandr didn't return anything\")\n return []\n for line in vid_modes:\n parts = line.split()\n if len(parts) < 2:\n continue\n if parts[1] == 'connected':\n if len(parts) == 2:\n continue\n if parts[2] != 'primary':\n geom = parts[2]\n rotate = parts[3]\n else:\n geom = parts[3]\n rotate = parts[4]\n if geom.startswith('('): # Screen turned off, no geometry\n continue\n if rotate.startswith('('): # Screen not rotated, no need to include\n outputs.append((parts[0], geom, \"normal\"))\n else:\n if rotate in (\"left\", \"right\"):\n geom_parts = geom.split('+')\n x_y = geom_parts[0].split('x')\n geom = \"{}x{}+{}+{}\".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])\n outputs.append((parts[0], geom, rotate))\n return outputs\n\n\ndef get_output_names():\n return [output[0] for output in get_outputs()]\n\n\ndef turn_off_except(display):\n for output in get_outputs():\n if output[0] != display:\n subprocess.Popen([\"xrandr\", \"--output\", output[0], \"--off\"])\n\n\ndef get_resolutions():\n \"\"\"Return the list of supported screen resolutions.\"\"\"\n resolution_list = []\n for line in get_vidmodes():\n if line.startswith(\" \"):\n resolution_match = re.match('.*?(\\d+x\\d+).*', line)\n if resolution_match:\n resolution_list.append(resolution_match.groups()[0])\n return resolution_list\n\n\ndef get_unique_resolutions():\n \"\"\"Return available resolutions, without duplicates and ordered with highest resolution first\"\"\"\n return sorted(set(get_resolutions()), key=lambda x: int(x.split('x')[0]), reverse=True)\n\n\ndef get_current_resolution(monitor=0):\n \"\"\"Return the current resolution for the desktop.\"\"\"\n resolution = list()\n for line in get_vidmodes():\n if line.startswith(\" \") and \"*\" in line:\n resolution_match = re.match('.*?(\\d+x\\d+).*', line)\n if resolution_match:\n resolution.append(resolution_match.groups()[0])\n if monitor == 'all':\n return resolution\n else:\n return resolution[monitor]\n\n\ndef change_resolution(resolution):\n \"\"\"Change display resolution.\n\n Takes a string for single monitors or a list of displays as returned\n by get_outputs().\n \"\"\"\n if not resolution:\n logger.warning(\"No resolution provided\")\n return\n if isinstance(resolution, str):\n logger.debug(\"Switching resolution to %s\", resolution)\n\n if resolution not in get_resolutions():\n logger.warning(\"Resolution %s doesn't exist.\" % resolution)\n else:\n subprocess.Popen([\"xrandr\", \"-s\", resolution])\n else:\n for display in resolution:\n display_name = display[0]\n logger.debug(\"Switching to %s on %s\", display[1], display[0])\n display_geom = display[1].split('+')\n 
display_resolution = display_geom[0]\n position = (display_geom[1], display_geom[2])\n\n if (\n len(display) > 2 and\n display[2] in ('normal', 'left', 'right', 'inverted')\n ):\n rotation = display[2]\n else:\n rotation = \"normal\"\n\n subprocess.Popen([\n \"xrandr\",\n \"--output\", display_name,\n \"--mode\", display_resolution,\n \"--pos\", \"{}x{}\".format(position[0], position[1]),\n \"--rotate\", rotation\n ]).communicate()\n\n\ndef restore_gamma():\n \"\"\"Restores gamma to a normal level.\"\"\"\n global XGAMMA_FOUND\n if XGAMMA_FOUND is None:\n XGAMMA_FOUND = bool(system.find_executable('xgamma'))\n if XGAMMA_FOUND is True:\n subprocess.Popen([\"xgamma\", \"-gamma\", \"1.0\"])\n else:\n logger.warning('xgamma is not available on your system')\n\n\ndef get_xrandr_version():\n \"\"\"Return the major and minor version of XRandR utility\"\"\"\n pattern = \"version\"\n xrandr_output = subprocess.Popen([\"xrandr\", \"--version\"],\n stdout=subprocess.PIPE).communicate()[0].decode()\n position = xrandr_output.find(pattern) + len(pattern)\n version_str = xrandr_output[position:].strip().split(\".\")\n try:\n return {\"major\": int(version_str[0]), \"minor\": int(version_str[1])}\n except ValueError:\n logger.error(\"Can't find version in: %s\", xrandr_output)\n return {\"major\": 0, \"minor\": 0}\n\n\ndef get_providers():\n \"\"\"Return the list of available graphic cards\"\"\"\n pattern = \"name:\"\n providers = list()\n version = get_xrandr_version()\n\n if version[\"major\"] == 1 and version[\"minor\"] >= 4:\n xrandr_output = subprocess.Popen([\"xrandr\", \"--listproviders\"],\n stdout=subprocess.PIPE).communicate()[0].decode()\n for line in xrandr_output.split(\"\\n\"):\n if line.find(\"Provider \") != 0:\n continue\n position = line.find(pattern) + len(pattern)\n providers.append(line[position:].strip())\n\n return providers\n", "path": "lutris/util/display.py"}], "after_files": [{"content": "import re\nimport time\nimport subprocess\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\n\nXRANDR_CACHE = None\nXRANDR_CACHE_SET_AT = None\nXGAMMA_FOUND = None\n\n\ndef cached(function):\n def wrapper():\n global XRANDR_CACHE\n global XRANDR_CACHE_SET_AT\n\n if XRANDR_CACHE and time.time() - XRANDR_CACHE_SET_AT < 60:\n return XRANDR_CACHE\n XRANDR_CACHE = function()\n XRANDR_CACHE_SET_AT = time.time()\n return XRANDR_CACHE\n return wrapper\n\n\n@cached\ndef get_vidmodes():\n xrandr_output = subprocess.Popen([\"xrandr\"],\n stdout=subprocess.PIPE).communicate()[0]\n return list([line for line in xrandr_output.decode().split(\"\\n\")])\n\n\ndef get_outputs():\n \"\"\"Return list of tuples containing output name and geometry.\"\"\"\n outputs = []\n vid_modes = get_vidmodes()\n display=None\n mode=None\n position=None\n rotate=None\n refresh_rate=None\n if not vid_modes:\n logger.error(\"xrandr didn't return anything\")\n return []\n for line in vid_modes:\n parts = line.split()\n if len(parts) < 2:\n continue\n if parts[1] == 'connected':\n if len(parts) == 2:\n continue\n if parts[2] != 'primary':\n geom = parts[2]\n rotate = parts[3]\n else:\n geom = parts[3]\n rotate = parts[4]\n if geom.startswith('('): # Screen turned off, no geometry\n continue\n if rotate.startswith('('): # Screen not rotated, no need to include\n rotate=\"normal\"\n geo_split=geom.split('+')\n position=geo_split[1] + \"x\" + geo_split[2]\n display=parts[0]\n elif '*' in line:\n mode=parts[0]\n for number in parts:\n if '*' in number:\n refresh_rate=number[:5]\n outputs.append((display, mode, 
position, rotate, refresh_rate))\n return outputs\n\ndef get_output_names():\n return [output[0] for output in get_outputs()]\n\n\ndef turn_off_except(display):\n for output in get_outputs():\n if output[0] != display:\n subprocess.Popen([\"xrandr\", \"--output\", output[0], \"--off\"])\n\n\ndef get_resolutions():\n \"\"\"Return the list of supported screen resolutions.\"\"\"\n resolution_list = []\n for line in get_vidmodes():\n if line.startswith(\" \"):\n resolution_match = re.match('.*?(\\d+x\\d+).*', line)\n if resolution_match:\n resolution_list.append(resolution_match.groups()[0])\n return resolution_list\n\n\ndef get_unique_resolutions():\n \"\"\"Return available resolutions, without duplicates and ordered with highest resolution first\"\"\"\n return sorted(set(get_resolutions()), key=lambda x: int(x.split('x')[0]), reverse=True)\n\n\ndef get_current_resolution(monitor=0):\n \"\"\"Return the current resolution for the desktop.\"\"\"\n resolution = list()\n for line in get_vidmodes():\n if line.startswith(\" \") and \"*\" in line:\n resolution_match = re.match('.*?(\\d+x\\d+).*', line)\n if resolution_match:\n resolution.append(resolution_match.groups()[0])\n if monitor == 'all':\n return resolution\n else:\n return resolution[monitor]\n\n\ndef change_resolution(resolution):\n \"\"\"Change display resolution.\n\n Takes a string for single monitors or a list of displays as returned\n by get_outputs().\n \"\"\"\n if not resolution:\n logger.warning(\"No resolution provided\")\n return\n if isinstance(resolution, str):\n logger.debug(\"Switching resolution to %s\", resolution)\n\n if resolution not in get_resolutions():\n logger.warning(\"Resolution %s doesn't exist.\" % resolution)\n else:\n subprocess.Popen([\"xrandr\", \"-s\", resolution])\n else:\n for display in resolution:\n display_name = display[0]\n logger.debug(\"Switching to %s on %s\", display[1], display[0])\n display_mode=display[1]\n position=display[2]\n refresh_rate=display[4]\n\n if (\n len(display) > 2 and\n display[3] in ('normal', 'left', 'right', 'inverted')\n ):\n rotation = display[3]\n else:\n rotation = \"normal\"\n\n subprocess.Popen([\n \"xrandr\",\n \"--output\", display_name,\n \"--mode\", display_mode,\n \"--pos\", position,\n \"--rotate\", rotation,\n \"--rate\", refresh_rate\n ]).communicate()\n\n\ndef restore_gamma():\n \"\"\"Restores gamma to a normal level.\"\"\"\n global XGAMMA_FOUND\n if XGAMMA_FOUND is None:\n XGAMMA_FOUND = bool(system.find_executable('xgamma'))\n if XGAMMA_FOUND is True:\n subprocess.Popen([\"xgamma\", \"-gamma\", \"1.0\"])\n else:\n logger.warning('xgamma is not available on your system')\n\n\ndef get_xrandr_version():\n \"\"\"Return the major and minor version of XRandR utility\"\"\"\n pattern = \"version\"\n xrandr_output = subprocess.Popen([\"xrandr\", \"--version\"],\n stdout=subprocess.PIPE).communicate()[0].decode()\n position = xrandr_output.find(pattern) + len(pattern)\n version_str = xrandr_output[position:].strip().split(\".\")\n try:\n return {\"major\": int(version_str[0]), \"minor\": int(version_str[1])}\n except ValueError:\n logger.error(\"Can't find version in: %s\", xrandr_output)\n return {\"major\": 0, \"minor\": 0}\n\n\ndef get_providers():\n \"\"\"Return the list of available graphic cards\"\"\"\n pattern = \"name:\"\n providers = list()\n version = get_xrandr_version()\n\n if version[\"major\"] == 1 and version[\"minor\"] >= 4:\n xrandr_output = subprocess.Popen([\"xrandr\", \"--listproviders\"],\n stdout=subprocess.PIPE).communicate()[0].decode()\n for 
line in xrandr_output.split(\"\\n\"):\n if line.find(\"Provider \") != 0:\n continue\n position = line.find(pattern) + len(pattern)\n providers.append(line[position:].strip())\n\n return providers\n", "path": "lutris/util/display.py"}]} | 2,172 | 665 |
gh_patches_debug_20686 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3325 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider labreweries is broken
During the global build at 2021-07-21-14-42-39, spider **labreweries** failed with **0 features** and **88 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/labreweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson))
--- END ISSUE ---
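As background: a run that yields 0 features with errors on every page usually means the site's markup changed and the spider's selectors no longer match. A quick way to confirm this — the URL and selectors below are illustrative examples, not values verified against the live site — is an interactive session:

```python
# Illustrative check, run inside `scrapy shell "http://labeerhop.com/breweries/<id>/"`,
# where `response` is provided by the shell.
old = response.css('#secondary').extract()  # empty list if the old layout is gone
new = response.xpath('//*[@id="content"]/div/div[2]/div[3]').extract()
print(len(old), len(new))
```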
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/labreweries.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import re
4
5 from locations.items import GeojsonPointItem
6
7
8 class LaBreweriesSpider(scrapy.Spider):
9 name = "labreweries"
10 allowed_domains = ["labeerhop.com"]
11 start_urls = (
12 'http://labeerhop.com/breweries-sitemap.xml',
13 )
14
15 def store_hours(self, store_hours):
16 day_groups = []
17 this_day_group = None
18 for day in store_hours:
19 day = day.replace(' :-', ' 12:00 -')
20 day = day.split('<h5>Hours</h5>')[1].strip('<br>').strip('</aside>')
21 match = re.search(r'(closed|(\d{1,2})\S.\s*-\s*(\d{1,2})\S.)', day.lower())
22 open('/tmp/test1.txt', 'w').write(str(day))
23 (dow, f_hr, t_hr) = match.groups()
24 day_short = dow[:2]
25
26 f_hr = int(f_hr)
27 t_hr = int(t_hr)
28
29 hours = '{:02d}-{:02d}'.format(
30 f_hr,
31 t_hr,
32 )
33
34 if not this_day_group:
35 this_day_group = {
36 'from_day': day_short,
37 'to_day': day_short,
38 'hours': hours
39 }
40 elif this_day_group['hours'] != hours:
41 day_groups.append(this_day_group)
42 this_day_group = {
43 'from_day': day_short,
44 'to_day': day_short,
45 'hours': hours
46 }
47 elif this_day_group['hours'] == hours:
48 this_day_group['to_day'] = day_short
49
50 day_groups.append(this_day_group)
51
52 opening_hours = ""
53 if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
54 opening_hours = '24/7'
55 else:
56 for day_group in day_groups:
57 if day_group['from_day'] == day_group['to_day']:
58 opening_hours += '{from_day} {hours}; '.format(**day_group)
59 elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
60 opening_hours += '{hours}; '.format(**day_group)
61 else:
62 opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
63 opening_hours = opening_hours[:-2]
64
65 return opening_hours
66
67 def address(self, address):
68 if not address:
69 return None
70
71 addr_tags = {
72 "addr_full": address[0].split(',')[0].strip(),
73 "city": address[0].split(',')[1].strip(),
74 "state": address[0].split(' ')[-2].strip(),
75 "postcode": address[0].split(' ')[-1].strip(),
76 }
77
78 return addr_tags
79
80 def parse(self, response):
81 response.selector.remove_namespaces()
82 city_urls = response.xpath('//url/loc/text()').extract()
83 for path in city_urls:
84 if path not in "http://labeerhop.com/breweries/1056/":
85 yield scrapy.Request(
86 path.strip(),
87 callback=self.parse_store,
88 )
89
90 def parse_store(self, response):
91
92 properties = {
93 'website': response.xpath('//head/link[@rel="canonical"]/@href').extract_first(),
94 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip("['']"),
95 'opening_hours': re.sub(r'\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\t',' ').replace('\n','').replace('\r',' ')).strip(),
96 # 'lon': float(data['geo']['longitude']), # not lon on page
97 # 'lat': float(data['geo']['latitude']), # not lat on page
98 }
99
100 address = self.address(response.xpath('/html/body/div[1]/div[1]/aside/address/text()').extract())
101 if address:
102 properties.update(address)
103
104
105 yield GeojsonPointItem(**properties)
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/labreweries.py b/locations/spiders/labreweries.py
--- a/locations/spiders/labreweries.py
+++ b/locations/spiders/labreweries.py
@@ -92,7 +92,7 @@
properties = {
'website': response.xpath('//head/link[@rel="canonical"]/@href').extract_first(),
'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip("['']"),
- 'opening_hours': re.sub(r'\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\t',' ').replace('\n','').replace('\r',' ')).strip(),
+ 'opening_hours': re.sub(r'\s+', ' ', response.xpath('//*[@id="content"]/div/div[2]/div[3]').extract()[0].split('<h5 class="mb-2">Hours</h5>')[1].replace('<br>','').replace('</div>','').replace('\t',' ').replace('\n','').replace('\r',' ')).strip(),
# 'lon': float(data['geo']['longitude']), # not lon on page
# 'lat': float(data['geo']['latitude']), # not lat on page
}
| {"golden_diff": "diff --git a/locations/spiders/labreweries.py b/locations/spiders/labreweries.py\n--- a/locations/spiders/labreweries.py\n+++ b/locations/spiders/labreweries.py\n@@ -92,7 +92,7 @@\n properties = {\n 'website': response.xpath('//head/link[@rel=\"canonical\"]/@href').extract_first(),\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\"['']\"),\n- 'opening_hours': re.sub(r'\\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n+ 'opening_hours': re.sub(r'\\s+', ' ', response.xpath('//*[@id=\"content\"]/div/div[2]/div[3]').extract()[0].split('<h5 class=\"mb-2\">Hours</h5>')[1].replace('<br>','').replace('</div>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n # 'lon': float(data['geo']['longitude']), # not lon on page\n # 'lat': float(data['geo']['latitude']), # not lat on page\n }\n", "issue": "Spider labreweries is broken\nDuring the global build at 2021-07-21-14-42-39, spider **labreweries** failed with **0 features** and **88 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/labreweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass LaBreweriesSpider(scrapy.Spider):\n name = \"labreweries\"\n allowed_domains = [\"labeerhop.com\"]\n start_urls = (\n 'http://labeerhop.com/breweries-sitemap.xml',\n )\n\n def store_hours(self, store_hours):\n day_groups = []\n this_day_group = None\n for day in store_hours:\n day = day.replace(' :-', ' 12:00 -')\n day = day.split('<h5>Hours</h5>')[1].strip('<br>').strip('</aside>')\n match = re.search(r'(closed|(\\d{1,2})\\S.\\s*-\\s*(\\d{1,2})\\S.)', day.lower())\n open('/tmp/test1.txt', 'w').write(str(day))\n (dow, f_hr, t_hr) = match.groups()\n day_short = dow[:2]\n\n f_hr = int(f_hr)\n t_hr = int(t_hr)\n\n hours = '{:02d}-{:02d}'.format(\n f_hr,\n t_hr,\n )\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day_short,\n 'to_day': day_short,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day_short,\n 'to_day': day_short,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day_short\n\n day_groups.append(this_day_group)\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n\n def address(self, address):\n if not address:\n return None\n\n addr_tags = {\n \"addr_full\": address[0].split(',')[0].strip(),\n \"city\": address[0].split(',')[1].strip(),\n \"state\": address[0].split(' ')[-2].strip(),\n \"postcode\": address[0].split(' ')[-1].strip(),\n 
}\n\n return addr_tags\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path not in \"http://labeerhop.com/breweries/1056/\":\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n properties = {\n 'website': response.xpath('//head/link[@rel=\"canonical\"]/@href').extract_first(),\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\"['']\"),\n 'opening_hours': re.sub(r'\\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n # 'lon': float(data['geo']['longitude']), # not lon on page\n # 'lat': float(data['geo']['latitude']), # not lat on page\n }\n\n address = self.address(response.xpath('/html/body/div[1]/div[1]/aside/address/text()').extract())\n if address:\n properties.update(address)\n\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/labreweries.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass LaBreweriesSpider(scrapy.Spider):\n name = \"labreweries\"\n allowed_domains = [\"labeerhop.com\"]\n start_urls = (\n 'http://labeerhop.com/breweries-sitemap.xml',\n )\n\n def store_hours(self, store_hours):\n day_groups = []\n this_day_group = None\n for day in store_hours:\n day = day.replace(' :-', ' 12:00 -')\n day = day.split('<h5>Hours</h5>')[1].strip('<br>').strip('</aside>')\n match = re.search(r'(closed|(\\d{1,2})\\S.\\s*-\\s*(\\d{1,2})\\S.)', day.lower())\n open('/tmp/test1.txt', 'w').write(str(day))\n (dow, f_hr, t_hr) = match.groups()\n day_short = dow[:2]\n\n f_hr = int(f_hr)\n t_hr = int(t_hr)\n\n hours = '{:02d}-{:02d}'.format(\n f_hr,\n t_hr,\n )\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day_short,\n 'to_day': day_short,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day_short,\n 'to_day': day_short,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day_short\n\n day_groups.append(this_day_group)\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n\n def address(self, address):\n if not address:\n return None\n\n addr_tags = {\n \"addr_full\": address[0].split(',')[0].strip(),\n \"city\": address[0].split(',')[1].strip(),\n \"state\": address[0].split(' ')[-2].strip(),\n \"postcode\": address[0].split(' ')[-1].strip(),\n }\n\n return addr_tags\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path not in \"http://labeerhop.com/breweries/1056/\":\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n properties = {\n 'website': 
response.xpath('//head/link[@rel=\"canonical\"]/@href').extract_first(),\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\"['']\"),\n 'opening_hours': re.sub(r'\\s+', ' ', response.xpath('//*[@id=\"content\"]/div/div[2]/div[3]').extract()[0].split('<h5 class=\"mb-2\">Hours</h5>')[1].replace('<br>','').replace('</div>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n # 'lon': float(data['geo']['longitude']), # not lon on page\n # 'lat': float(data['geo']['latitude']), # not lat on page\n }\n\n address = self.address(response.xpath('/html/body/div[1]/div[1]/aside/address/text()').extract())\n if address:\n properties.update(address)\n\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/labreweries.py"}]} | 1,609 | 299 |
gh_patches_debug_12898 | rasdani/github-patches | git_diff | CTFd__CTFd-1424 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Prevent importing data that doesn't meet our expected format
People like to manipulate import data even though it is not a great idea. This often leads to problems where the user is trying to import data that doesn't match our expected format.
We should prevent that automatically.
--- END ISSUE ---
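As background for the issue above: since the export path writes every table under a `db/` folder inside the zip, a cheap structural check on the archive's member list can reject malformed backups before anything touches the database. A minimal standalone sketch of that idea — not the actual CTFd helper:

```python
import os
import zipfile

def validate_backup(path):
    # Minimal illustration: require the "db/" folder that an export always contains.
    with zipfile.ZipFile(path) as backup:
        member_dirs = {os.path.split(name)[0] for name in backup.namelist() if "/" in name}
        if "db" not in member_dirs:
            raise ValueError(
                'Missing "db" folder; the backup is malformed or was not produced by an export.'
            )
```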
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/utils/exports/__init__.py`
Content:
```
1 import datetime
2 import json
3 import os
4 import re
5 import tempfile
6 import zipfile
7
8 import dataset
9 import six
10 from alembic.util import CommandError
11 from flask import current_app as app
12 from flask_migrate import upgrade as migration_upgrade
13 from sqlalchemy.exc import OperationalError, ProgrammingError
14 from sqlalchemy.sql import sqltypes
15
16 from CTFd import __version__ as CTFD_VERSION
17 from CTFd.cache import cache
18 from CTFd.models import db, get_class_by_tablename
19 from CTFd.plugins import get_plugin_names
20 from CTFd.plugins.migrations import upgrade as plugin_upgrade, current as plugin_current
21 from CTFd.utils import get_app_config, set_config
22 from CTFd.utils.exports.freeze import freeze_export
23 from CTFd.utils.migrations import (
24 create_database,
25 drop_database,
26 get_current_revision,
27 stamp_latest_revision,
28 )
29 from CTFd.utils.uploads import get_uploader
30
31
32 def export_ctf():
33 # TODO: For some unknown reason dataset is only able to see alembic_version during tests.
34 # Even using a real sqlite database. This makes this test impossible to pass in sqlite.
35 db = dataset.connect(get_app_config("SQLALCHEMY_DATABASE_URI"))
36
37 # Backup database
38 backup = tempfile.NamedTemporaryFile()
39
40 backup_zip = zipfile.ZipFile(backup, "w")
41
42 tables = db.tables
43 for table in tables:
44 result = db[table].all()
45 result_file = six.BytesIO()
46 freeze_export(result, fileobj=result_file)
47 result_file.seek(0)
48 backup_zip.writestr("db/{}.json".format(table), result_file.read())
49
50 # # Guarantee that alembic_version is saved into the export
51 if "alembic_version" not in tables:
52 result = {
53 "count": 1,
54 "results": [{"version_num": get_current_revision()}],
55 "meta": {},
56 }
57 result_file = six.StringIO()
58 json.dump(result, result_file)
59 result_file.seek(0)
60 backup_zip.writestr("db/alembic_version.json", result_file.read())
61
62 # Backup uploads
63 uploader = get_uploader()
64 uploader.sync()
65
66 upload_folder = os.path.join(
67 os.path.normpath(app.root_path), app.config.get("UPLOAD_FOLDER")
68 )
69 for root, dirs, files in os.walk(upload_folder):
70 for file in files:
71 parent_dir = os.path.basename(root)
72 backup_zip.write(
73 os.path.join(root, file),
74 arcname=os.path.join("uploads", parent_dir, file),
75 )
76
77 backup_zip.close()
78 backup.seek(0)
79 return backup
80
81
82 def import_ctf(backup, erase=True):
83 if not zipfile.is_zipfile(backup):
84 raise zipfile.BadZipfile
85
86 backup = zipfile.ZipFile(backup)
87
88 members = backup.namelist()
89 max_content_length = get_app_config("MAX_CONTENT_LENGTH")
90 for f in members:
91 if f.startswith("/") or ".." in f:
92 # Abort on malicious zip files
93 raise zipfile.BadZipfile
94 info = backup.getinfo(f)
95 if max_content_length:
96 if info.file_size > max_content_length:
97 raise zipfile.LargeZipFile
98
99 try:
100 alembic_version = json.loads(backup.open("db/alembic_version.json").read())
101 alembic_version = alembic_version["results"][0]["version_num"]
102 except Exception:
103 raise Exception(
104 "Could not determine appropriate database version. This backup cannot be automatically imported."
105 )
106
107 # Check if the alembic version is from CTFd 1.x
108 if alembic_version in (
109 "1ec4a28fe0ff",
110 "2539d8b5082e",
111 "7e9efd084c5a",
112 "87733981ca0e",
113 "a4e30c94c360",
114 "c12d2a1b0926",
115 "c7225db614c1",
116 "cb3cfcc47e2f",
117 "cbf5620f8e15",
118 "d5a224bf5862",
119 "d6514ec92738",
120 "dab615389702",
121 "e62fd69bd417",
122 ):
123 raise Exception(
124 "The version of CTFd that this backup is from is too old to be automatically imported."
125 )
126
127 if erase:
128 # Clear out existing connections to release any locks
129 db.session.close()
130 db.engine.dispose()
131
132 # Drop database and recreate it to get to a clean state
133 drop_database()
134 create_database()
135 # We explicitly do not want to upgrade or stamp here.
136 # The import will have this information.
137
138 side_db = dataset.connect(get_app_config("SQLALCHEMY_DATABASE_URI"))
139 sqlite = get_app_config("SQLALCHEMY_DATABASE_URI").startswith("sqlite")
140 postgres = get_app_config("SQLALCHEMY_DATABASE_URI").startswith("postgres")
141
142 try:
143 if postgres:
144 side_db.query("SET session_replication_role=replica;")
145 else:
146 side_db.query("SET FOREIGN_KEY_CHECKS=0;")
147 except Exception:
148 print("Failed to disable foreign key checks. Continuing.")
149
150 first = [
151 "db/teams.json",
152 "db/users.json",
153 "db/challenges.json",
154 "db/dynamic_challenge.json",
155 "db/flags.json",
156 "db/hints.json",
157 "db/unlocks.json",
158 "db/awards.json",
159 "db/tags.json",
160 "db/submissions.json",
161 "db/solves.json",
162 "db/files.json",
163 "db/notifications.json",
164 "db/pages.json",
165 "db/tracking.json",
166 "db/config.json",
167 ]
168
169 # We want to insert certain database tables first so we are specifying
170 # the order with a list. The leftover tables are tables that are from a
171 # plugin (more likely) or a table where we do not care about insertion order
172 for item in first:
173 if item in members:
174 members.remove(item)
175
176 # Upgrade the database to the point in time that the import was taken from
177 migration_upgrade(revision=alembic_version)
178
179 members.remove("db/alembic_version.json")
180
181 # Combine the database insertion code into a function so that we can pause
182 # insertion between official database tables and plugin tables
183 def insertion(table_filenames):
184 for member in table_filenames:
185 if member.startswith("db/"):
186 table_name = member[3:-5]
187
188 try:
189 # Try to open a file but skip if it doesn't exist.
190 data = backup.open(member).read()
191 except KeyError:
192 continue
193
194 if data:
195 table = side_db[table_name]
196
197 saved = json.loads(data)
198 for entry in saved["results"]:
199 # This is a hack to get SQLite to properly accept datetime values from dataset
200 # See Issue #246
201 if sqlite:
202 direct_table = get_class_by_tablename(table.name)
203 for k, v in entry.items():
204 if isinstance(v, six.string_types):
205 # We only want to apply this hack to columns that are expecting a datetime object
206 try:
207 is_dt_column = (
208 type(getattr(direct_table, k).type)
209 == sqltypes.DateTime
210 )
211 except AttributeError:
212 is_dt_column = False
213
214 # If the table is expecting a datetime, we should check if the string is one and convert it
215 if is_dt_column:
216 match = re.match(
217 r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d",
218 v,
219 )
220 if match:
221 entry[k] = datetime.datetime.strptime(
222 v, "%Y-%m-%dT%H:%M:%S.%f"
223 )
224 continue
225 match = re.match(
226 r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}", v
227 )
228 if match:
229 entry[k] = datetime.datetime.strptime(
230 v, "%Y-%m-%dT%H:%M:%S"
231 )
232 continue
233 # From v2.0.0 to v2.1.0 requirements could have been a string or JSON because of a SQLAlchemy issue
234 # This is a hack to ensure we can still accept older exports. See #867
235 if member in (
236 "db/challenges.json",
237 "db/hints.json",
238 "db/awards.json",
239 ):
240 requirements = entry.get("requirements")
241 if requirements and isinstance(
242 requirements, six.string_types
243 ):
244 entry["requirements"] = json.loads(requirements)
245
246 try:
247 table.insert(entry)
248 except ProgrammingError:
249 # MariaDB does not like JSON objects and prefers strings because it internally
250 # represents JSON with LONGTEXT.
251 # See Issue #973
252 requirements = entry.get("requirements")
253 if requirements and isinstance(requirements, dict):
254 entry["requirements"] = json.dumps(requirements)
255 table.insert(entry)
256
257 db.session.commit()
258 if postgres:
259 # This command is to set the next primary key ID for the re-inserted tables in Postgres. However,
260 # this command is very difficult to translate into SQLAlchemy code. Because Postgres is not
261 # officially supported, no major work will go into this functionality.
262 # https://stackoverflow.com/a/37972960
263 if '"' not in table_name and "'" not in table_name:
264 query = "SELECT setval(pg_get_serial_sequence('{table_name}', 'id'), coalesce(max(id)+1,1), false) FROM \"{table_name}\"".format( # nosec
265 table_name=table_name
266 )
267 side_db.engine.execute(query)
268 else:
269 raise Exception(
270 "Table name {table_name} contains quotes".format(
271 table_name=table_name
272 )
273 )
274
275 # Insert data from official tables
276 insertion(first)
277
278 # Create tables created by plugins
279 try:
280 # Run plugin migrations
281 plugins = get_plugin_names()
282 try:
283 for plugin in plugins:
284 revision = plugin_current(plugin_name=plugin)
285 plugin_upgrade(plugin_name=plugin, revision=revision)
286 finally:
287 # Create tables that don't have migrations
288 app.db.create_all()
289 except OperationalError as e:
290 if not postgres:
291 raise e
292 else:
293 print("Allowing error during app.db.create_all() due to Postgres")
294
295 # Insert data for plugin tables
296 insertion(members)
297
298 # Bring plugin tables up to head revision
299 plugins = get_plugin_names()
300 for plugin in plugins:
301 plugin_upgrade(plugin_name=plugin)
302
303 # Extracting files
304 files = [f for f in backup.namelist() if f.startswith("uploads/")]
305 uploader = get_uploader()
306 for f in files:
307 filename = f.split(os.sep, 1)
308
309 if (
310 len(filename) < 2 or os.path.basename(filename[1]) == ""
311 ): # just an empty uploads directory (e.g. uploads/) or any directory
312 continue
313
314 filename = filename[1] # Get the second entry in the list (the actual filename)
315 source = backup.open(f)
316 uploader.store(fileobj=source, filename=filename)
317
318 # Alembic sqlite support is lacking so we should just create_all anyway
319 try:
320 migration_upgrade(revision="head")
321 except (OperationalError, CommandError, RuntimeError, SystemExit, Exception):
322 app.db.create_all()
323 stamp_latest_revision()
324
325 try:
326 if postgres:
327 side_db.query("SET session_replication_role=DEFAULT;")
328 else:
329 side_db.query("SET FOREIGN_KEY_CHECKS=1;")
330 except Exception:
331 print("Failed to enable foreign key checks. Continuing.")
332
333 # Invalidate all cached data
334 cache.clear()
335
336 # Set default theme in case the current instance or the import does not provide it
337 set_config("ctf_theme", "core")
338 set_config("ctf_version", CTFD_VERSION)
339
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/utils/exports/__init__.py b/CTFd/utils/exports/__init__.py
--- a/CTFd/utils/exports/__init__.py
+++ b/CTFd/utils/exports/__init__.py
@@ -96,6 +96,14 @@
if info.file_size > max_content_length:
raise zipfile.LargeZipFile
+ # Get list of directories in zipfile
+ member_dirs = [os.path.split(m)[0] for m in members if "/" in m]
+ if "db" not in member_dirs:
+ raise Exception(
+ 'CTFd couldn\'t find the "db" folder in this backup. '
+ "The backup may be malformed or corrupted and the import process cannot continue."
+ )
+
try:
alembic_version = json.loads(backup.open("db/alembic_version.json").read())
alembic_version = alembic_version["results"][0]["version_num"]
| {"golden_diff": "diff --git a/CTFd/utils/exports/__init__.py b/CTFd/utils/exports/__init__.py\n--- a/CTFd/utils/exports/__init__.py\n+++ b/CTFd/utils/exports/__init__.py\n@@ -96,6 +96,14 @@\n if info.file_size > max_content_length:\n raise zipfile.LargeZipFile\n \n+ # Get list of directories in zipfile\n+ member_dirs = [os.path.split(m)[0] for m in members if \"/\" in m]\n+ if \"db\" not in member_dirs:\n+ raise Exception(\n+ 'CTFd couldn\\'t find the \"db\" folder in this backup. '\n+ \"The backup may be malformed or corrupted and the import process cannot continue.\"\n+ )\n+\n try:\n alembic_version = json.loads(backup.open(\"db/alembic_version.json\").read())\n alembic_version = alembic_version[\"results\"][0][\"version_num\"]\n", "issue": "Prevent importing data that doesn't meet our expected format\nPeople like to manipulate import data despite it being not a great idea. This often leads to problems where the user is trying to import data that doesn't match our expected format. \r\n\r\nWe should prevent that automatically. \n", "before_files": [{"content": "import datetime\nimport json\nimport os\nimport re\nimport tempfile\nimport zipfile\n\nimport dataset\nimport six\nfrom alembic.util import CommandError\nfrom flask import current_app as app\nfrom flask_migrate import upgrade as migration_upgrade\nfrom sqlalchemy.exc import OperationalError, ProgrammingError\nfrom sqlalchemy.sql import sqltypes\n\nfrom CTFd import __version__ as CTFD_VERSION\nfrom CTFd.cache import cache\nfrom CTFd.models import db, get_class_by_tablename\nfrom CTFd.plugins import get_plugin_names\nfrom CTFd.plugins.migrations import upgrade as plugin_upgrade, current as plugin_current\nfrom CTFd.utils import get_app_config, set_config\nfrom CTFd.utils.exports.freeze import freeze_export\nfrom CTFd.utils.migrations import (\n create_database,\n drop_database,\n get_current_revision,\n stamp_latest_revision,\n)\nfrom CTFd.utils.uploads import get_uploader\n\n\ndef export_ctf():\n # TODO: For some unknown reason dataset is only able to see alembic_version during tests.\n # Even using a real sqlite database. 
This makes this test impossible to pass in sqlite.\n db = dataset.connect(get_app_config(\"SQLALCHEMY_DATABASE_URI\"))\n\n # Backup database\n backup = tempfile.NamedTemporaryFile()\n\n backup_zip = zipfile.ZipFile(backup, \"w\")\n\n tables = db.tables\n for table in tables:\n result = db[table].all()\n result_file = six.BytesIO()\n freeze_export(result, fileobj=result_file)\n result_file.seek(0)\n backup_zip.writestr(\"db/{}.json\".format(table), result_file.read())\n\n # # Guarantee that alembic_version is saved into the export\n if \"alembic_version\" not in tables:\n result = {\n \"count\": 1,\n \"results\": [{\"version_num\": get_current_revision()}],\n \"meta\": {},\n }\n result_file = six.StringIO()\n json.dump(result, result_file)\n result_file.seek(0)\n backup_zip.writestr(\"db/alembic_version.json\", result_file.read())\n\n # Backup uploads\n uploader = get_uploader()\n uploader.sync()\n\n upload_folder = os.path.join(\n os.path.normpath(app.root_path), app.config.get(\"UPLOAD_FOLDER\")\n )\n for root, dirs, files in os.walk(upload_folder):\n for file in files:\n parent_dir = os.path.basename(root)\n backup_zip.write(\n os.path.join(root, file),\n arcname=os.path.join(\"uploads\", parent_dir, file),\n )\n\n backup_zip.close()\n backup.seek(0)\n return backup\n\n\ndef import_ctf(backup, erase=True):\n if not zipfile.is_zipfile(backup):\n raise zipfile.BadZipfile\n\n backup = zipfile.ZipFile(backup)\n\n members = backup.namelist()\n max_content_length = get_app_config(\"MAX_CONTENT_LENGTH\")\n for f in members:\n if f.startswith(\"/\") or \"..\" in f:\n # Abort on malicious zip files\n raise zipfile.BadZipfile\n info = backup.getinfo(f)\n if max_content_length:\n if info.file_size > max_content_length:\n raise zipfile.LargeZipFile\n\n try:\n alembic_version = json.loads(backup.open(\"db/alembic_version.json\").read())\n alembic_version = alembic_version[\"results\"][0][\"version_num\"]\n except Exception:\n raise Exception(\n \"Could not determine appropriate database version. This backup cannot be automatically imported.\"\n )\n\n # Check if the alembic version is from CTFd 1.x\n if alembic_version in (\n \"1ec4a28fe0ff\",\n \"2539d8b5082e\",\n \"7e9efd084c5a\",\n \"87733981ca0e\",\n \"a4e30c94c360\",\n \"c12d2a1b0926\",\n \"c7225db614c1\",\n \"cb3cfcc47e2f\",\n \"cbf5620f8e15\",\n \"d5a224bf5862\",\n \"d6514ec92738\",\n \"dab615389702\",\n \"e62fd69bd417\",\n ):\n raise Exception(\n \"The version of CTFd that this backup is from is too old to be automatically imported.\"\n )\n\n if erase:\n # Clear out existing connections to release any locks\n db.session.close()\n db.engine.dispose()\n\n # Drop database and recreate it to get to a clean state\n drop_database()\n create_database()\n # We explicitly do not want to upgrade or stamp here.\n # The import will have this information.\n\n side_db = dataset.connect(get_app_config(\"SQLALCHEMY_DATABASE_URI\"))\n sqlite = get_app_config(\"SQLALCHEMY_DATABASE_URI\").startswith(\"sqlite\")\n postgres = get_app_config(\"SQLALCHEMY_DATABASE_URI\").startswith(\"postgres\")\n\n try:\n if postgres:\n side_db.query(\"SET session_replication_role=replica;\")\n else:\n side_db.query(\"SET FOREIGN_KEY_CHECKS=0;\")\n except Exception:\n print(\"Failed to disable foreign key checks. 
Continuing.\")\n\n first = [\n \"db/teams.json\",\n \"db/users.json\",\n \"db/challenges.json\",\n \"db/dynamic_challenge.json\",\n \"db/flags.json\",\n \"db/hints.json\",\n \"db/unlocks.json\",\n \"db/awards.json\",\n \"db/tags.json\",\n \"db/submissions.json\",\n \"db/solves.json\",\n \"db/files.json\",\n \"db/notifications.json\",\n \"db/pages.json\",\n \"db/tracking.json\",\n \"db/config.json\",\n ]\n\n # We want to insert certain database tables first so we are specifying\n # the order with a list. The leftover tables are tables that are from a\n # plugin (more likely) or a table where we do not care about insertion order\n for item in first:\n if item in members:\n members.remove(item)\n\n # Upgrade the database to the point in time that the import was taken from\n migration_upgrade(revision=alembic_version)\n\n members.remove(\"db/alembic_version.json\")\n\n # Combine the database insertion code into a function so that we can pause\n # insertion between official database tables and plugin tables\n def insertion(table_filenames):\n for member in table_filenames:\n if member.startswith(\"db/\"):\n table_name = member[3:-5]\n\n try:\n # Try to open a file but skip if it doesn't exist.\n data = backup.open(member).read()\n except KeyError:\n continue\n\n if data:\n table = side_db[table_name]\n\n saved = json.loads(data)\n for entry in saved[\"results\"]:\n # This is a hack to get SQLite to properly accept datetime values from dataset\n # See Issue #246\n if sqlite:\n direct_table = get_class_by_tablename(table.name)\n for k, v in entry.items():\n if isinstance(v, six.string_types):\n # We only want to apply this hack to columns that are expecting a datetime object\n try:\n is_dt_column = (\n type(getattr(direct_table, k).type)\n == sqltypes.DateTime\n )\n except AttributeError:\n is_dt_column = False\n\n # If the table is expecting a datetime, we should check if the string is one and convert it\n if is_dt_column:\n match = re.match(\n r\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d\",\n v,\n )\n if match:\n entry[k] = datetime.datetime.strptime(\n v, \"%Y-%m-%dT%H:%M:%S.%f\"\n )\n continue\n match = re.match(\n r\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\", v\n )\n if match:\n entry[k] = datetime.datetime.strptime(\n v, \"%Y-%m-%dT%H:%M:%S\"\n )\n continue\n # From v2.0.0 to v2.1.0 requirements could have been a string or JSON because of a SQLAlchemy issue\n # This is a hack to ensure we can still accept older exports. See #867\n if member in (\n \"db/challenges.json\",\n \"db/hints.json\",\n \"db/awards.json\",\n ):\n requirements = entry.get(\"requirements\")\n if requirements and isinstance(\n requirements, six.string_types\n ):\n entry[\"requirements\"] = json.loads(requirements)\n\n try:\n table.insert(entry)\n except ProgrammingError:\n # MariaDB does not like JSON objects and prefers strings because it internally\n # represents JSON with LONGTEXT.\n # See Issue #973\n requirements = entry.get(\"requirements\")\n if requirements and isinstance(requirements, dict):\n entry[\"requirements\"] = json.dumps(requirements)\n table.insert(entry)\n\n db.session.commit()\n if postgres:\n # This command is to set the next primary key ID for the re-inserted tables in Postgres. However,\n # this command is very difficult to translate into SQLAlchemy code. 
Because Postgres is not\n # officially supported, no major work will go into this functionality.\n # https://stackoverflow.com/a/37972960\n if '\"' not in table_name and \"'\" not in table_name:\n query = \"SELECT setval(pg_get_serial_sequence('{table_name}', 'id'), coalesce(max(id)+1,1), false) FROM \\\"{table_name}\\\"\".format( # nosec\n table_name=table_name\n )\n side_db.engine.execute(query)\n else:\n raise Exception(\n \"Table name {table_name} contains quotes\".format(\n table_name=table_name\n )\n )\n\n # Insert data from official tables\n insertion(first)\n\n # Create tables created by plugins\n try:\n # Run plugin migrations\n plugins = get_plugin_names()\n try:\n for plugin in plugins:\n revision = plugin_current(plugin_name=plugin)\n plugin_upgrade(plugin_name=plugin, revision=revision)\n finally:\n # Create tables that don't have migrations\n app.db.create_all()\n except OperationalError as e:\n if not postgres:\n raise e\n else:\n print(\"Allowing error during app.db.create_all() due to Postgres\")\n\n # Insert data for plugin tables\n insertion(members)\n\n # Bring plugin tables up to head revision\n plugins = get_plugin_names()\n for plugin in plugins:\n plugin_upgrade(plugin_name=plugin)\n\n # Extracting files\n files = [f for f in backup.namelist() if f.startswith(\"uploads/\")]\n uploader = get_uploader()\n for f in files:\n filename = f.split(os.sep, 1)\n\n if (\n len(filename) < 2 or os.path.basename(filename[1]) == \"\"\n ): # just an empty uploads directory (e.g. uploads/) or any directory\n continue\n\n filename = filename[1] # Get the second entry in the list (the actual filename)\n source = backup.open(f)\n uploader.store(fileobj=source, filename=filename)\n\n # Alembic sqlite support is lacking so we should just create_all anyway\n try:\n migration_upgrade(revision=\"head\")\n except (OperationalError, CommandError, RuntimeError, SystemExit, Exception):\n app.db.create_all()\n stamp_latest_revision()\n\n try:\n if postgres:\n side_db.query(\"SET session_replication_role=DEFAULT;\")\n else:\n side_db.query(\"SET FOREIGN_KEY_CHECKS=1;\")\n except Exception:\n print(\"Failed to enable foreign key checks. Continuing.\")\n\n # Invalidate all cached data\n cache.clear()\n\n # Set default theme in case the current instance or the import does not provide it\n set_config(\"ctf_theme\", \"core\")\n set_config(\"ctf_version\", CTFD_VERSION)\n", "path": "CTFd/utils/exports/__init__.py"}], "after_files": [{"content": "import datetime\nimport json\nimport os\nimport re\nimport tempfile\nimport zipfile\n\nimport dataset\nimport six\nfrom alembic.util import CommandError\nfrom flask import current_app as app\nfrom flask_migrate import upgrade as migration_upgrade\nfrom sqlalchemy.exc import OperationalError, ProgrammingError\nfrom sqlalchemy.sql import sqltypes\n\nfrom CTFd import __version__ as CTFD_VERSION\nfrom CTFd.cache import cache\nfrom CTFd.models import db, get_class_by_tablename\nfrom CTFd.plugins import get_plugin_names\nfrom CTFd.plugins.migrations import upgrade as plugin_upgrade, current as plugin_current\nfrom CTFd.utils import get_app_config, set_config\nfrom CTFd.utils.exports.freeze import freeze_export\nfrom CTFd.utils.migrations import (\n create_database,\n drop_database,\n get_current_revision,\n stamp_latest_revision,\n)\nfrom CTFd.utils.uploads import get_uploader\n\n\ndef export_ctf():\n # TODO: For some unknown reason dataset is only able to see alembic_version during tests.\n # Even using a real sqlite database. 
This makes this test impossible to pass in sqlite.\n db = dataset.connect(get_app_config(\"SQLALCHEMY_DATABASE_URI\"))\n\n # Backup database\n backup = tempfile.NamedTemporaryFile()\n\n backup_zip = zipfile.ZipFile(backup, \"w\")\n\n tables = db.tables\n for table in tables:\n result = db[table].all()\n result_file = six.BytesIO()\n freeze_export(result, fileobj=result_file)\n result_file.seek(0)\n backup_zip.writestr(\"db/{}.json\".format(table), result_file.read())\n\n # # Guarantee that alembic_version is saved into the export\n if \"alembic_version\" not in tables:\n result = {\n \"count\": 1,\n \"results\": [{\"version_num\": get_current_revision()}],\n \"meta\": {},\n }\n result_file = six.StringIO()\n json.dump(result, result_file)\n result_file.seek(0)\n backup_zip.writestr(\"db/alembic_version.json\", result_file.read())\n\n # Backup uploads\n uploader = get_uploader()\n uploader.sync()\n\n upload_folder = os.path.join(\n os.path.normpath(app.root_path), app.config.get(\"UPLOAD_FOLDER\")\n )\n for root, dirs, files in os.walk(upload_folder):\n for file in files:\n parent_dir = os.path.basename(root)\n backup_zip.write(\n os.path.join(root, file),\n arcname=os.path.join(\"uploads\", parent_dir, file),\n )\n\n backup_zip.close()\n backup.seek(0)\n return backup\n\n\ndef import_ctf(backup, erase=True):\n if not zipfile.is_zipfile(backup):\n raise zipfile.BadZipfile\n\n backup = zipfile.ZipFile(backup)\n\n members = backup.namelist()\n max_content_length = get_app_config(\"MAX_CONTENT_LENGTH\")\n for f in members:\n if f.startswith(\"/\") or \"..\" in f:\n # Abort on malicious zip files\n raise zipfile.BadZipfile\n info = backup.getinfo(f)\n if max_content_length:\n if info.file_size > max_content_length:\n raise zipfile.LargeZipFile\n\n # Get list of directories in zipfile\n member_dirs = [os.path.split(m)[0] for m in members if \"/\" in m]\n if \"db\" not in member_dirs:\n raise Exception(\n 'CTFd couldn\\'t find the \"db\" folder in this backup. '\n \"The backup may be malformed or corrupted and the import process cannot continue.\"\n )\n\n try:\n alembic_version = json.loads(backup.open(\"db/alembic_version.json\").read())\n alembic_version = alembic_version[\"results\"][0][\"version_num\"]\n except Exception:\n raise Exception(\n \"Could not determine appropriate database version. 
This backup cannot be automatically imported.\"\n )\n\n # Check if the alembic version is from CTFd 1.x\n if alembic_version in (\n \"1ec4a28fe0ff\",\n \"2539d8b5082e\",\n \"7e9efd084c5a\",\n \"87733981ca0e\",\n \"a4e30c94c360\",\n \"c12d2a1b0926\",\n \"c7225db614c1\",\n \"cb3cfcc47e2f\",\n \"cbf5620f8e15\",\n \"d5a224bf5862\",\n \"d6514ec92738\",\n \"dab615389702\",\n \"e62fd69bd417\",\n ):\n raise Exception(\n \"The version of CTFd that this backup is from is too old to be automatically imported.\"\n )\n\n if erase:\n # Clear out existing connections to release any locks\n db.session.close()\n db.engine.dispose()\n\n # Drop database and recreate it to get to a clean state\n drop_database()\n create_database()\n # We explicitly do not want to upgrade or stamp here.\n # The import will have this information.\n\n side_db = dataset.connect(get_app_config(\"SQLALCHEMY_DATABASE_URI\"))\n sqlite = get_app_config(\"SQLALCHEMY_DATABASE_URI\").startswith(\"sqlite\")\n postgres = get_app_config(\"SQLALCHEMY_DATABASE_URI\").startswith(\"postgres\")\n\n try:\n if postgres:\n side_db.query(\"SET session_replication_role=replica;\")\n else:\n side_db.query(\"SET FOREIGN_KEY_CHECKS=0;\")\n except Exception:\n print(\"Failed to disable foreign key checks. Continuing.\")\n\n first = [\n \"db/teams.json\",\n \"db/users.json\",\n \"db/challenges.json\",\n \"db/dynamic_challenge.json\",\n \"db/flags.json\",\n \"db/hints.json\",\n \"db/unlocks.json\",\n \"db/awards.json\",\n \"db/tags.json\",\n \"db/submissions.json\",\n \"db/solves.json\",\n \"db/files.json\",\n \"db/notifications.json\",\n \"db/pages.json\",\n \"db/tracking.json\",\n \"db/config.json\",\n ]\n\n # We want to insert certain database tables first so we are specifying\n # the order with a list. The leftover tables are tables that are from a\n # plugin (more likely) or a table where we do not care about insertion order\n for item in first:\n if item in members:\n members.remove(item)\n\n # Upgrade the database to the point in time that the import was taken from\n migration_upgrade(revision=alembic_version)\n\n members.remove(\"db/alembic_version.json\")\n\n # Combine the database insertion code into a function so that we can pause\n # insertion between official database tables and plugin tables\n def insertion(table_filenames):\n for member in table_filenames:\n if member.startswith(\"db/\"):\n table_name = member[3:-5]\n\n try:\n # Try to open a file but skip if it doesn't exist.\n data = backup.open(member).read()\n except KeyError:\n continue\n\n if data:\n table = side_db[table_name]\n\n saved = json.loads(data)\n for entry in saved[\"results\"]:\n # This is a hack to get SQLite to properly accept datetime values from dataset\n # See Issue #246\n if sqlite:\n direct_table = get_class_by_tablename(table.name)\n for k, v in entry.items():\n if isinstance(v, six.string_types):\n # We only want to apply this hack to columns that are expecting a datetime object\n try:\n is_dt_column = (\n type(getattr(direct_table, k).type)\n == sqltypes.DateTime\n )\n except AttributeError:\n is_dt_column = False\n\n # If the table is expecting a datetime, we should check if the string is one and convert it\n if is_dt_column:\n match = re.match(\n r\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d\",\n v,\n )\n if match:\n entry[k] = datetime.datetime.strptime(\n v, \"%Y-%m-%dT%H:%M:%S.%f\"\n )\n continue\n match = re.match(\n r\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\", v\n )\n if match:\n entry[k] = datetime.datetime.strptime(\n v, \"%Y-%m-%dT%H:%M:%S\"\n 
)\n continue\n # From v2.0.0 to v2.1.0 requirements could have been a string or JSON because of a SQLAlchemy issue\n # This is a hack to ensure we can still accept older exports. See #867\n if member in (\n \"db/challenges.json\",\n \"db/hints.json\",\n \"db/awards.json\",\n ):\n requirements = entry.get(\"requirements\")\n if requirements and isinstance(\n requirements, six.string_types\n ):\n entry[\"requirements\"] = json.loads(requirements)\n\n try:\n table.insert(entry)\n except ProgrammingError:\n # MariaDB does not like JSON objects and prefers strings because it internally\n # represents JSON with LONGTEXT.\n # See Issue #973\n requirements = entry.get(\"requirements\")\n if requirements and isinstance(requirements, dict):\n entry[\"requirements\"] = json.dumps(requirements)\n table.insert(entry)\n\n db.session.commit()\n if postgres:\n # This command is to set the next primary key ID for the re-inserted tables in Postgres. However,\n # this command is very difficult to translate into SQLAlchemy code. Because Postgres is not\n # officially supported, no major work will go into this functionality.\n # https://stackoverflow.com/a/37972960\n if '\"' not in table_name and \"'\" not in table_name:\n query = \"SELECT setval(pg_get_serial_sequence('{table_name}', 'id'), coalesce(max(id)+1,1), false) FROM \\\"{table_name}\\\"\".format( # nosec\n table_name=table_name\n )\n side_db.engine.execute(query)\n else:\n raise Exception(\n \"Table name {table_name} contains quotes\".format(\n table_name=table_name\n )\n )\n\n # Insert data from official tables\n insertion(first)\n\n # Create tables created by plugins\n try:\n # Run plugin migrations\n plugins = get_plugin_names()\n try:\n for plugin in plugins:\n revision = plugin_current(plugin_name=plugin)\n plugin_upgrade(plugin_name=plugin, revision=revision)\n finally:\n # Create tables that don't have migrations\n app.db.create_all()\n except OperationalError as e:\n if not postgres:\n raise e\n else:\n print(\"Allowing error during app.db.create_all() due to Postgres\")\n\n # Insert data for plugin tables\n insertion(members)\n\n # Bring plugin tables up to head revision\n plugins = get_plugin_names()\n for plugin in plugins:\n plugin_upgrade(plugin_name=plugin)\n\n # Extracting files\n files = [f for f in backup.namelist() if f.startswith(\"uploads/\")]\n uploader = get_uploader()\n for f in files:\n filename = f.split(os.sep, 1)\n\n if (\n len(filename) < 2 or os.path.basename(filename[1]) == \"\"\n ): # just an empty uploads directory (e.g. uploads/) or any directory\n continue\n\n filename = filename[1] # Get the second entry in the list (the actual filename)\n source = backup.open(f)\n uploader.store(fileobj=source, filename=filename)\n\n # Alembic sqlite support is lacking so we should just create_all anyway\n try:\n migration_upgrade(revision=\"head\")\n except (OperationalError, CommandError, RuntimeError, SystemExit, Exception):\n app.db.create_all()\n stamp_latest_revision()\n\n try:\n if postgres:\n side_db.query(\"SET session_replication_role=DEFAULT;\")\n else:\n side_db.query(\"SET FOREIGN_KEY_CHECKS=1;\")\n except Exception:\n print(\"Failed to enable foreign key checks. Continuing.\")\n\n # Invalidate all cached data\n cache.clear()\n\n # Set default theme in case the current instance or the import does not provide it\n set_config(\"ctf_theme\", \"core\")\n set_config(\"ctf_version\", CTFD_VERSION)\n", "path": "CTFd/utils/exports/__init__.py"}]} | 3,903 | 213 |
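
The patch in the record above adds a guard that the uploaded backup zip actually contains a `db/` directory before the import proceeds. A minimal standalone sketch of that check — the wrapper function and its name are illustrative, not CTFd's actual API; the member-directory logic and the error message are taken from the diff:

```python
import os
import zipfile

def assert_backup_has_db_folder(backup_path):
    # Keep the directory part of every member path that lives inside a folder.
    with zipfile.ZipFile(backup_path) as backup:
        members = backup.namelist()
        member_dirs = [os.path.split(m)[0] for m in members if "/" in m]
        if "db" not in member_dirs:
            raise Exception(
                'CTFd couldn\'t find the "db" folder in this backup. '
                "The backup may be malformed or corrupted and the import process cannot continue."
            )
```
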
gh_patches_debug_3566 | rasdani/github-patches | git_diff | sopel-irc__sopel-1417 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
reddit: support all reddit subdomains
#1397 adds support for links to `old.reddit.com`, but any two-letter subdomain of `reddit.com` is valid as well. Reddit uses these for internationalization (e.g. `it.reddit.com` -> Italian UI) and also to allow subreddits to add custom styles (a common example is using `np.reddit.com` to trigger a "no participation" stylesheet that hides voting arrows).
Sopel's reddit module should support these subdomains.
There are also really old three-letter subdomains (`pay`, `ssl`) that still work. Most three-letter combos I tried redirect to `www.reddit.com/r/combohere` though.
--- END ISSUE ---
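
A minimal standalone sketch of the broader pattern the issue asks for — the exact set of alternatives (`www`, `old`, the legacy `pay`/`ssl` names, plus any two-letter code) is an assumption drawn from the examples above, not necessarily what Sopel ends up shipping:

```python
import re

# Optional subdomain: www., old., the legacy pay./ssl., or any two-letter code (np., it., ...).
domain = r'https?://(?:www\.|old\.|pay\.|ssl\.|[a-z]{2}\.)?reddit\.com'
post_url = re.compile(domain + r'/r/(.*?)/comments/([\w-]+)')

for url in ('https://np.reddit.com/r/python/comments/abc123',
            'https://it.reddit.com/r/python/comments/abc123',
            'https://old.reddit.com/r/python/comments/abc123'):
    assert post_url.search(url), url
```
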
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/reddit.py`
Content:
```
1 # coding=utf-8
2 # Author: Elsie Powell, embolalia.com
3 from __future__ import unicode_literals, absolute_import, print_function, division
4
5 from sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP
6 from sopel.formatting import bold, color, colors
7 from sopel.web import USER_AGENT
8 from sopel.tools import SopelMemory, time
9 import datetime as dt
10 import praw
11 import re
12 import sys
13 if sys.version_info.major >= 3:
14 unicode = str
15 if sys.version_info.minor >= 4:
16 from html import unescape
17 else:
18 from html.parser import HTMLParser
19 unescape = HTMLParser().unescape
20 else:
21 from HTMLParser import HTMLParser
22 unescape = HTMLParser().unescape
23
24
25 domain = r'https?://(?:www\.|np\.|old\.)?reddit\.com'
26 post_url = r'%s/r/(.*?)/comments/([\w-]+)' % domain
27 user_url = r'%s/u(ser)?/([\w-]+)' % domain
28 post_regex = re.compile(post_url)
29 user_regex = re.compile(user_url)
30 spoiler_subs = [
31 'stevenuniverse',
32 'onepunchman',
33 ]
34
35
36 def setup(bot):
37 if not bot.memory.contains('url_callbacks'):
38 bot.memory['url_callbacks'] = SopelMemory()
39 bot.memory['url_callbacks'][post_regex] = rpost_info
40 bot.memory['url_callbacks'][user_regex] = redditor_info
41
42
43 def shutdown(bot):
44 del bot.memory['url_callbacks'][post_regex]
45 del bot.memory['url_callbacks'][user_regex]
46
47
48 @rule('.*%s.*' % post_url)
49 def rpost_info(bot, trigger, match=None):
50 match = match or trigger
51 try:
52 r = praw.Reddit(
53 user_agent=USER_AGENT,
54 client_id='6EiphT6SSQq7FQ',
55 client_secret=None,
56 )
57 s = r.submission(id=match.group(2))
58 except Exception:
59 r = praw.Reddit(user_agent=USER_AGENT)
60 s = r.get_submission(submission_id=match.group(2))
61
62 message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '
63 '{comments} comments | Posted by {author} | '
64 'Created at {created}')
65
66 subreddit = s.subreddit.display_name
67 if s.is_self:
68 link = '(self.{})'.format(subreddit)
69 else:
70 link = '({}) to r/{}'.format(s.url, subreddit)
71
72 if s.over_18:
73 if subreddit.lower() in spoiler_subs:
74 nsfw = bold(color(' [SPOILERS]', colors.RED))
75 else:
76 nsfw = bold(color(' [NSFW]', colors.RED))
77
78 sfw = bot.db.get_channel_value(trigger.sender, 'sfw')
79 if sfw:
80 link = '(link hidden)'
81 bot.write(['KICK', trigger.sender, trigger.nick,
82 'Linking to NSFW content in a SFW channel.'])
83 else:
84 nsfw = ''
85
86 if s.author:
87 author = s.author.name
88 else:
89 author = '[deleted]'
90
91 tz = time.get_timezone(bot.db, bot.config, None, trigger.nick,
92 trigger.sender)
93 time_created = dt.datetime.utcfromtimestamp(s.created_utc)
94 created = time.format_time(bot.db, bot.config, tz, trigger.nick,
95 trigger.sender, time_created)
96
97 if s.score > 0:
98 point_color = colors.GREEN
99 else:
100 point_color = colors.RED
101
102 percent = color(unicode(s.upvote_ratio * 100) + '%', point_color)
103
104 title = unescape(s.title)
105 message = message.format(
106 title=title, link=link, nsfw=nsfw, points=s.score, percent=percent,
107 comments=s.num_comments, author=author, created=created)
108
109 bot.say(message)
110
111
112 # If you change this, you'll have to change some other things...
113 @commands('redditor')
114 @example('.redditor poem_for_your_sprog')
115 def redditor_info(bot, trigger, match=None):
116 """Show information about the given Redditor"""
117 commanded = re.match(bot.config.core.prefix + 'redditor', trigger)
118 r = praw.Reddit(
119 user_agent=USER_AGENT,
120 client_id='6EiphT6SSQq7FQ',
121 client_secret=None,
122 )
123 match = match or trigger
124 try:
125 u = r.get_redditor(match.group(2))
126 except Exception: # TODO: Be specific
127 if commanded:
128 bot.say('No such Redditor.')
129 return NOLIMIT
130 else:
131 return
132 # Fail silently if it wasn't an explicit command.
133
134 message = '[REDDITOR] ' + u.name
135 now = dt.datetime.utcnow()
136 cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc)
137 cakeday_start = cakeday_start.replace(year=now.year)
138 day = dt.timedelta(days=1)
139 year_div_by_400 = now.year % 400 == 0
140 year_div_by_100 = now.year % 100 == 0
141 year_div_by_4 = now.year % 4 == 0
142 is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4)
143 if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)):
144 # If cake day is 2/29 and it's not a leap year, cake day is 1/3.
145 # Cake day begins at exact account creation time.
146 is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day)
147 else:
148 is_cakeday = cakeday_start <= now <= cakeday_start + day
149
150 if is_cakeday:
151 message = message + ' | 13Cake day'
152 if commanded:
153 message = message + ' | https://reddit.com/u/' + u.name
154 if u.is_gold:
155 message = message + ' | 08Gold'
156 if u.is_mod:
157 message = message + ' | 05Mod'
158 message = message + (' | Link: ' + str(u.link_karma) +
159 ' | Comment: ' + str(u.comment_karma))
160
161 bot.say(message)
162
163
164 # If you change the groups here, you'll have to change some things above.
165 @rule('.*%s.*' % user_url)
166 def auto_redditor_info(bot, trigger):
167 redditor_info(bot, trigger)
168
169
170 @require_chanmsg('.setsfw is only permitted in channels')
171 @commands('setsafeforwork', 'setsfw')
172 @example('.setsfw true')
173 @example('.setsfw false')
174 def update_channel(bot, trigger):
175 """
176 Sets the Safe for Work status (true or false) for the current
177 channel. Defaults to false.
178 """
179 if bot.privileges[trigger.sender][trigger.nick] < OP:
180 return
181 else:
182 param = 'true'
183 if trigger.group(2) and trigger.group(3):
184 param = trigger.group(3).strip().lower()
185 sfw = param == 'true'
186 bot.db.set_channel_value(trigger.sender, 'sfw', sfw)
187 if sfw:
188 bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender)
189 else:
190 bot.reply('Got it. %s is now flagged as NSFW.' % trigger.sender)
191
192
193 @commands('getsafeforwork', 'getsfw')
194 @example('.getsfw [channel]')
195 def get_channel_sfw(bot, trigger):
196 """
197 Gets the preferred channel's Safe for Work status, or the current
198 channel's status if no channel given.
199 """
200 channel = trigger.group(2)
201 if not channel:
202 channel = trigger.sender
203 if channel.is_nick():
204 return bot.say('.getsfw with no channel param is only permitted in channels')
205
206 channel = channel.strip()
207
208 sfw = bot.db.get_channel_value(channel, 'sfw')
209 if sfw:
210 bot.say('%s is flagged as SFW' % channel)
211 else:
212 bot.say('%s is flagged as NSFW' % channel)
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/modules/reddit.py b/sopel/modules/reddit.py
--- a/sopel/modules/reddit.py
+++ b/sopel/modules/reddit.py
@@ -22,7 +22,7 @@
unescape = HTMLParser().unescape
-domain = r'https?://(?:www\.|np\.|old\.)?reddit\.com'
+domain = r'https?://(?:www\.|old\.|pay\.|ssl\.|[a-z]{2}\.)?reddit\.com'
post_url = r'%s/r/(.*?)/comments/([\w-]+)' % domain
user_url = r'%s/u(ser)?/([\w-]+)' % domain
post_regex = re.compile(post_url)
| {"golden_diff": "diff --git a/sopel/modules/reddit.py b/sopel/modules/reddit.py\n--- a/sopel/modules/reddit.py\n+++ b/sopel/modules/reddit.py\n@@ -22,7 +22,7 @@\n unescape = HTMLParser().unescape\n \n \n-domain = r'https?://(?:www\\.|np\\.|old\\.)?reddit\\.com'\n+domain = r'https?://(?:www\\.|old\\.|pay\\.|ssl\\.|[a-z]{2}\\.)?reddit\\.com'\n post_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\n user_url = r'%s/u(ser)?/([\\w-]+)' % domain\n post_regex = re.compile(post_url)\n", "issue": "reddit: support all reddit subdomains\n#1397 adds support for links to `old.reddit.com`, but any two-letter subdomain of `reddit.com` is valid as well. Reddit uses these for internationalization (e.g. `it.reddit.com` -> Italian UI) and also to allow subreddits to add custom styles (a common example is using `np.reddit.com` to trigger a \"no participation\" stylesheet that hides voting arrows).\r\n\r\nSopel's reddit module should support these subdomains.\r\n\r\nThere are also really old three-letter subdomains (`pay`, `ssl`) that still work. Most three-letter combos I tried redirect to `www.reddit.com/r/combohere` though.\n", "before_files": [{"content": "# coding=utf-8\n# Author: Elsie Powell, embolalia.com\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP\nfrom sopel.formatting import bold, color, colors\nfrom sopel.web import USER_AGENT\nfrom sopel.tools import SopelMemory, time\nimport datetime as dt\nimport praw\nimport re\nimport sys\nif sys.version_info.major >= 3:\n unicode = str\n if sys.version_info.minor >= 4:\n from html import unescape\n else:\n from html.parser import HTMLParser\n unescape = HTMLParser().unescape\nelse:\n from HTMLParser import HTMLParser\n unescape = HTMLParser().unescape\n\n\ndomain = r'https?://(?:www\\.|np\\.|old\\.)?reddit\\.com'\npost_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\nuser_url = r'%s/u(ser)?/([\\w-]+)' % domain\npost_regex = re.compile(post_url)\nuser_regex = re.compile(user_url)\nspoiler_subs = [\n 'stevenuniverse',\n 'onepunchman',\n]\n\n\ndef setup(bot):\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = SopelMemory()\n bot.memory['url_callbacks'][post_regex] = rpost_info\n bot.memory['url_callbacks'][user_regex] = redditor_info\n\n\ndef shutdown(bot):\n del bot.memory['url_callbacks'][post_regex]\n del bot.memory['url_callbacks'][user_regex]\n\n\n@rule('.*%s.*' % post_url)\ndef rpost_info(bot, trigger, match=None):\n match = match or trigger\n try:\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n s = r.submission(id=match.group(2))\n except Exception:\n r = praw.Reddit(user_agent=USER_AGENT)\n s = r.get_submission(submission_id=match.group(2))\n\n message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '\n '{comments} comments | Posted by {author} | '\n 'Created at {created}')\n\n subreddit = s.subreddit.display_name\n if s.is_self:\n link = '(self.{})'.format(subreddit)\n else:\n link = '({}) to r/{}'.format(s.url, subreddit)\n\n if s.over_18:\n if subreddit.lower() in spoiler_subs:\n nsfw = bold(color(' [SPOILERS]', colors.RED))\n else:\n nsfw = bold(color(' [NSFW]', colors.RED))\n\n sfw = bot.db.get_channel_value(trigger.sender, 'sfw')\n if sfw:\n link = '(link hidden)'\n bot.write(['KICK', trigger.sender, trigger.nick,\n 'Linking to NSFW content in a SFW channel.'])\n else:\n nsfw = ''\n\n if s.author:\n author = s.author.name\n 
else:\n author = '[deleted]'\n\n tz = time.get_timezone(bot.db, bot.config, None, trigger.nick,\n trigger.sender)\n time_created = dt.datetime.utcfromtimestamp(s.created_utc)\n created = time.format_time(bot.db, bot.config, tz, trigger.nick,\n trigger.sender, time_created)\n\n if s.score > 0:\n point_color = colors.GREEN\n else:\n point_color = colors.RED\n\n percent = color(unicode(s.upvote_ratio * 100) + '%', point_color)\n\n title = unescape(s.title)\n message = message.format(\n title=title, link=link, nsfw=nsfw, points=s.score, percent=percent,\n comments=s.num_comments, author=author, created=created)\n\n bot.say(message)\n\n\n# If you change this, you'll have to change some other things...\n@commands('redditor')\n@example('.redditor poem_for_your_sprog')\ndef redditor_info(bot, trigger, match=None):\n \"\"\"Show information about the given Redditor\"\"\"\n commanded = re.match(bot.config.core.prefix + 'redditor', trigger)\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n match = match or trigger\n try:\n u = r.get_redditor(match.group(2))\n except Exception: # TODO: Be specific\n if commanded:\n bot.say('No such Redditor.')\n return NOLIMIT\n else:\n return\n # Fail silently if it wasn't an explicit command.\n\n message = '[REDDITOR] ' + u.name\n now = dt.datetime.utcnow()\n cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc)\n cakeday_start = cakeday_start.replace(year=now.year)\n day = dt.timedelta(days=1)\n year_div_by_400 = now.year % 400 == 0\n year_div_by_100 = now.year % 100 == 0\n year_div_by_4 = now.year % 4 == 0\n is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4)\n if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)):\n # If cake day is 2/29 and it's not a leap year, cake day is 1/3.\n # Cake day begins at exact account creation time.\n is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day)\n else:\n is_cakeday = cakeday_start <= now <= cakeday_start + day\n\n if is_cakeday:\n message = message + ' | \u0002\u000313Cake day\u0003\u0002'\n if commanded:\n message = message + ' | https://reddit.com/u/' + u.name\n if u.is_gold:\n message = message + ' | \u0002\u000308Gold\u0003\u0002'\n if u.is_mod:\n message = message + ' | \u0002\u000305Mod\u0003\u0002'\n message = message + (' | Link: ' + str(u.link_karma) +\n ' | Comment: ' + str(u.comment_karma))\n\n bot.say(message)\n\n\n# If you change the groups here, you'll have to change some things above.\n@rule('.*%s.*' % user_url)\ndef auto_redditor_info(bot, trigger):\n redditor_info(bot, trigger)\n\n\n@require_chanmsg('.setsfw is only permitted in channels')\n@commands('setsafeforwork', 'setsfw')\n@example('.setsfw true')\n@example('.setsfw false')\ndef update_channel(bot, trigger):\n \"\"\"\n Sets the Safe for Work status (true or false) for the current\n channel. Defaults to false.\n \"\"\"\n if bot.privileges[trigger.sender][trigger.nick] < OP:\n return\n else:\n param = 'true'\n if trigger.group(2) and trigger.group(3):\n param = trigger.group(3).strip().lower()\n sfw = param == 'true'\n bot.db.set_channel_value(trigger.sender, 'sfw', sfw)\n if sfw:\n bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender)\n else:\n bot.reply('Got it. %s is now flagged as NSFW.' 
% trigger.sender)\n\n\n@commands('getsafeforwork', 'getsfw')\n@example('.getsfw [channel]')\ndef get_channel_sfw(bot, trigger):\n \"\"\"\n Gets the preferred channel's Safe for Work status, or the current\n channel's status if no channel given.\n \"\"\"\n channel = trigger.group(2)\n if not channel:\n channel = trigger.sender\n if channel.is_nick():\n return bot.say('.getsfw with no channel param is only permitted in channels')\n\n channel = channel.strip()\n\n sfw = bot.db.get_channel_value(channel, 'sfw')\n if sfw:\n bot.say('%s is flagged as SFW' % channel)\n else:\n bot.say('%s is flagged as NSFW' % channel)\n", "path": "sopel/modules/reddit.py"}], "after_files": [{"content": "# coding=utf-8\n# Author: Elsie Powell, embolalia.com\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP\nfrom sopel.formatting import bold, color, colors\nfrom sopel.web import USER_AGENT\nfrom sopel.tools import SopelMemory, time\nimport datetime as dt\nimport praw\nimport re\nimport sys\nif sys.version_info.major >= 3:\n unicode = str\n if sys.version_info.minor >= 4:\n from html import unescape\n else:\n from html.parser import HTMLParser\n unescape = HTMLParser().unescape\nelse:\n from HTMLParser import HTMLParser\n unescape = HTMLParser().unescape\n\n\ndomain = r'https?://(?:www\\.|old\\.|pay\\.|ssl\\.|[a-z]{2}\\.)?reddit\\.com'\npost_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\nuser_url = r'%s/u(ser)?/([\\w-]+)' % domain\npost_regex = re.compile(post_url)\nuser_regex = re.compile(user_url)\nspoiler_subs = [\n 'stevenuniverse',\n 'onepunchman',\n]\n\n\ndef setup(bot):\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = SopelMemory()\n bot.memory['url_callbacks'][post_regex] = rpost_info\n bot.memory['url_callbacks'][user_regex] = redditor_info\n\n\ndef shutdown(bot):\n del bot.memory['url_callbacks'][post_regex]\n del bot.memory['url_callbacks'][user_regex]\n\n\n@rule('.*%s.*' % post_url)\ndef rpost_info(bot, trigger, match=None):\n match = match or trigger\n try:\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n s = r.submission(id=match.group(2))\n except Exception:\n r = praw.Reddit(user_agent=USER_AGENT)\n s = r.get_submission(submission_id=match.group(2))\n\n message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '\n '{comments} comments | Posted by {author} | '\n 'Created at {created}')\n\n subreddit = s.subreddit.display_name\n if s.is_self:\n link = '(self.{})'.format(subreddit)\n else:\n link = '({}) to r/{}'.format(s.url, subreddit)\n\n if s.over_18:\n if subreddit.lower() in spoiler_subs:\n nsfw = bold(color(' [SPOILERS]', colors.RED))\n else:\n nsfw = bold(color(' [NSFW]', colors.RED))\n\n sfw = bot.db.get_channel_value(trigger.sender, 'sfw')\n if sfw:\n link = '(link hidden)'\n bot.write(['KICK', trigger.sender, trigger.nick,\n 'Linking to NSFW content in a SFW channel.'])\n else:\n nsfw = ''\n\n if s.author:\n author = s.author.name\n else:\n author = '[deleted]'\n\n tz = time.get_timezone(bot.db, bot.config, None, trigger.nick,\n trigger.sender)\n time_created = dt.datetime.utcfromtimestamp(s.created_utc)\n created = time.format_time(bot.db, bot.config, tz, trigger.nick,\n trigger.sender, time_created)\n\n if s.score > 0:\n point_color = colors.GREEN\n else:\n point_color = colors.RED\n\n percent = color(unicode(s.upvote_ratio * 100) + '%', point_color)\n\n title = 
unescape(s.title)\n message = message.format(\n title=title, link=link, nsfw=nsfw, points=s.score, percent=percent,\n comments=s.num_comments, author=author, created=created)\n\n bot.say(message)\n\n\n# If you change this, you'll have to change some other things...\n@commands('redditor')\n@example('.redditor poem_for_your_sprog')\ndef redditor_info(bot, trigger, match=None):\n \"\"\"Show information about the given Redditor\"\"\"\n commanded = re.match(bot.config.core.prefix + 'redditor', trigger)\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n match = match or trigger\n try:\n u = r.get_redditor(match.group(2))\n except Exception: # TODO: Be specific\n if commanded:\n bot.say('No such Redditor.')\n return NOLIMIT\n else:\n return\n # Fail silently if it wasn't an explicit command.\n\n message = '[REDDITOR] ' + u.name\n now = dt.datetime.utcnow()\n cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc)\n cakeday_start = cakeday_start.replace(year=now.year)\n day = dt.timedelta(days=1)\n year_div_by_400 = now.year % 400 == 0\n year_div_by_100 = now.year % 100 == 0\n year_div_by_4 = now.year % 4 == 0\n is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4)\n if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)):\n # If cake day is 2/29 and it's not a leap year, cake day is 1/3.\n # Cake day begins at exact account creation time.\n is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day)\n else:\n is_cakeday = cakeday_start <= now <= cakeday_start + day\n\n if is_cakeday:\n message = message + ' | \u0002\u000313Cake day\u0003\u0002'\n if commanded:\n message = message + ' | https://reddit.com/u/' + u.name\n if u.is_gold:\n message = message + ' | \u0002\u000308Gold\u0003\u0002'\n if u.is_mod:\n message = message + ' | \u0002\u000305Mod\u0003\u0002'\n message = message + (' | Link: ' + str(u.link_karma) +\n ' | Comment: ' + str(u.comment_karma))\n\n bot.say(message)\n\n\n# If you change the groups here, you'll have to change some things above.\n@rule('.*%s.*' % user_url)\ndef auto_redditor_info(bot, trigger):\n redditor_info(bot, trigger)\n\n\n@require_chanmsg('.setsfw is only permitted in channels')\n@commands('setsafeforwork', 'setsfw')\n@example('.setsfw true')\n@example('.setsfw false')\ndef update_channel(bot, trigger):\n \"\"\"\n Sets the Safe for Work status (true or false) for the current\n channel. Defaults to false.\n \"\"\"\n if bot.privileges[trigger.sender][trigger.nick] < OP:\n return\n else:\n param = 'true'\n if trigger.group(2) and trigger.group(3):\n param = trigger.group(3).strip().lower()\n sfw = param == 'true'\n bot.db.set_channel_value(trigger.sender, 'sfw', sfw)\n if sfw:\n bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender)\n else:\n bot.reply('Got it. %s is now flagged as NSFW.' % trigger.sender)\n\n\n@commands('getsafeforwork', 'getsfw')\n@example('.getsfw [channel]')\ndef get_channel_sfw(bot, trigger):\n \"\"\"\n Gets the preferred channel's Safe for Work status, or the current\n channel's status if no channel given.\n \"\"\"\n channel = trigger.group(2)\n if not channel:\n channel = trigger.sender\n if channel.is_nick():\n return bot.say('.getsfw with no channel param is only permitted in channels')\n\n channel = channel.strip()\n\n sfw = bot.db.get_channel_value(channel, 'sfw')\n if sfw:\n bot.say('%s is flagged as SFW' % channel)\n else:\n bot.say('%s is flagged as NSFW' % channel)\n", "path": "sopel/modules/reddit.py"}]} | 2,795 | 163 |
gh_patches_debug_54382 | rasdani/github-patches | git_diff | conan-io__conan-4380 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Virtualenv generator prepends duplicated values in env vars from profile
With a *conanfile.txt* empty and a profile with:
```
[env]
PREPEND_VAR = ['kk','pp']
```
Commands:
```
$ conan install . -g virtualenv
```
In *activate.sh* there is:
```
PREPEND_VAR="kk":"pp":"kk":"pp"
```
and in *activate.bat* you also have:
```
SET PREPEND_VAR=kk;pp;kk;pp
```
So at least variables that should be prepended are applied twice.
--- END ISSUE ---
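
A small illustration of how applying the same profile `[env]` values twice duplicates a prepend-style variable — the helper below is illustrative only and does not mirror Conan's real `EnvValues` API:

```python
# Illustrative merge of list-valued ("prepend") environment variables from a profile.
def apply_profile_env(current, profile_env):
    merged = dict(current)
    for name, value in profile_env.items():
        if isinstance(value, list):
            # Prepend-style variables accumulate in front of whatever is already there.
            merged[name] = value + merged.get(name, [])
        else:
            merged[name] = value
    return merged

env = {}
profile = {"PREPEND_VAR": ["kk", "pp"]}
env = apply_profile_env(env, profile)  # applied once when the conanfile is initialized
env = apply_profile_env(env, profile)  # applied again by a second update with the same values
print(env["PREPEND_VAR"])              # ['kk', 'pp', 'kk', 'pp']
```

Running it prints `['kk', 'pp', 'kk', 'pp']`, matching the duplicated `"kk":"pp":"kk":"pp"` seen in *activate.sh*.
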
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/loader.py`
Content:
```
1 import imp
2 import inspect
3 import os
4 import sys
5 import uuid
6
7 from conans.client.generators import registered_generators
8 from conans.client.loader_txt import ConanFileTextLoader
9 from conans.client.tools.files import chdir
10 from conans.errors import ConanException, NotFoundException
11 from conans.model.conan_file import ConanFile
12 from conans.model.conan_generator import Generator
13 from conans.model.options import OptionsValues
14 from conans.model.ref import ConanFileReference
15 from conans.model.settings import Settings
16 from conans.model.values import Values
17 from conans.util.files import load
18
19
20 class ProcessedProfile(object):
21 def __init__(self, profile, create_reference=None):
22 self._settings = profile.processed_settings
23 self._user_options = profile.options.copy()
24
25 self._package_settings = profile.package_settings_values
26 self._env_values = profile.env_values
27 # Make sure the paths are normalized first, so env_values can be just a copy
28 self._dev_reference = create_reference
29
30
31 class ConanFileLoader(object):
32 def __init__(self, runner, output, python_requires):
33 self._runner = runner
34 self._output = output
35 self._python_requires = python_requires
36 sys.modules["conans"].python_requires = python_requires
37 self.cached_conanfiles = {}
38
39 def invalidate_caches(self):
40 self.cached_conanfiles = {}
41 self._python_requires.invalidate_caches()
42
43 def load_class(self, conanfile_path):
44 try:
45 return self.cached_conanfiles[conanfile_path]
46 except KeyError:
47 self._python_requires.valid = True
48 _, conanfile = parse_conanfile(conanfile_path, self._python_requires)
49 self._python_requires.valid = False
50 self.cached_conanfiles[conanfile_path] = conanfile
51 return conanfile
52
53 def load_export(self, conanfile_path, name, version, user, channel):
54 conanfile = self.load_class(conanfile_path)
55 # Export does a check on existing name & version
56 if "name" in conanfile.__dict__:
57 if name and name != conanfile.name:
58 raise ConanException("Package recipe exported with name %s!=%s"
59 % (name, conanfile.name))
60 elif not name:
61 raise ConanException("conanfile didn't specify name")
62 else:
63 conanfile.name = name
64
65 if "version" in conanfile.__dict__:
66 if version and version != conanfile.version:
67 raise ConanException("Package recipe exported with version %s!=%s"
68 % (version, conanfile.version))
69 elif not version:
70 raise ConanException("conanfile didn't specify version")
71 else:
72 conanfile.version = version
73 ref = ConanFileReference(conanfile.name, conanfile.version, user, channel)
74 return conanfile(self._output, self._runner, str(ref), user, channel)
75
76 @staticmethod
77 def _initialize_conanfile(conanfile, processed_profile):
78 # Prepare the settings for the loaded conanfile
79 # Mixing the global settings with the specified for that name if exist
80 tmp_settings = processed_profile._settings.copy()
81 if (processed_profile._package_settings and
82 conanfile.name in processed_profile._package_settings):
83 # Update the values, keeping old ones (confusing assign)
84 values_tuple = processed_profile._package_settings[conanfile.name]
85 tmp_settings.values = Values.from_list(values_tuple)
86
87 conanfile.initialize(tmp_settings, processed_profile._env_values)
88
89 def load_consumer(self, conanfile_path, processed_profile, name=None, version=None, user=None,
90 channel=None, test=None):
91
92 conanfile_class = self.load_class(conanfile_path)
93 if name and conanfile_class.name and name != conanfile_class.name:
94 raise ConanException("Package recipe name %s!=%s" % (name, conanfile_class.name))
95 if version and conanfile_class.version and version != conanfile_class.version:
96 raise ConanException("Package recipe version %s!=%s"
97 % (version, conanfile_class.version))
98 conanfile_class.name = name or conanfile_class.name
99 conanfile_class.version = version or conanfile_class.version
100 if test:
101 display_name = "%s (test package)" % test
102 else:
103 ref = ConanFileReference(conanfile_class.name, conanfile_class.version, user, channel,
104 validate=False)
105 if ref.name or ref.version or ref.user or ref.channel:
106 display_name = "%s (%s)" % (os.path.basename(conanfile_path), ref)
107 else:
108 display_name = os.path.basename(conanfile_path)
109 conanfile = conanfile_class(self._output, self._runner, display_name, user, channel)
110 conanfile.in_local_cache = False
111 try:
112 self._initialize_conanfile(conanfile, processed_profile)
113
114 # The consumer specific
115 conanfile.develop = True
116 processed_profile._user_options.descope_options(conanfile.name)
117 conanfile.options.initialize_upstream(processed_profile._user_options,
118 name=conanfile.name)
119 processed_profile._user_options.clear_unscoped_options()
120
121 return conanfile
122 except Exception as e: # re-raise with file name
123 raise ConanException("%s: %s" % (conanfile_path, str(e)))
124
125 def load_conanfile(self, conanfile_path, processed_profile, ref):
126 conanfile_class = self.load_class(conanfile_path)
127 conanfile_class.name = ref.name
128 conanfile_class.version = ref.version
129 conanfile = conanfile_class(self._output, self._runner, str(ref), ref.user, ref.channel)
130 if processed_profile._dev_reference and processed_profile._dev_reference == ref:
131 conanfile.develop = True
132 try:
133 self._initialize_conanfile(conanfile, processed_profile)
134 return conanfile
135 except Exception as e: # re-raise with file name
136 raise ConanException("%s: %s" % (conanfile_path, str(e)))
137
138 def load_conanfile_txt(self, conan_txt_path, processed_profile, ref=None):
139 if not os.path.exists(conan_txt_path):
140 raise NotFoundException("Conanfile not found!")
141
142 contents = load(conan_txt_path)
143 path, basename = os.path.split(conan_txt_path)
144 display_name = "%s (%s)" % (basename, ref) if ref and ref.name else basename
145 conanfile = self._parse_conan_txt(contents, path, display_name, processed_profile)
146 return conanfile
147
148 def _parse_conan_txt(self, contents, path, display_name, processed_profile):
149 conanfile = ConanFile(self._output, self._runner, display_name)
150 conanfile.initialize(Settings(), processed_profile._env_values)
151 # It is necessary to copy the settings, because the above is only a constraint of
152 # conanfile settings, and a txt doesn't define settings. Necessary for generators,
153 # as cmake_multi, that check build_type.
154 conanfile.settings = processed_profile._settings.copy_values()
155
156 try:
157 parser = ConanFileTextLoader(contents)
158 except Exception as e:
159 raise ConanException("%s:\n%s" % (path, str(e)))
160 for reference in parser.requirements:
161 ConanFileReference.loads(reference) # Raise if invalid
162 conanfile.requires.add(reference)
163 for build_reference in parser.build_requirements:
164 ConanFileReference.loads(build_reference)
165 if not hasattr(conanfile, "build_requires"):
166 conanfile.build_requires = []
167 conanfile.build_requires.append(build_reference)
168
169 conanfile.generators = parser.generators
170
171 options = OptionsValues.loads(parser.options)
172 conanfile.options.values = options
173 conanfile.options.initialize_upstream(processed_profile._user_options)
174
175 # imports method
176 conanfile.imports = parser.imports_method(conanfile)
177 conanfile._conan_env_values.update(processed_profile._env_values)
178 return conanfile
179
180 def load_virtual(self, references, processed_profile, scope_options=True,
181 build_requires_options=None):
182 # If user don't specify namespace in options, assume that it is
183 # for the reference (keep compatibility)
184 conanfile = ConanFile(self._output, self._runner, display_name="virtual")
185 conanfile.initialize(processed_profile._settings.copy(), processed_profile._env_values)
186 conanfile.settings = processed_profile._settings.copy_values()
187
188 for reference in references:
189 conanfile.requires.add(reference.full_repr()) # Convert to string necessary
190 # Allows options without package namespace in conan install commands:
191 # conan install zlib/1.2.8@lasote/stable -o shared=True
192 if scope_options:
193 assert len(references) == 1
194 processed_profile._user_options.scope_options(references[0].name)
195 if build_requires_options:
196 conanfile.options.initialize_upstream(build_requires_options)
197 else:
198 conanfile.options.initialize_upstream(processed_profile._user_options)
199
200 conanfile.generators = [] # remove the default txt generator
201 return conanfile
202
203
204 def _parse_module(conanfile_module, module_id):
205 """ Parses a python in-memory module, to extract the classes, mainly the main
206 class defining the Recipe, but also process possible existing generators
207 @param conanfile_module: the module to be processed
208 @return: the main ConanFile class from the module
209 """
210 result = None
211 for name, attr in conanfile_module.__dict__.items():
212 if (name.startswith("_") or not inspect.isclass(attr) or
213 attr.__dict__.get("__module__") != module_id):
214 continue
215
216 if issubclass(attr, ConanFile) and attr != ConanFile:
217 if result is None:
218 result = attr
219 else:
220 raise ConanException("More than 1 conanfile in the file")
221 elif issubclass(attr, Generator) and attr != Generator:
222 registered_generators.add(attr.__name__, attr, custom=True)
223
224 if result is None:
225 raise ConanException("No subclass of ConanFile")
226
227 return result
228
229
230 def parse_conanfile(conanfile_path, python_requires):
231 with python_requires.capture_requires() as py_requires:
232 module, filename = _parse_conanfile(conanfile_path)
233 try:
234 conanfile = _parse_module(module, filename)
235 conanfile.python_requires = py_requires
236 return module, conanfile
237 except Exception as e: # re-raise with file name
238 raise ConanException("%s: %s" % (conanfile_path, str(e)))
239
240
241 def _parse_conanfile(conan_file_path):
242 """ From a given path, obtain the in memory python import module
243 """
244
245 if not os.path.exists(conan_file_path):
246 raise NotFoundException("%s not found!" % conan_file_path)
247
248 module_id = str(uuid.uuid1())
249 current_dir = os.path.dirname(conan_file_path)
250 sys.path.insert(0, current_dir)
251 try:
252 old_modules = list(sys.modules.keys())
253 with chdir(current_dir):
254 sys.dont_write_bytecode = True
255 loaded = imp.load_source(module_id, conan_file_path)
256 sys.dont_write_bytecode = False
257
258 # These lines are necessary, otherwise local conanfile imports with same name
259 # collide, but no error, and overwrite other packages imports!!
260 added_modules = set(sys.modules).difference(old_modules)
261 for added in added_modules:
262 module = sys.modules[added]
263 if module:
264 try:
265 folder = os.path.dirname(module.__file__)
266 except AttributeError: # some module doesn't have __file__
267 pass
268 else:
269 if folder.startswith(current_dir):
270 module = sys.modules.pop(added)
271 sys.modules["%s.%s" % (module_id, added)] = module
272 except Exception:
273 import traceback
274 trace = traceback.format_exc().split('\n')
275 raise ConanException("Unable to load conanfile in %s\n%s" % (conan_file_path,
276 '\n'.join(trace[3:])))
277 finally:
278 sys.path.pop(0)
279
280 return loaded, module_id
281
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/loader.py b/conans/client/loader.py
--- a/conans/client/loader.py
+++ b/conans/client/loader.py
@@ -174,7 +174,6 @@
# imports method
conanfile.imports = parser.imports_method(conanfile)
- conanfile._conan_env_values.update(processed_profile._env_values)
return conanfile
def load_virtual(self, references, processed_profile, scope_options=True,
| {"golden_diff": "diff --git a/conans/client/loader.py b/conans/client/loader.py\n--- a/conans/client/loader.py\n+++ b/conans/client/loader.py\n@@ -174,7 +174,6 @@\n \n # imports method\n conanfile.imports = parser.imports_method(conanfile)\n- conanfile._conan_env_values.update(processed_profile._env_values)\n return conanfile\n \n def load_virtual(self, references, processed_profile, scope_options=True,\n", "issue": "Virtualenv generator prepends duplicated values in env vars from profile\nWith a *conanfile.txt* empty and a profile with:\r\n\r\n```\r\n[env]\r\nPREPEND_VAR = ['kk','pp']\r\n```\r\n\r\nCommands:\r\n\r\n```\r\n$ conan install . -g virtualenv\r\n```\r\n\r\nIn activate.sh there is:\r\n```\r\nPREPEND_VAR=\"kk\":\"pp\":\"kk\":\"pp\"\r\n```\r\nand in *activate.bat* you alaso have:\r\n```\r\nSET PREPEND_VAR=kk;pp;kk;pp\r\n```\r\n\r\nSo at least variables that should be pretended are applied twice.\r\n\r\n\n", "before_files": [{"content": "import imp\nimport inspect\nimport os\nimport sys\nimport uuid\n\nfrom conans.client.generators import registered_generators\nfrom conans.client.loader_txt import ConanFileTextLoader\nfrom conans.client.tools.files import chdir\nfrom conans.errors import ConanException, NotFoundException\nfrom conans.model.conan_file import ConanFile\nfrom conans.model.conan_generator import Generator\nfrom conans.model.options import OptionsValues\nfrom conans.model.ref import ConanFileReference\nfrom conans.model.settings import Settings\nfrom conans.model.values import Values\nfrom conans.util.files import load\n\n\nclass ProcessedProfile(object):\n def __init__(self, profile, create_reference=None):\n self._settings = profile.processed_settings\n self._user_options = profile.options.copy()\n\n self._package_settings = profile.package_settings_values\n self._env_values = profile.env_values\n # Make sure the paths are normalized first, so env_values can be just a copy\n self._dev_reference = create_reference\n\n\nclass ConanFileLoader(object):\n def __init__(self, runner, output, python_requires):\n self._runner = runner\n self._output = output\n self._python_requires = python_requires\n sys.modules[\"conans\"].python_requires = python_requires\n self.cached_conanfiles = {}\n\n def invalidate_caches(self):\n self.cached_conanfiles = {}\n self._python_requires.invalidate_caches()\n\n def load_class(self, conanfile_path):\n try:\n return self.cached_conanfiles[conanfile_path]\n except KeyError:\n self._python_requires.valid = True\n _, conanfile = parse_conanfile(conanfile_path, self._python_requires)\n self._python_requires.valid = False\n self.cached_conanfiles[conanfile_path] = conanfile\n return conanfile\n\n def load_export(self, conanfile_path, name, version, user, channel):\n conanfile = self.load_class(conanfile_path)\n # Export does a check on existing name & version\n if \"name\" in conanfile.__dict__:\n if name and name != conanfile.name:\n raise ConanException(\"Package recipe exported with name %s!=%s\"\n % (name, conanfile.name))\n elif not name:\n raise ConanException(\"conanfile didn't specify name\")\n else:\n conanfile.name = name\n\n if \"version\" in conanfile.__dict__:\n if version and version != conanfile.version:\n raise ConanException(\"Package recipe exported with version %s!=%s\"\n % (version, conanfile.version))\n elif not version:\n raise ConanException(\"conanfile didn't specify version\")\n else:\n conanfile.version = version\n ref = ConanFileReference(conanfile.name, conanfile.version, user, channel)\n return conanfile(self._output, 
self._runner, str(ref), user, channel)\n\n @staticmethod\n def _initialize_conanfile(conanfile, processed_profile):\n # Prepare the settings for the loaded conanfile\n # Mixing the global settings with the specified for that name if exist\n tmp_settings = processed_profile._settings.copy()\n if (processed_profile._package_settings and\n conanfile.name in processed_profile._package_settings):\n # Update the values, keeping old ones (confusing assign)\n values_tuple = processed_profile._package_settings[conanfile.name]\n tmp_settings.values = Values.from_list(values_tuple)\n\n conanfile.initialize(tmp_settings, processed_profile._env_values)\n\n def load_consumer(self, conanfile_path, processed_profile, name=None, version=None, user=None,\n channel=None, test=None):\n\n conanfile_class = self.load_class(conanfile_path)\n if name and conanfile_class.name and name != conanfile_class.name:\n raise ConanException(\"Package recipe name %s!=%s\" % (name, conanfile_class.name))\n if version and conanfile_class.version and version != conanfile_class.version:\n raise ConanException(\"Package recipe version %s!=%s\"\n % (version, conanfile_class.version))\n conanfile_class.name = name or conanfile_class.name\n conanfile_class.version = version or conanfile_class.version\n if test:\n display_name = \"%s (test package)\" % test\n else:\n ref = ConanFileReference(conanfile_class.name, conanfile_class.version, user, channel,\n validate=False)\n if ref.name or ref.version or ref.user or ref.channel:\n display_name = \"%s (%s)\" % (os.path.basename(conanfile_path), ref)\n else:\n display_name = os.path.basename(conanfile_path)\n conanfile = conanfile_class(self._output, self._runner, display_name, user, channel)\n conanfile.in_local_cache = False\n try:\n self._initialize_conanfile(conanfile, processed_profile)\n\n # The consumer specific\n conanfile.develop = True\n processed_profile._user_options.descope_options(conanfile.name)\n conanfile.options.initialize_upstream(processed_profile._user_options,\n name=conanfile.name)\n processed_profile._user_options.clear_unscoped_options()\n\n return conanfile\n except Exception as e: # re-raise with file name\n raise ConanException(\"%s: %s\" % (conanfile_path, str(e)))\n\n def load_conanfile(self, conanfile_path, processed_profile, ref):\n conanfile_class = self.load_class(conanfile_path)\n conanfile_class.name = ref.name\n conanfile_class.version = ref.version\n conanfile = conanfile_class(self._output, self._runner, str(ref), ref.user, ref.channel)\n if processed_profile._dev_reference and processed_profile._dev_reference == ref:\n conanfile.develop = True\n try:\n self._initialize_conanfile(conanfile, processed_profile)\n return conanfile\n except Exception as e: # re-raise with file name\n raise ConanException(\"%s: %s\" % (conanfile_path, str(e)))\n\n def load_conanfile_txt(self, conan_txt_path, processed_profile, ref=None):\n if not os.path.exists(conan_txt_path):\n raise NotFoundException(\"Conanfile not found!\")\n\n contents = load(conan_txt_path)\n path, basename = os.path.split(conan_txt_path)\n display_name = \"%s (%s)\" % (basename, ref) if ref and ref.name else basename\n conanfile = self._parse_conan_txt(contents, path, display_name, processed_profile)\n return conanfile\n\n def _parse_conan_txt(self, contents, path, display_name, processed_profile):\n conanfile = ConanFile(self._output, self._runner, display_name)\n conanfile.initialize(Settings(), processed_profile._env_values)\n # It is necessary to copy the settings, because the above is only a 
constraint of\n # conanfile settings, and a txt doesn't define settings. Necessary for generators,\n # as cmake_multi, that check build_type.\n conanfile.settings = processed_profile._settings.copy_values()\n\n try:\n parser = ConanFileTextLoader(contents)\n except Exception as e:\n raise ConanException(\"%s:\\n%s\" % (path, str(e)))\n for reference in parser.requirements:\n ConanFileReference.loads(reference) # Raise if invalid\n conanfile.requires.add(reference)\n for build_reference in parser.build_requirements:\n ConanFileReference.loads(build_reference)\n if not hasattr(conanfile, \"build_requires\"):\n conanfile.build_requires = []\n conanfile.build_requires.append(build_reference)\n\n conanfile.generators = parser.generators\n\n options = OptionsValues.loads(parser.options)\n conanfile.options.values = options\n conanfile.options.initialize_upstream(processed_profile._user_options)\n\n # imports method\n conanfile.imports = parser.imports_method(conanfile)\n conanfile._conan_env_values.update(processed_profile._env_values)\n return conanfile\n\n def load_virtual(self, references, processed_profile, scope_options=True,\n build_requires_options=None):\n # If user don't specify namespace in options, assume that it is\n # for the reference (keep compatibility)\n conanfile = ConanFile(self._output, self._runner, display_name=\"virtual\")\n conanfile.initialize(processed_profile._settings.copy(), processed_profile._env_values)\n conanfile.settings = processed_profile._settings.copy_values()\n\n for reference in references:\n conanfile.requires.add(reference.full_repr()) # Convert to string necessary\n # Allows options without package namespace in conan install commands:\n # conan install zlib/1.2.8@lasote/stable -o shared=True\n if scope_options:\n assert len(references) == 1\n processed_profile._user_options.scope_options(references[0].name)\n if build_requires_options:\n conanfile.options.initialize_upstream(build_requires_options)\n else:\n conanfile.options.initialize_upstream(processed_profile._user_options)\n\n conanfile.generators = [] # remove the default txt generator\n return conanfile\n\n\ndef _parse_module(conanfile_module, module_id):\n \"\"\" Parses a python in-memory module, to extract the classes, mainly the main\n class defining the Recipe, but also process possible existing generators\n @param conanfile_module: the module to be processed\n @return: the main ConanFile class from the module\n \"\"\"\n result = None\n for name, attr in conanfile_module.__dict__.items():\n if (name.startswith(\"_\") or not inspect.isclass(attr) or\n attr.__dict__.get(\"__module__\") != module_id):\n continue\n\n if issubclass(attr, ConanFile) and attr != ConanFile:\n if result is None:\n result = attr\n else:\n raise ConanException(\"More than 1 conanfile in the file\")\n elif issubclass(attr, Generator) and attr != Generator:\n registered_generators.add(attr.__name__, attr, custom=True)\n\n if result is None:\n raise ConanException(\"No subclass of ConanFile\")\n\n return result\n\n\ndef parse_conanfile(conanfile_path, python_requires):\n with python_requires.capture_requires() as py_requires:\n module, filename = _parse_conanfile(conanfile_path)\n try:\n conanfile = _parse_module(module, filename)\n conanfile.python_requires = py_requires\n return module, conanfile\n except Exception as e: # re-raise with file name\n raise ConanException(\"%s: %s\" % (conanfile_path, str(e)))\n\n\ndef _parse_conanfile(conan_file_path):\n \"\"\" From a given path, obtain the in memory python import module\n 
\"\"\"\n\n if not os.path.exists(conan_file_path):\n raise NotFoundException(\"%s not found!\" % conan_file_path)\n\n module_id = str(uuid.uuid1())\n current_dir = os.path.dirname(conan_file_path)\n sys.path.insert(0, current_dir)\n try:\n old_modules = list(sys.modules.keys())\n with chdir(current_dir):\n sys.dont_write_bytecode = True\n loaded = imp.load_source(module_id, conan_file_path)\n sys.dont_write_bytecode = False\n\n # These lines are necessary, otherwise local conanfile imports with same name\n # collide, but no error, and overwrite other packages imports!!\n added_modules = set(sys.modules).difference(old_modules)\n for added in added_modules:\n module = sys.modules[added]\n if module:\n try:\n folder = os.path.dirname(module.__file__)\n except AttributeError: # some module doesn't have __file__\n pass\n else:\n if folder.startswith(current_dir):\n module = sys.modules.pop(added)\n sys.modules[\"%s.%s\" % (module_id, added)] = module\n except Exception:\n import traceback\n trace = traceback.format_exc().split('\\n')\n raise ConanException(\"Unable to load conanfile in %s\\n%s\" % (conan_file_path,\n '\\n'.join(trace[3:])))\n finally:\n sys.path.pop(0)\n\n return loaded, module_id\n", "path": "conans/client/loader.py"}], "after_files": [{"content": "import imp\nimport inspect\nimport os\nimport sys\nimport uuid\n\nfrom conans.client.generators import registered_generators\nfrom conans.client.loader_txt import ConanFileTextLoader\nfrom conans.client.tools.files import chdir\nfrom conans.errors import ConanException, NotFoundException\nfrom conans.model.conan_file import ConanFile\nfrom conans.model.conan_generator import Generator\nfrom conans.model.options import OptionsValues\nfrom conans.model.ref import ConanFileReference\nfrom conans.model.settings import Settings\nfrom conans.model.values import Values\nfrom conans.util.files import load\n\n\nclass ProcessedProfile(object):\n def __init__(self, profile, create_reference=None):\n self._settings = profile.processed_settings\n self._user_options = profile.options.copy()\n\n self._package_settings = profile.package_settings_values\n self._env_values = profile.env_values\n # Make sure the paths are normalized first, so env_values can be just a copy\n self._dev_reference = create_reference\n\n\nclass ConanFileLoader(object):\n def __init__(self, runner, output, python_requires):\n self._runner = runner\n self._output = output\n self._python_requires = python_requires\n sys.modules[\"conans\"].python_requires = python_requires\n self.cached_conanfiles = {}\n\n def invalidate_caches(self):\n self.cached_conanfiles = {}\n self._python_requires.invalidate_caches()\n\n def load_class(self, conanfile_path):\n try:\n return self.cached_conanfiles[conanfile_path]\n except KeyError:\n self._python_requires.valid = True\n _, conanfile = parse_conanfile(conanfile_path, self._python_requires)\n self._python_requires.valid = False\n self.cached_conanfiles[conanfile_path] = conanfile\n return conanfile\n\n def load_export(self, conanfile_path, name, version, user, channel):\n conanfile = self.load_class(conanfile_path)\n # Export does a check on existing name & version\n if \"name\" in conanfile.__dict__:\n if name and name != conanfile.name:\n raise ConanException(\"Package recipe exported with name %s!=%s\"\n % (name, conanfile.name))\n elif not name:\n raise ConanException(\"conanfile didn't specify name\")\n else:\n conanfile.name = name\n\n if \"version\" in conanfile.__dict__:\n if version and version != conanfile.version:\n raise 
ConanException(\"Package recipe exported with version %s!=%s\"\n % (version, conanfile.version))\n elif not version:\n raise ConanException(\"conanfile didn't specify version\")\n else:\n conanfile.version = version\n ref = ConanFileReference(conanfile.name, conanfile.version, user, channel)\n return conanfile(self._output, self._runner, str(ref), user, channel)\n\n @staticmethod\n def _initialize_conanfile(conanfile, processed_profile):\n # Prepare the settings for the loaded conanfile\n # Mixing the global settings with the specified for that name if exist\n tmp_settings = processed_profile._settings.copy()\n if (processed_profile._package_settings and\n conanfile.name in processed_profile._package_settings):\n # Update the values, keeping old ones (confusing assign)\n values_tuple = processed_profile._package_settings[conanfile.name]\n tmp_settings.values = Values.from_list(values_tuple)\n\n conanfile.initialize(tmp_settings, processed_profile._env_values)\n\n def load_consumer(self, conanfile_path, processed_profile, name=None, version=None, user=None,\n channel=None, test=None):\n\n conanfile_class = self.load_class(conanfile_path)\n if name and conanfile_class.name and name != conanfile_class.name:\n raise ConanException(\"Package recipe name %s!=%s\" % (name, conanfile_class.name))\n if version and conanfile_class.version and version != conanfile_class.version:\n raise ConanException(\"Package recipe version %s!=%s\"\n % (version, conanfile_class.version))\n conanfile_class.name = name or conanfile_class.name\n conanfile_class.version = version or conanfile_class.version\n if test:\n display_name = \"%s (test package)\" % test\n else:\n ref = ConanFileReference(conanfile_class.name, conanfile_class.version, user, channel,\n validate=False)\n if ref.name or ref.version or ref.user or ref.channel:\n display_name = \"%s (%s)\" % (os.path.basename(conanfile_path), ref)\n else:\n display_name = os.path.basename(conanfile_path)\n conanfile = conanfile_class(self._output, self._runner, display_name, user, channel)\n conanfile.in_local_cache = False\n try:\n self._initialize_conanfile(conanfile, processed_profile)\n\n # The consumer specific\n conanfile.develop = True\n processed_profile._user_options.descope_options(conanfile.name)\n conanfile.options.initialize_upstream(processed_profile._user_options,\n name=conanfile.name)\n processed_profile._user_options.clear_unscoped_options()\n\n return conanfile\n except Exception as e: # re-raise with file name\n raise ConanException(\"%s: %s\" % (conanfile_path, str(e)))\n\n def load_conanfile(self, conanfile_path, processed_profile, ref):\n conanfile_class = self.load_class(conanfile_path)\n conanfile_class.name = ref.name\n conanfile_class.version = ref.version\n conanfile = conanfile_class(self._output, self._runner, str(ref), ref.user, ref.channel)\n if processed_profile._dev_reference and processed_profile._dev_reference == ref:\n conanfile.develop = True\n try:\n self._initialize_conanfile(conanfile, processed_profile)\n return conanfile\n except Exception as e: # re-raise with file name\n raise ConanException(\"%s: %s\" % (conanfile_path, str(e)))\n\n def load_conanfile_txt(self, conan_txt_path, processed_profile, ref=None):\n if not os.path.exists(conan_txt_path):\n raise NotFoundException(\"Conanfile not found!\")\n\n contents = load(conan_txt_path)\n path, basename = os.path.split(conan_txt_path)\n display_name = \"%s (%s)\" % (basename, ref) if ref and ref.name else basename\n conanfile = self._parse_conan_txt(contents, path, 
display_name, processed_profile)\n return conanfile\n\n def _parse_conan_txt(self, contents, path, display_name, processed_profile):\n conanfile = ConanFile(self._output, self._runner, display_name)\n conanfile.initialize(Settings(), processed_profile._env_values)\n # It is necessary to copy the settings, because the above is only a constraint of\n # conanfile settings, and a txt doesn't define settings. Necessary for generators,\n # as cmake_multi, that check build_type.\n conanfile.settings = processed_profile._settings.copy_values()\n\n try:\n parser = ConanFileTextLoader(contents)\n except Exception as e:\n raise ConanException(\"%s:\\n%s\" % (path, str(e)))\n for reference in parser.requirements:\n ConanFileReference.loads(reference) # Raise if invalid\n conanfile.requires.add(reference)\n for build_reference in parser.build_requirements:\n ConanFileReference.loads(build_reference)\n if not hasattr(conanfile, \"build_requires\"):\n conanfile.build_requires = []\n conanfile.build_requires.append(build_reference)\n\n conanfile.generators = parser.generators\n\n options = OptionsValues.loads(parser.options)\n conanfile.options.values = options\n conanfile.options.initialize_upstream(processed_profile._user_options)\n\n # imports method\n conanfile.imports = parser.imports_method(conanfile)\n return conanfile\n\n def load_virtual(self, references, processed_profile, scope_options=True,\n build_requires_options=None):\n # If user don't specify namespace in options, assume that it is\n # for the reference (keep compatibility)\n conanfile = ConanFile(self._output, self._runner, display_name=\"virtual\")\n conanfile.initialize(processed_profile._settings.copy(), processed_profile._env_values)\n conanfile.settings = processed_profile._settings.copy_values()\n\n for reference in references:\n conanfile.requires.add(reference.full_repr()) # Convert to string necessary\n # Allows options without package namespace in conan install commands:\n # conan install zlib/1.2.8@lasote/stable -o shared=True\n if scope_options:\n assert len(references) == 1\n processed_profile._user_options.scope_options(references[0].name)\n if build_requires_options:\n conanfile.options.initialize_upstream(build_requires_options)\n else:\n conanfile.options.initialize_upstream(processed_profile._user_options)\n\n conanfile.generators = [] # remove the default txt generator\n return conanfile\n\n\ndef _parse_module(conanfile_module, module_id):\n \"\"\" Parses a python in-memory module, to extract the classes, mainly the main\n class defining the Recipe, but also process possible existing generators\n @param conanfile_module: the module to be processed\n @return: the main ConanFile class from the module\n \"\"\"\n result = None\n for name, attr in conanfile_module.__dict__.items():\n if (name.startswith(\"_\") or not inspect.isclass(attr) or\n attr.__dict__.get(\"__module__\") != module_id):\n continue\n\n if issubclass(attr, ConanFile) and attr != ConanFile:\n if result is None:\n result = attr\n else:\n raise ConanException(\"More than 1 conanfile in the file\")\n elif issubclass(attr, Generator) and attr != Generator:\n registered_generators.add(attr.__name__, attr, custom=True)\n\n if result is None:\n raise ConanException(\"No subclass of ConanFile\")\n\n return result\n\n\ndef parse_conanfile(conanfile_path, python_requires):\n with python_requires.capture_requires() as py_requires:\n module, filename = _parse_conanfile(conanfile_path)\n try:\n conanfile = _parse_module(module, filename)\n conanfile.python_requires = 
py_requires\n return module, conanfile\n except Exception as e: # re-raise with file name\n raise ConanException(\"%s: %s\" % (conanfile_path, str(e)))\n\n\ndef _parse_conanfile(conan_file_path):\n \"\"\" From a given path, obtain the in memory python import module\n \"\"\"\n\n if not os.path.exists(conan_file_path):\n raise NotFoundException(\"%s not found!\" % conan_file_path)\n\n module_id = str(uuid.uuid1())\n current_dir = os.path.dirname(conan_file_path)\n sys.path.insert(0, current_dir)\n try:\n old_modules = list(sys.modules.keys())\n with chdir(current_dir):\n sys.dont_write_bytecode = True\n loaded = imp.load_source(module_id, conan_file_path)\n sys.dont_write_bytecode = False\n\n # These lines are necessary, otherwise local conanfile imports with same name\n # collide, but no error, and overwrite other packages imports!!\n added_modules = set(sys.modules).difference(old_modules)\n for added in added_modules:\n module = sys.modules[added]\n if module:\n try:\n folder = os.path.dirname(module.__file__)\n except AttributeError: # some module doesn't have __file__\n pass\n else:\n if folder.startswith(current_dir):\n module = sys.modules.pop(added)\n sys.modules[\"%s.%s\" % (module_id, added)] = module\n except Exception:\n import traceback\n trace = traceback.format_exc().split('\\n')\n raise ConanException(\"Unable to load conanfile in %s\\n%s\" % (conan_file_path,\n '\\n'.join(trace[3:])))\n finally:\n sys.path.pop(0)\n\n return loaded, module_id\n", "path": "conans/client/loader.py"}]} | 3,824 | 106 |
gh_patches_debug_47927 | rasdani/github-patches | git_diff | uccser__cs-unplugged-885 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Minor adjustments to navbar and homepage
## Navbar
- [x] There should be more space between logo and 'Topics'.
- [x] The search bar can be ~20% smaller.
## Hompeage
- [x] Navbar should be transparent and fade in when user scrolls down.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/general/views.py`
Content:
```
1 """Views for the general application."""
2
3 from django.views.generic import TemplateView
4 from django.http import HttpResponse
5
6
7 class GeneralIndexView(TemplateView):
8 """View for the homepage that renders from a template."""
9
10 template_name = "general/index.html"
11
12
13 class GeneralAboutView(TemplateView):
14 """View for the about page that renders from a template."""
15
16 template_name = "general/about.html"
17
18
19 class GeneralContactView(TemplateView):
20 """View for the contact page that renders from a template."""
21
22 template_name = "general/contact.html"
23
24
25 class GeneralPeopleView(TemplateView):
26 """View for the people page that renders from a template."""
27
28 template_name = "general/people.html"
29
30
31 class GeneralPrinciplesView(TemplateView):
32 """View for the princples page that renders from a template."""
33
34 template_name = "general/principles.html"
35
36
37 class WhatIsCSView(TemplateView):
38 """View for the 'What is Computer Science?' page that renders from a template."""
39
40 template_name = "general/what-is-computer-science.html"
41
42
43 class ComputationalThinkingView(TemplateView):
44 """View for the Computational Thinking page that renders from a template."""
45
46 template_name = "general/computational-thinking.html"
47
48
49 class HowDoITeachCSUnpluggedView(TemplateView):
50 """View for the 'How do I teach CS Unplugged?' page that renders from a template."""
51
52 template_name = "general/how-do-i-teach-cs-unplugged.html"
53
54
55 def health_check(request):
56 """Return heath check response for Google App Engine.
57
58 Returns a 200 HTTP response for Google App Engine to detect the system
59 is running.
60 """
61 return HttpResponse(status=200)
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py
--- a/csunplugged/general/views.py
+++ b/csunplugged/general/views.py
@@ -9,6 +9,16 @@
template_name = "general/index.html"
+ def get_context_data(self, **kwargs):
+ """Provide the context data for the homepage.
+
+ Returns:
+ Dictionary of context data.
+ """
+ context = super(GeneralIndexView, self).get_context_data(**kwargs)
+ context["homepage"] = True
+ return context
+
class GeneralAboutView(TemplateView):
"""View for the about page that renders from a template."""
| {"golden_diff": "diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -9,6 +9,16 @@\n \n template_name = \"general/index.html\"\n \n+ def get_context_data(self, **kwargs):\n+ \"\"\"Provide the context data for the homepage.\n+\n+ Returns:\n+ Dictionary of context data.\n+ \"\"\"\n+ context = super(GeneralIndexView, self).get_context_data(**kwargs)\n+ context[\"homepage\"] = True\n+ return context\n+\n \n class GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n", "issue": "Minor adjustments to navbar and homepage\n## Navbar\r\n\r\n- [x] There should be more space between logo and 'Topics'.\r\n- [x] The search bar can be ~20% smaller.\r\n\r\n## Hompeage\r\n\r\n- [x] Navbar should be transparent and fade in when user scrolls down.\n", "before_files": [{"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = \"general/index.html\"\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = \"general/about.html\"\n\n\nclass GeneralContactView(TemplateView):\n \"\"\"View for the contact page that renders from a template.\"\"\"\n\n template_name = \"general/contact.html\"\n\n\nclass GeneralPeopleView(TemplateView):\n \"\"\"View for the people page that renders from a template.\"\"\"\n\n template_name = \"general/people.html\"\n\n\nclass GeneralPrinciplesView(TemplateView):\n \"\"\"View for the princples page that renders from a template.\"\"\"\n\n template_name = \"general/principles.html\"\n\n\nclass WhatIsCSView(TemplateView):\n \"\"\"View for the 'What is Computer Science?' page that renders from a template.\"\"\"\n\n template_name = \"general/what-is-computer-science.html\"\n\n\nclass ComputationalThinkingView(TemplateView):\n \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n\n template_name = \"general/computational-thinking.html\"\n\n\nclass HowDoITeachCSUnpluggedView(TemplateView):\n \"\"\"View for the 'How do I teach CS Unplugged?' 
page that renders from a template.\"\"\"\n\n template_name = \"general/how-do-i-teach-cs-unplugged.html\"\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}], "after_files": [{"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = \"general/index.html\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the homepage.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n context = super(GeneralIndexView, self).get_context_data(**kwargs)\n context[\"homepage\"] = True\n return context\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = \"general/about.html\"\n\n\nclass GeneralContactView(TemplateView):\n \"\"\"View for the contact page that renders from a template.\"\"\"\n\n template_name = \"general/contact.html\"\n\n\nclass GeneralPeopleView(TemplateView):\n \"\"\"View for the people page that renders from a template.\"\"\"\n\n template_name = \"general/people.html\"\n\n\nclass GeneralPrinciplesView(TemplateView):\n \"\"\"View for the princples page that renders from a template.\"\"\"\n\n template_name = \"general/principles.html\"\n\n\nclass WhatIsCSView(TemplateView):\n \"\"\"View for the 'What is Computer Science?' page that renders from a template.\"\"\"\n\n template_name = \"general/what-is-computer-science.html\"\n\n\nclass ComputationalThinkingView(TemplateView):\n \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n\n template_name = \"general/computational-thinking.html\"\n\n\nclass HowDoITeachCSUnpluggedView(TemplateView):\n \"\"\"View for the 'How do I teach CS Unplugged?' page that renders from a template.\"\"\"\n\n template_name = \"general/how-do-i-teach-cs-unplugged.html\"\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}]} | 822 | 154 |
gh_patches_debug_40993 | rasdani/github-patches | git_diff | apluslms__a-plus-1062 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature request: send email when teacher adds news
Hi
It would be very nice to be able to notify students via email when teacher adds news. This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.
What do you think?
Thanks!
Feature request: send email when teacher adds news
Hi
It would be very nice to be able to notify students via email when teacher adds news. This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.
What do you think?
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `news/forms.py`
Content:
```
1 from django import forms
2
3 from .models import News
4
5
6 class NewsForm(forms.ModelForm):
7
8 class Meta:
9 model = News
10 fields = [
11 'audience',
12 'publish',
13 'pin',
14 'title',
15 'body',
16 ]
17
```
Path: `lib/email_messages.py`
Content:
```
1 import logging
2 import traceback
3 from django.conf import settings
4 from django.core.mail import send_mail
5 from django.urls import reverse
6 from .helpers import build_aplus_url
7
8
9 logger = logging.getLogger('aplus.lib.email_messages')
10
11
12 def email_course_instance(instance, subject, message, everyone=False) -> bool:
13 """
14 Sends an email to a course instance's technical support emails or teachers if technical support not set.
15 If everyone == True, sends emails to teachers anyway.
16 """
17 recipients = []
18 if instance.technical_error_emails:
19 recipients = instance.technical_error_emails.split(",")
20 if everyone or not recipients:
21 recipients = instance.teachers.exclude(user__email='').values_list("user__email", flat=True)
22
23 if not recipients:
24 raise ValueError("No recipients")
25
26 try:
27 return send_mail(subject, message, settings.SERVER_EMAIL, recipients, True) == 1
28 except:
29 logger.exception('Failed to send course instance emails.')
30 raise
31
32
33 def email_course_error(request, exercise, message, exception=True):
34 """
35 Sends error message to course instance's teachers or technical support emails if set.
36 """
37 instance = exercise.course_instance
38
39 error_trace = "-"
40 if exception:
41 error_trace = traceback.format_exc()
42
43 if request:
44 request_fields = repr(request)
45 else:
46 request_fields = "No request available"
47
48 subject = settings.EXERCISE_ERROR_SUBJECT.format(
49 course=instance.course.code,
50 exercise=str(exercise))
51 body = settings.EXERCISE_ERROR_DESCRIPTION.format(
52 message=message,
53 exercise_url=build_aplus_url(
54 exercise.get_absolute_url(), user_url=True),
55 course_edit_url=build_aplus_url(
56 instance.get_url('course-details'), user_url=True),
57 error_trace=error_trace,
58 request_fields=request_fields)
59
60 try:
61 email_course_instance(instance, subject, body)
62 except:
63 pass
64
```
Path: `news/views.py`
Content:
```
1 from django.core.exceptions import PermissionDenied
2 from django.http import Http404
3 from django.shortcuts import get_object_or_404
4
5 from authorization.permissions import ACCESS
6 from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin
7 from lib.viewbase import BaseFormView, BaseRedirectView
8 from .forms import NewsForm
9 from .models import News
10
11
12 class ListNewsView(CourseInstanceBaseView):
13 access_mode = ACCESS.TEACHER
14 template_name = "news/list.html"
15
16 def get_common_objects(self):
17 super().get_common_objects()
18 self.news = self.instance.news.all()
19 self.note("news")
20
21
22 class EditNewsView(CourseInstanceMixin, BaseFormView):
23 access_mode = ACCESS.TEACHER
24 template_name = "news/edit.html"
25 form_class = NewsForm
26 news_item_kw = "news_id"
27
28 def get_form_kwargs(self):
29 kwargs = super().get_form_kwargs()
30
31 news_id = self._get_kwarg(self.news_item_kw, default=None)
32 if news_id:
33 self.news_item = get_object_or_404(
34 News,
35 pk=news_id,
36 course_instance=self.instance
37 )
38 self.note("news_item")
39 else:
40 self.news_item = News(course_instance=self.instance)
41
42 kwargs["instance"] = self.news_item
43 return kwargs
44
45 def get_success_url(self):
46 return self.instance.get_url("news-list")
47
48 def form_valid(self, form):
49 form.save()
50 return super().form_valid(form)
51
52
53 class RemoveNewsView(CourseInstanceMixin, BaseRedirectView):
54 access_mode = ACCESS.TEACHER
55 news_item_kw = "news_id"
56
57 def get_resource_objects(self):
58 super().get_resource_objects()
59 self.news_item = get_object_or_404(
60 News,
61 id=self._get_kwarg(self.news_item_kw),
62 course_instance=self.instance,
63 )
64 self.note("news_item")
65
66 def post(self, request, *args, **kwargs):
67 self.news_item.delete()
68 return self.redirect(self.instance.get_url("news-list"))
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/email_messages.py b/lib/email_messages.py
--- a/lib/email_messages.py
+++ b/lib/email_messages.py
@@ -1,9 +1,10 @@
import logging
import traceback
from django.conf import settings
-from django.core.mail import send_mail
-from django.urls import reverse
-from .helpers import build_aplus_url
+from django.core.mail import send_mail, send_mass_mail
+
+from .helpers import Enum, build_aplus_url
+from course.models import CourseInstance
logger = logging.getLogger('aplus.lib.email_messages')
@@ -61,3 +62,30 @@
email_course_instance(instance, subject, body)
except:
pass
+
+
+def email_course_students(
+ instance: CourseInstance,
+ subject: str,
+ message: str,
+ audience: Enum = CourseInstance.ENROLLMENT_AUDIENCE.ALL_USERS,
+ ) -> int:
+ """
+ Sends an email to students on the course. Audience parameter controls whether the mail goes
+ to all (default), just internal, or just external students.
+ Returns number of emails sent, or -1 in case of error.
+ """
+ students = instance.students
+ if audience == CourseInstance.ENROLLMENT_AUDIENCE.INTERNAL_USERS:
+ students = students.filter(organization=settings.LOCAL_ORGANIZATION)
+ elif audience == CourseInstance.ENROLLMENT_AUDIENCE.EXTERNAL_USERS:
+ students = students.exclude(organization=settings.LOCAL_ORGANIZATION)
+
+ recipients = students.exclude(user__email='').values_list("user__email", flat=True)
+ emails = tuple(map(lambda x: (subject, message, settings.SERVER_EMAIL, [x]), recipients))
+
+ try:
+ return send_mass_mail(emails)
+ except:
+ logger.exception('Failed to send course instance emails.')
+ return -1
diff --git a/news/forms.py b/news/forms.py
--- a/news/forms.py
+++ b/news/forms.py
@@ -1,16 +1,25 @@
+from typing import Any
+
from django import forms
+from django.utils.translation import gettext_lazy as _
from .models import News
class NewsForm(forms.ModelForm):
+ email = forms.BooleanField(
+ required=False,
+ label=_("SEND_EMAIL_TO_STUDENTS"),
+ )
+
class Meta:
model = News
fields = [
'audience',
'publish',
'pin',
+ 'email',
'title',
'body',
]
diff --git a/news/views.py b/news/views.py
--- a/news/views.py
+++ b/news/views.py
@@ -1,10 +1,14 @@
+from django.conf import settings
+from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import get_object_or_404
+from django.utils.translation import gettext_lazy as _
from authorization.permissions import ACCESS
from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin
from lib.viewbase import BaseFormView, BaseRedirectView
+from lib.email_messages import email_course_students
from .forms import NewsForm
from .models import News
@@ -47,6 +51,15 @@
def form_valid(self, form):
form.save()
+ if form.cleaned_data['email']:
+ subject = f"[{settings.BRAND_NAME} course news] {self.instance.course.code}: {self.news_item.title}"
+ if email_course_students(
+ self.instance,
+ subject,
+ self.news_item.body,
+ self.news_item.audience,
+ ) < 0:
+ messages.error(self.request, _('FAILED_TO_SEND_EMAIL'))
return super().form_valid(form)
| {"golden_diff": "diff --git a/lib/email_messages.py b/lib/email_messages.py\n--- a/lib/email_messages.py\n+++ b/lib/email_messages.py\n@@ -1,9 +1,10 @@\n import logging\n import traceback\n from django.conf import settings\n-from django.core.mail import send_mail\n-from django.urls import reverse\n-from .helpers import build_aplus_url\n+from django.core.mail import send_mail, send_mass_mail\n+\n+from .helpers import Enum, build_aplus_url\n+from course.models import CourseInstance\n \n \n logger = logging.getLogger('aplus.lib.email_messages')\n@@ -61,3 +62,30 @@\n email_course_instance(instance, subject, body)\n except:\n pass\n+\n+\n+def email_course_students(\n+ instance: CourseInstance,\n+ subject: str,\n+ message: str,\n+ audience: Enum = CourseInstance.ENROLLMENT_AUDIENCE.ALL_USERS,\n+ ) -> int:\n+ \"\"\"\n+ Sends an email to students on the course. Audience parameter controls whether the mail goes\n+ to all (default), just internal, or just external students.\n+ Returns number of emails sent, or -1 in case of error.\n+ \"\"\"\n+ students = instance.students\n+ if audience == CourseInstance.ENROLLMENT_AUDIENCE.INTERNAL_USERS:\n+ students = students.filter(organization=settings.LOCAL_ORGANIZATION)\n+ elif audience == CourseInstance.ENROLLMENT_AUDIENCE.EXTERNAL_USERS:\n+ students = students.exclude(organization=settings.LOCAL_ORGANIZATION)\n+\n+ recipients = students.exclude(user__email='').values_list(\"user__email\", flat=True)\n+ emails = tuple(map(lambda x: (subject, message, settings.SERVER_EMAIL, [x]), recipients))\n+\n+ try:\n+ return send_mass_mail(emails)\n+ except:\n+ logger.exception('Failed to send course instance emails.')\n+ return -1\ndiff --git a/news/forms.py b/news/forms.py\n--- a/news/forms.py\n+++ b/news/forms.py\n@@ -1,16 +1,25 @@\n+from typing import Any\n+\n from django import forms\n+from django.utils.translation import gettext_lazy as _\n \n from .models import News\n \n \n class NewsForm(forms.ModelForm):\n \n+ email = forms.BooleanField(\n+ required=False,\n+ label=_(\"SEND_EMAIL_TO_STUDENTS\"),\n+ )\n+\n class Meta:\n model = News\n fields = [\n 'audience',\n 'publish',\n 'pin',\n+ 'email',\n 'title',\n 'body',\n ]\ndiff --git a/news/views.py b/news/views.py\n--- a/news/views.py\n+++ b/news/views.py\n@@ -1,10 +1,14 @@\n+from django.conf import settings\n+from django.contrib import messages\n from django.core.exceptions import PermissionDenied\n from django.http import Http404\n from django.shortcuts import get_object_or_404\n+from django.utils.translation import gettext_lazy as _\n \n from authorization.permissions import ACCESS\n from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\n from lib.viewbase import BaseFormView, BaseRedirectView\n+from lib.email_messages import email_course_students\n from .forms import NewsForm\n from .models import News\n \n@@ -47,6 +51,15 @@\n \n def form_valid(self, form):\n form.save()\n+ if form.cleaned_data['email']:\n+ subject = f\"[{settings.BRAND_NAME} course news] {self.instance.course.code}: {self.news_item.title}\"\n+ if email_course_students(\n+ self.instance,\n+ subject,\n+ self.news_item.body,\n+ self.news_item.audience,\n+ ) < 0:\n+ messages.error(self.request, _('FAILED_TO_SEND_EMAIL'))\n return super().form_valid(form)\n", "issue": "Feature request: send email when teacher adds news\nHi\r\n\r\nIt would be very nice to be able to notify students via email when teacher adds news. 
This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.\r\n\r\nWhat do you think?\r\n\r\nThanks!\nFeature request: send email when teacher adds news\nHi\r\n\r\nIt would be very nice to be able to notify students via email when teacher adds news. This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.\r\n\r\nWhat do you think?\r\n\r\nThanks!\n", "before_files": [{"content": "from django import forms\n\nfrom .models import News\n\n\nclass NewsForm(forms.ModelForm):\n\n class Meta:\n model = News\n fields = [\n 'audience',\n 'publish',\n 'pin',\n 'title',\n 'body',\n ]\n", "path": "news/forms.py"}, {"content": "import logging\nimport traceback\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.urls import reverse\nfrom .helpers import build_aplus_url\n\n\nlogger = logging.getLogger('aplus.lib.email_messages')\n\n\ndef email_course_instance(instance, subject, message, everyone=False) -> bool:\n \"\"\"\n Sends an email to a course instance's technical support emails or teachers if technical support not set.\n If everyone == True, sends emails to teachers anyway.\n \"\"\"\n recipients = []\n if instance.technical_error_emails:\n recipients = instance.technical_error_emails.split(\",\")\n if everyone or not recipients:\n recipients = instance.teachers.exclude(user__email='').values_list(\"user__email\", flat=True)\n\n if not recipients:\n raise ValueError(\"No recipients\")\n\n try:\n return send_mail(subject, message, settings.SERVER_EMAIL, recipients, True) == 1\n except:\n logger.exception('Failed to send course instance emails.')\n raise\n\n\ndef email_course_error(request, exercise, message, exception=True):\n \"\"\"\n Sends error message to course instance's teachers or technical support emails if set.\n \"\"\"\n instance = exercise.course_instance\n\n error_trace = \"-\"\n if exception:\n error_trace = traceback.format_exc()\n\n if request:\n request_fields = repr(request)\n else:\n request_fields = \"No request available\"\n\n subject = settings.EXERCISE_ERROR_SUBJECT.format(\n course=instance.course.code,\n exercise=str(exercise))\n body = settings.EXERCISE_ERROR_DESCRIPTION.format(\n message=message,\n exercise_url=build_aplus_url(\n exercise.get_absolute_url(), user_url=True),\n course_edit_url=build_aplus_url(\n instance.get_url('course-details'), user_url=True),\n error_trace=error_trace,\n request_fields=request_fields)\n\n try:\n email_course_instance(instance, subject, body)\n except:\n pass\n", "path": "lib/email_messages.py"}, {"content": "from django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\n\nfrom authorization.permissions import ACCESS\nfrom course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\nfrom lib.viewbase import BaseFormView, BaseRedirectView\nfrom .forms import NewsForm\nfrom .models import News\n\n\nclass ListNewsView(CourseInstanceBaseView):\n access_mode = ACCESS.TEACHER\n template_name = \"news/list.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.news = self.instance.news.all()\n self.note(\"news\")\n\n\nclass EditNewsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n template_name = \"news/edit.html\"\n form_class = NewsForm\n news_item_kw = \"news_id\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n\n news_id = self._get_kwarg(self.news_item_kw, 
default=None)\n if news_id:\n self.news_item = get_object_or_404(\n News,\n pk=news_id,\n course_instance=self.instance\n )\n self.note(\"news_item\")\n else:\n self.news_item = News(course_instance=self.instance)\n\n kwargs[\"instance\"] = self.news_item\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_url(\"news-list\")\n\n def form_valid(self, form):\n form.save()\n return super().form_valid(form)\n\n\nclass RemoveNewsView(CourseInstanceMixin, BaseRedirectView):\n access_mode = ACCESS.TEACHER\n news_item_kw = \"news_id\"\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.news_item = get_object_or_404(\n News,\n id=self._get_kwarg(self.news_item_kw),\n course_instance=self.instance,\n )\n self.note(\"news_item\")\n\n def post(self, request, *args, **kwargs):\n self.news_item.delete()\n return self.redirect(self.instance.get_url(\"news-list\"))\n", "path": "news/views.py"}], "after_files": [{"content": "from typing import Any\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .models import News\n\n\nclass NewsForm(forms.ModelForm):\n\n email = forms.BooleanField(\n required=False,\n label=_(\"SEND_EMAIL_TO_STUDENTS\"),\n )\n\n class Meta:\n model = News\n fields = [\n 'audience',\n 'publish',\n 'pin',\n 'email',\n 'title',\n 'body',\n ]\n", "path": "news/forms.py"}, {"content": "import logging\nimport traceback\nfrom django.conf import settings\nfrom django.core.mail import send_mail, send_mass_mail\n\nfrom .helpers import Enum, build_aplus_url\nfrom course.models import CourseInstance\n\n\nlogger = logging.getLogger('aplus.lib.email_messages')\n\n\ndef email_course_instance(instance, subject, message, everyone=False) -> bool:\n \"\"\"\n Sends an email to a course instance's technical support emails or teachers if technical support not set.\n If everyone == True, sends emails to teachers anyway.\n \"\"\"\n recipients = []\n if instance.technical_error_emails:\n recipients = instance.technical_error_emails.split(\",\")\n if everyone or not recipients:\n recipients = instance.teachers.exclude(user__email='').values_list(\"user__email\", flat=True)\n\n if not recipients:\n raise ValueError(\"No recipients\")\n\n try:\n return send_mail(subject, message, settings.SERVER_EMAIL, recipients, True) == 1\n except:\n logger.exception('Failed to send course instance emails.')\n raise\n\n\ndef email_course_error(request, exercise, message, exception=True):\n \"\"\"\n Sends error message to course instance's teachers or technical support emails if set.\n \"\"\"\n instance = exercise.course_instance\n\n error_trace = \"-\"\n if exception:\n error_trace = traceback.format_exc()\n\n if request:\n request_fields = repr(request)\n else:\n request_fields = \"No request available\"\n\n subject = settings.EXERCISE_ERROR_SUBJECT.format(\n course=instance.course.code,\n exercise=str(exercise))\n body = settings.EXERCISE_ERROR_DESCRIPTION.format(\n message=message,\n exercise_url=build_aplus_url(\n exercise.get_absolute_url(), user_url=True),\n course_edit_url=build_aplus_url(\n instance.get_url('course-details'), user_url=True),\n error_trace=error_trace,\n request_fields=request_fields)\n\n try:\n email_course_instance(instance, subject, body)\n except:\n pass\n\n\ndef email_course_students(\n instance: CourseInstance,\n subject: str,\n message: str,\n audience: Enum = CourseInstance.ENROLLMENT_AUDIENCE.ALL_USERS,\n ) -> int:\n \"\"\"\n Sends an email to students on the course. 
Audience parameter controls whether the mail goes\n to all (default), just internal, or just external students.\n Returns number of emails sent, or -1 in case of error.\n \"\"\"\n students = instance.students\n if audience == CourseInstance.ENROLLMENT_AUDIENCE.INTERNAL_USERS:\n students = students.filter(organization=settings.LOCAL_ORGANIZATION)\n elif audience == CourseInstance.ENROLLMENT_AUDIENCE.EXTERNAL_USERS:\n students = students.exclude(organization=settings.LOCAL_ORGANIZATION)\n\n recipients = students.exclude(user__email='').values_list(\"user__email\", flat=True)\n emails = tuple(map(lambda x: (subject, message, settings.SERVER_EMAIL, [x]), recipients))\n\n try:\n return send_mass_mail(emails)\n except:\n logger.exception('Failed to send course instance emails.')\n return -1\n", "path": "lib/email_messages.py"}, {"content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import gettext_lazy as _\n\nfrom authorization.permissions import ACCESS\nfrom course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\nfrom lib.viewbase import BaseFormView, BaseRedirectView\nfrom lib.email_messages import email_course_students\nfrom .forms import NewsForm\nfrom .models import News\n\n\nclass ListNewsView(CourseInstanceBaseView):\n access_mode = ACCESS.TEACHER\n template_name = \"news/list.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.news = self.instance.news.all()\n self.note(\"news\")\n\n\nclass EditNewsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n template_name = \"news/edit.html\"\n form_class = NewsForm\n news_item_kw = \"news_id\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n\n news_id = self._get_kwarg(self.news_item_kw, default=None)\n if news_id:\n self.news_item = get_object_or_404(\n News,\n pk=news_id,\n course_instance=self.instance\n )\n self.note(\"news_item\")\n else:\n self.news_item = News(course_instance=self.instance)\n\n kwargs[\"instance\"] = self.news_item\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_url(\"news-list\")\n\n def form_valid(self, form):\n form.save()\n if form.cleaned_data['email']:\n subject = f\"[{settings.BRAND_NAME} course news] {self.instance.course.code}: {self.news_item.title}\"\n if email_course_students(\n self.instance,\n subject,\n self.news_item.body,\n self.news_item.audience,\n ) < 0:\n messages.error(self.request, _('FAILED_TO_SEND_EMAIL'))\n return super().form_valid(form)\n\n\nclass RemoveNewsView(CourseInstanceMixin, BaseRedirectView):\n access_mode = ACCESS.TEACHER\n news_item_kw = \"news_id\"\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.news_item = get_object_or_404(\n News,\n id=self._get_kwarg(self.news_item_kw),\n course_instance=self.instance,\n )\n self.note(\"news_item\")\n\n def post(self, request, *args, **kwargs):\n self.news_item.delete()\n return self.redirect(self.instance.get_url(\"news-list\"))\n", "path": "news/views.py"}]} | 1,596 | 812 |
gh_patches_debug_358 | rasdani/github-patches | git_diff | spacetelescope__jwql-550 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cron jobs for monitors currently failing
Traceback (most recent call last):
File "/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/bias_monitor.py", line 58, in <module>
from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks
File "/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/dark_monitor.py", line 77, in <module>
from jwql.jwql_monitors import monitor_mast
File "/home/jwqladm/repositories/jwql/jwql/jwql_monitors/monitor_mast.py", line 25, in <module>
from bokeh.embed import components
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/__init__.py", line 81, in <module>
from .util import logconfig
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/util/logconfig.py", line 87, in <module>
level = settings.py_log_level()
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py", line 310, in __call__
return self._convert(os.environ[self._env_var])
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py", line 236, in convert_logging
raise ValueError("Cannot convert {} to log level, valid values are: {}".format(value, ", ".join(_log_levels)))
ValueError: Cannot convert WARN to log level, valid values are: CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE, NONE
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.22.0'
6
7 AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']
13 REQUIRES = [
14 'asdf>=2.3.3',
15 'astropy>=3.2.1',
16 'astroquery>=0.3.9',
17 'authlib',
18 'bokeh>=1.0',
19 'codecov',
20 'django>=2.0',
21 'flake8',
22 'inflection',
23 'ipython',
24 'jinja2',
25 'jsonschema==2.6.0',
26 'jwedb>=0.0.3',
27 'matplotlib',
28 'numpy',
29 'numpydoc',
30 'pandas',
31 'psycopg2',
32 'pysiaf',
33 'pytest',
34 'pytest-cov',
35 'scipy',
36 'sphinx',
37 'sqlalchemy',
38 'stsci_rtd_theme',
39 'twine'
40 ]
41
42 setup(
43 name='jwql',
44 version=VERSION,
45 description=DESCRIPTION,
46 url='https://github.com/spacetelescope/jwql.git',
47 author=AUTHORS,
48 author_email='[email protected]',
49 license='BSD',
50 keywords=['astronomy', 'python'],
51 classifiers=['Programming Language :: Python'],
52 packages=find_packages(),
53 install_requires=REQUIRES,
54 dependency_links=DEPENDENCY_LINKS,
55 include_package_data=True,
56 include_dirs=[np.get_include()],
57 )
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
'astropy>=3.2.1',
'astroquery>=0.3.9',
'authlib',
- 'bokeh>=1.0',
+ 'bokeh>=1.0,<1.4',
'codecov',
'django>=2.0',
'flake8',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n- 'bokeh>=1.0',\n+ 'bokeh>=1.0,<1.4',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n", "issue": "Cron jobs for monitors currently failing \nTraceback (most recent call last):\r\n File \"/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/bias_monitor.py\", line 58, in <module>\r\n from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks\r\n File \"/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/dark_monitor.py\", line 77, in <module>\r\n from jwql.jwql_monitors import monitor_mast\r\n File \"/home/jwqladm/repositories/jwql/jwql/jwql_monitors/monitor_mast.py\", line 25, in <module>\r\n from bokeh.embed import components\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/__init__.py\", line 81, in <module>\r\n from .util import logconfig\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/util/logconfig.py\", line 87, in <module>\r\n level = settings.py_log_level()\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py\", line 310, in __call__\r\n return self._convert(os.environ[self._env_var])\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py\", line 236, in convert_logging\r\n raise ValueError(\"Cannot convert {} to log level, valid values are: {}\".format(value, \", \".join(_log_levels)))\r\nValueError: Cannot convert WARN to log level, valid values are: CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE, NONE\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.22.0'\n\nAUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb>=0.0.3',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.22.0'\n\nAUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\nREQUIRES = [\n 'asdf>=2.3.3',\n 
'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb>=0.0.3',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 1,221 | 101 |
gh_patches_debug_1710 | rasdani/github-patches | git_diff | encode__httpx-407 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ASGIDispatch and WSGIDispatch should be importable from the top-level httpx package
From #396:
> ``ASGIDispatch`` and ``WSGIDispatch`` are documented as top-level but aren't exposed at the top level. This is definitely an issue, I'd recommend the route of making both available top-level.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/dispatch/__init__.py`
Content:
```
1 """
2 Dispatch classes handle the raw network connections and the implementation
3 details of making the HTTP request and receiving the response.
4 """
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/dispatch/__init__.py b/httpx/dispatch/__init__.py
--- a/httpx/dispatch/__init__.py
+++ b/httpx/dispatch/__init__.py
@@ -2,3 +2,7 @@
Dispatch classes handle the raw network connections and the implementation
details of making the HTTP request and receiving the response.
"""
+from .asgi import ASGIDispatch
+from .wsgi import WSGIDispatch
+
+__all__ = ["ASGIDispatch", "WSGIDispatch"]
| {"golden_diff": "diff --git a/httpx/dispatch/__init__.py b/httpx/dispatch/__init__.py\n--- a/httpx/dispatch/__init__.py\n+++ b/httpx/dispatch/__init__.py\n@@ -2,3 +2,7 @@\n Dispatch classes handle the raw network connections and the implementation\n details of making the HTTP request and receiving the response.\n \"\"\"\n+from .asgi import ASGIDispatch\n+from .wsgi import WSGIDispatch\n+\n+__all__ = [\"ASGIDispatch\", \"WSGIDispatch\"]\n", "issue": "ASGIDispatch and WSGIDispatch should be importable from the top-level httpx package\nFrom #396:\r\n\r\n> ``ASGIDispatch`` and ``WSGIDispatch`` are documented as top-level but aren't exposed at the top level. This is definitely an issue, I'd recommend the route of making both available top-level.\n", "before_files": [{"content": "\"\"\"\nDispatch classes handle the raw network connections and the implementation\ndetails of making the HTTP request and receiving the response.\n\"\"\"\n", "path": "httpx/dispatch/__init__.py"}], "after_files": [{"content": "\"\"\"\nDispatch classes handle the raw network connections and the implementation\ndetails of making the HTTP request and receiving the response.\n\"\"\"\nfrom .asgi import ASGIDispatch\nfrom .wsgi import WSGIDispatch\n\n__all__ = [\"ASGIDispatch\", \"WSGIDispatch\"]\n", "path": "httpx/dispatch/__init__.py"}]} | 368 | 121 |
gh_patches_debug_21028 | rasdani/github-patches | git_diff | techmatters__terraso-backend-141 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Landscape creation and update
<!--
Use a concise title that describes the request.
Bad: localization
Good: Translate site into Spanish
Bad: customize hostname
Good: Determine hostname at build time from environment
-->
## Description
- Default landscape group should be created when a new landscape is created
- Manager should be assigned at the creation of a landscape
- Only managers can update landscape data
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `terraso_backend/apps/core/models/landscapes.py`
Content:
```
1 import structlog
2 from django.db import models
3
4 from apps.core import permission_rules as perm_rules
5
6 from .commons import BaseModel, SlugModel
7 from .groups import Group
8 from .users import User
9
10 logger = structlog.get_logger(__name__)
11
12
13 class Landscape(SlugModel):
14 """
15 This model represents a Landscape on Terraso platform.
16
17 A Landscape is a socio-ecological system that consists of natural
18 and/or human-modified ecosystems. Defined by its stakeholds, a
19 Landscape usually has geographical boundaries. It may correspond to,
20 or be a combination of, natural boundaries, distinct land features,
21 socially defined areas such as indigenous territories, and/or
22 jurisdictional and administrative boundaries. The boundaries of a
23 Landscape can cross several countries.
24 """
25
26 name = models.CharField(max_length=128, unique=True)
27 description = models.TextField(max_length=512, blank=True, default="")
28 website = models.URLField(blank=True, default="")
29 location = models.CharField(max_length=128, blank=True, default="")
30 area_polygon = models.JSONField(blank=True, null=True)
31
32 created_by = models.ForeignKey(
33 User,
34 blank=True,
35 null=True,
36 on_delete=models.PROTECT,
37 related_name="created_landscapes",
38 )
39 groups = models.ManyToManyField(Group, through="LandscapeGroup")
40
41 field_to_slug = "name"
42
43 class Meta(SlugModel.Meta):
44 rules_permissions = {
45 "change": perm_rules.allowed_to_change_landscape,
46 "delete": perm_rules.allowed_to_delete_landscape,
47 }
48
49 def get_default_group(self):
50 """
51 A default Group in a Landscape is that Group where any
52 individual (associated or not with other Groups) is added when
53 associating directly with a Landscape.
54 """
55 try:
56 # associated_groups is the related_name defined on
57 # LandscapeGroup relationship with Landscape. It returns a
58 # queryset of LandscapeGroup
59 landscape_group = self.associated_groups.get(is_default_landscape_group=True)
60 except LandscapeGroup.DoesNotExist:
61 logger.error(
62 "Landscape has no default group, but it must have", extra={"landscape_id": self.pk}
63 )
64 return None
65
66 return landscape_group.group
67
68 def __str__(self):
69 return self.name
70
71
72 class LandscapeGroup(BaseModel):
73 """
74 This model represents the association between a Landscape and a Group on
75 Terraso platform.
76 """
77
78 landscape = models.ForeignKey(
79 Landscape, on_delete=models.CASCADE, related_name="associated_groups"
80 )
81 group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name="associated_landscapes")
82
83 is_default_landscape_group = models.BooleanField(blank=True, default=False)
84
85 class Meta:
86 rules_permissions = {
87 "add": perm_rules.allowed_to_add_landscape_group,
88 "delete": perm_rules.allowed_to_delete_landscape_group,
89 }
90 constraints = (
91 models.UniqueConstraint(
92 fields=("group", "landscape"),
93 condition=models.Q(deleted_at__isnull=True),
94 name="unique_active_landscape_group",
95 ),
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/terraso_backend/apps/core/models/landscapes.py b/terraso_backend/apps/core/models/landscapes.py
--- a/terraso_backend/apps/core/models/landscapes.py
+++ b/terraso_backend/apps/core/models/landscapes.py
@@ -1,5 +1,5 @@
import structlog
-from django.db import models
+from django.db import models, transaction
from apps.core import permission_rules as perm_rules
@@ -46,6 +46,24 @@
"delete": perm_rules.allowed_to_delete_landscape,
}
+ def save(self, *args, **kwargs):
+ with transaction.atomic():
+ creating = not Landscape.objects.filter(pk=self.pk).exists()
+
+ super().save(*args, **kwargs)
+
+ if creating and self.created_by:
+ group = Group(
+ name="Group {}".format(self.slug),
+ description="",
+ created_by=self.created_by,
+ )
+ group.save()
+ landscape_group = LandscapeGroup(
+ group=group, landscape=self, is_default_landscape_group=True
+ )
+ landscape_group.save()
+
def get_default_group(self):
"""
A default Group in a Landscape is that Group where any
| {"golden_diff": "diff --git a/terraso_backend/apps/core/models/landscapes.py b/terraso_backend/apps/core/models/landscapes.py\n--- a/terraso_backend/apps/core/models/landscapes.py\n+++ b/terraso_backend/apps/core/models/landscapes.py\n@@ -1,5 +1,5 @@\n import structlog\n-from django.db import models\n+from django.db import models, transaction\n \n from apps.core import permission_rules as perm_rules\n \n@@ -46,6 +46,24 @@\n \"delete\": perm_rules.allowed_to_delete_landscape,\n }\n \n+ def save(self, *args, **kwargs):\n+ with transaction.atomic():\n+ creating = not Landscape.objects.filter(pk=self.pk).exists()\n+\n+ super().save(*args, **kwargs)\n+\n+ if creating and self.created_by:\n+ group = Group(\n+ name=\"Group {}\".format(self.slug),\n+ description=\"\",\n+ created_by=self.created_by,\n+ )\n+ group.save()\n+ landscape_group = LandscapeGroup(\n+ group=group, landscape=self, is_default_landscape_group=True\n+ )\n+ landscape_group.save()\n+\n def get_default_group(self):\n \"\"\"\n A default Group in a Landscape is that Group where any\n", "issue": "Landscape creation and update\n<!--\r\nUse a concise title that describes the request.\r\nBad: localization\r\nGood: Translate site into Spanish\r\n\r\nBad: customize hostname\r\nGood: Determine hostname at build time from environment\r\n-->\r\n\r\n## Description\r\n- Default landscape group should be created when a new landscape is created\r\n- Manager should be assigned at the creation of a landscape\r\n- Only managers can update landscape data\n", "before_files": [{"content": "import structlog\nfrom django.db import models\n\nfrom apps.core import permission_rules as perm_rules\n\nfrom .commons import BaseModel, SlugModel\nfrom .groups import Group\nfrom .users import User\n\nlogger = structlog.get_logger(__name__)\n\n\nclass Landscape(SlugModel):\n \"\"\"\n This model represents a Landscape on Terraso platform.\n\n A Landscape is a socio-ecological system that consists of natural\n and/or human-modified ecosystems. Defined by its stakeholds, a\n Landscape usually has geographical boundaries. It may correspond to,\n or be a combination of, natural boundaries, distinct land features,\n socially defined areas such as indigenous territories, and/or\n jurisdictional and administrative boundaries. The boundaries of a\n Landscape can cross several countries.\n \"\"\"\n\n name = models.CharField(max_length=128, unique=True)\n description = models.TextField(max_length=512, blank=True, default=\"\")\n website = models.URLField(blank=True, default=\"\")\n location = models.CharField(max_length=128, blank=True, default=\"\")\n area_polygon = models.JSONField(blank=True, null=True)\n\n created_by = models.ForeignKey(\n User,\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n related_name=\"created_landscapes\",\n )\n groups = models.ManyToManyField(Group, through=\"LandscapeGroup\")\n\n field_to_slug = \"name\"\n\n class Meta(SlugModel.Meta):\n rules_permissions = {\n \"change\": perm_rules.allowed_to_change_landscape,\n \"delete\": perm_rules.allowed_to_delete_landscape,\n }\n\n def get_default_group(self):\n \"\"\"\n A default Group in a Landscape is that Group where any\n individual (associated or not with other Groups) is added when\n associating directly with a Landscape.\n \"\"\"\n try:\n # associated_groups is the related_name defined on\n # LandscapeGroup relationship with Landscape. 
It returns a\n # queryset of LandscapeGroup\n landscape_group = self.associated_groups.get(is_default_landscape_group=True)\n except LandscapeGroup.DoesNotExist:\n logger.error(\n \"Landscape has no default group, but it must have\", extra={\"landscape_id\": self.pk}\n )\n return None\n\n return landscape_group.group\n\n def __str__(self):\n return self.name\n\n\nclass LandscapeGroup(BaseModel):\n \"\"\"\n This model represents the association between a Landscape and a Group on\n Terraso platform.\n \"\"\"\n\n landscape = models.ForeignKey(\n Landscape, on_delete=models.CASCADE, related_name=\"associated_groups\"\n )\n group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name=\"associated_landscapes\")\n\n is_default_landscape_group = models.BooleanField(blank=True, default=False)\n\n class Meta:\n rules_permissions = {\n \"add\": perm_rules.allowed_to_add_landscape_group,\n \"delete\": perm_rules.allowed_to_delete_landscape_group,\n }\n constraints = (\n models.UniqueConstraint(\n fields=(\"group\", \"landscape\"),\n condition=models.Q(deleted_at__isnull=True),\n name=\"unique_active_landscape_group\",\n ),\n )\n", "path": "terraso_backend/apps/core/models/landscapes.py"}], "after_files": [{"content": "import structlog\nfrom django.db import models, transaction\n\nfrom apps.core import permission_rules as perm_rules\n\nfrom .commons import BaseModel, SlugModel\nfrom .groups import Group\nfrom .users import User\n\nlogger = structlog.get_logger(__name__)\n\n\nclass Landscape(SlugModel):\n \"\"\"\n This model represents a Landscape on Terraso platform.\n\n A Landscape is a socio-ecological system that consists of natural\n and/or human-modified ecosystems. Defined by its stakeholds, a\n Landscape usually has geographical boundaries. It may correspond to,\n or be a combination of, natural boundaries, distinct land features,\n socially defined areas such as indigenous territories, and/or\n jurisdictional and administrative boundaries. The boundaries of a\n Landscape can cross several countries.\n \"\"\"\n\n name = models.CharField(max_length=128, unique=True)\n description = models.TextField(max_length=512, blank=True, default=\"\")\n website = models.URLField(blank=True, default=\"\")\n location = models.CharField(max_length=128, blank=True, default=\"\")\n area_polygon = models.JSONField(blank=True, null=True)\n\n created_by = models.ForeignKey(\n User,\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n related_name=\"created_landscapes\",\n )\n groups = models.ManyToManyField(Group, through=\"LandscapeGroup\")\n\n field_to_slug = \"name\"\n\n class Meta(SlugModel.Meta):\n rules_permissions = {\n \"change\": perm_rules.allowed_to_change_landscape,\n \"delete\": perm_rules.allowed_to_delete_landscape,\n }\n\n def save(self, *args, **kwargs):\n with transaction.atomic():\n creating = not Landscape.objects.filter(pk=self.pk).exists()\n\n super().save(*args, **kwargs)\n\n if creating and self.created_by:\n group = Group(\n name=\"Group {}\".format(self.slug),\n description=\"\",\n created_by=self.created_by,\n )\n group.save()\n landscape_group = LandscapeGroup(\n group=group, landscape=self, is_default_landscape_group=True\n )\n landscape_group.save()\n\n def get_default_group(self):\n \"\"\"\n A default Group in a Landscape is that Group where any\n individual (associated or not with other Groups) is added when\n associating directly with a Landscape.\n \"\"\"\n try:\n # associated_groups is the related_name defined on\n # LandscapeGroup relationship with Landscape. 
It returns a\n # queryset of LandscapeGroup\n landscape_group = self.associated_groups.get(is_default_landscape_group=True)\n except LandscapeGroup.DoesNotExist:\n logger.error(\n \"Landscape has no default group, but it must have\", extra={\"landscape_id\": self.pk}\n )\n return None\n\n return landscape_group.group\n\n def __str__(self):\n return self.name\n\n\nclass LandscapeGroup(BaseModel):\n \"\"\"\n This model represents the association between a Landscape and a Group on\n Terraso platform.\n \"\"\"\n\n landscape = models.ForeignKey(\n Landscape, on_delete=models.CASCADE, related_name=\"associated_groups\"\n )\n group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name=\"associated_landscapes\")\n\n is_default_landscape_group = models.BooleanField(blank=True, default=False)\n\n class Meta:\n rules_permissions = {\n \"add\": perm_rules.allowed_to_add_landscape_group,\n \"delete\": perm_rules.allowed_to_delete_landscape_group,\n }\n constraints = (\n models.UniqueConstraint(\n fields=(\"group\", \"landscape\"),\n condition=models.Q(deleted_at__isnull=True),\n name=\"unique_active_landscape_group\",\n ),\n )\n", "path": "terraso_backend/apps/core/models/landscapes.py"}]} | 1,195 | 278 |
gh_patches_debug_31833 | rasdani/github-patches | git_diff | Flexget__Flexget-2858 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
torrentday plugin with uid secret gets error expecting string
### Expected behaviour:
Setting uid from secrets file should work and update config
### Actual behaviour:
When setting the uid from a secret file, since uid is a number, it's converted to an integer in the config; this makes an error in the config compilation of:
/tasks/xTV_TorrentDay/discover/from/0/torrentday/uid] Got `1234567`, expected: string
### Steps to reproduce:
- Step 1: Add config and run flexget check
#### Config:
```yaml
TV_TorrentDay:
discover:
what:
- next_series_episodes: yes
from:
- torrentday:
uid: "{? torrentday.uid ?}"
passkey: '{? torrentday.passkey ?}'
cfduid: '{? torrentday.cfduid ?}'
rss_key: '{? torrentday.rss_key ?}'
category: 'tvBRD'
```
secrets.yaml
```yaml
torrentday:
uid: "1234567"
passkey: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
cfduid: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
rss_key: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
```
#### Log:
<details>
<summary>(click to expand)</summary>
```
2021-02-16 01:29:54 CRITICAL manager [/tasks/xTV_TorrentDay/discover/from/0/torrentday/uid] Got `1234567`, expected: string
2021-02-16 01:29:54 CRITICAL manager Failed to load config file: Did not pass schema validation.
```
</details>
### Additional information:
- FlexGet version: 3.1.102
- Python version: 3.7.3
- Installation method: venv
- Using daemon (yes/no): yes
- OS and version: debian 10
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/components/sites/sites/torrentday.py`
Content:
```
1 import re
2
3 from loguru import logger
4 from requests.exceptions import RequestException
5
6 from flexget import plugin
7 from flexget.components.sites.urlrewriting import UrlRewritingError
8 from flexget.components.sites.utils import normalize_unicode, torrent_availability
9 from flexget.entry import Entry
10 from flexget.event import event
11 from flexget.plugin import PluginError
12 from flexget.utils import requests
13 from flexget.utils.soup import get_soup
14 from flexget.utils.tools import parse_filesize
15
16 logger = logger.bind(name='torrentday')
17
18 CATEGORIES = {
19 'all': 0,
20 # Movies
21 'mov4k': 96,
22 'mov480p': 25,
23 'movHD': 11,
24 'movBD': 5,
25 'movDVD': 3,
26 'movMP4': 21,
27 'movNonEnglish': 22,
28 'movPACKS': 13,
29 'movSDx264': 44,
30 'movX265': 48,
31 'movXVID': 1,
32 # TV
33 'tv480p': 24,
34 'tvBRD': 32,
35 'tvDVD': 31,
36 'tvDVDrip': 33,
37 'tvMOBILE': 46,
38 'tvNonEnglish': 82,
39 'tvPACKS': 14,
40 'tvSDx264': 26,
41 'tvHDx264': 7,
42 'tvX265': 34,
43 'tvXVID': 2,
44 }
45
46
47 class UrlRewriteTorrentday:
48 """
49 Torrentday urlrewriter and search plugin.
50
51 torrentday:
52 uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies
53 passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. see previous
54 cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES
55 rss_key: xxxxxxxxx (required) get this from your profile page
56 category: xxxxxxxx
57
58 Category can be one of
59 ID from browsing site OR 'name'
60 movies:
61 mov4k, mov480p, movHD, movBD, movDVD,
62 movMP4, movNonEnglish, movPACKS,
63 movSDx264, movX265, movXVID
64 tv:
65 tv480p, tvBRD, tvDVD, tvDVDrip,
66 tvMOBILE, tvNonEnglish, tvPACKS,
67 tvSDx264, tvHDx264, tvX265, tvXVID
68 """
69
70 schema = {
71 'type': 'object',
72 'properties': {
73 'rss_key': {'type': 'string'},
74 'uid': {'type': 'string'},
75 'passkey': {'type': 'string'},
76 'cfduid': {'type': 'string'},
77 'category': {
78 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]
79 },
80 },
81 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],
82 'additionalProperties': False,
83 }
84
85 # urlrewriter API
86 def url_rewritable(self, task, entry):
87 url = entry['url']
88 if url.find('.torrent'):
89 return False
90 if url.startswith('https://www.torrentday.com'):
91 return True
92 return False
93
94 # urlrewriter API
95 def url_rewrite(self, task, entry):
96 if 'url' not in entry:
97 logger.error('Didn\'t actually get a URL...')
98 else:
99 logger.debug('Got the URL: {}', entry['url'])
100 if entry['url'].startswith('https://www.torrentday.com/browse'):
101 # use search
102 results = self.search(task, entry)
103 if not results:
104 raise UrlRewritingError('No search results found')
105 entry['url'] = results[0]['url']
106
107 @plugin.internet(logger)
108 def search(self, task, entry, config=None):
109 """
110 Search for name from torrentday.
111 """
112
113 categories = config.get('category', 'all')
114 # Make sure categories is a list
115 if not isinstance(categories, list):
116 categories = [categories]
117 # If there are any text categories, turn them into their id number
118 categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
119 params = {'cata': 'yes', 'clear-new': 1}
120 params.update({str(c): 1 for c in categories})
121
122 entries = set()
123 for search_string in entry.get('search_strings', [entry['title']]):
124
125 url = 'https://www.torrentday.com/t'
126 params['q'] = normalize_unicode(search_string).replace(':', '')
127 cookies = {
128 'uid': config['uid'],
129 'pass': config['passkey'],
130 '__cfduid': config['cfduid'],
131 }
132
133 try:
134 page = requests.get(url, params=params, cookies=cookies).content
135 except RequestException as e:
136 raise PluginError('Could not connect to torrentday: {}'.format(e))
137
138 # the following should avoid table being None due to a malformed
139 # html in td search results
140 soup = get_soup(page).contents[1].contents[1].contents[1].next.nextSibling
141 table = soup.find('table', {'id': 'torrentTable'})
142 if table is None:
143 raise PluginError(
144 'Search returned by torrentday appears to be empty or malformed.'
145 )
146
147 # the first row is the header so skip it
148 for tr in table.find_all('tr')[1:]:
149 entry = Entry()
150 # find the torrent names
151 td = tr.find('td', {'class': 'torrentNameInfo'})
152 if not td:
153 logger.warning('Could not find entry torrentNameInfo for {}.', search_string)
154 continue
155 title = td.find('a')
156 if not title:
157 logger.warning('Could not determine title for {}.', search_string)
158 continue
159 entry['title'] = title.contents[0]
160 logger.debug('title: {}', title.contents[0])
161
162 # find download link
163 torrent_url = tr.find('td', {'class': 'ac'})
164 if not torrent_url:
165 logger.warning('Could not determine download link for {}.', search_string)
166 continue
167 torrent_url = torrent_url.find('a').get('href')
168
169 # construct download URL
170 torrent_url = (
171 'https://www.torrentday.com/'
172 + torrent_url
173 + '?torrent_pass='
174 + config['rss_key']
175 )
176 logger.debug('RSS-ified download link: {}', torrent_url)
177 entry['url'] = torrent_url
178
179 # us tr object for seeders/leechers
180 seeders = tr.find('td', {'class': 'ac seedersInfo'})
181 leechers = tr.find('td', {'class': 'ac leechersInfo'})
182 entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))
183 entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))
184 entry['torrent_availability'] = torrent_availability(
185 entry['torrent_seeds'], entry['torrent_leeches']
186 )
187
188 # use tr object for size
189 size = tr.find('td', text=re.compile(r'([\.\d]+) ([TGMKk]?)B')).contents[0]
190 size = re.search(r'([\.\d]+) ([TGMKk]?)B', str(size))
191
192 entry['content_size'] = parse_filesize(size.group(0))
193
194 entries.add(entry)
195
196 return sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))
197
198
199 @event('plugin.register')
200 def register_plugin():
201 plugin.register(
202 UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2
203 )
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/components/sites/sites/torrentday.py b/flexget/components/sites/sites/torrentday.py
--- a/flexget/components/sites/sites/torrentday.py
+++ b/flexget/components/sites/sites/torrentday.py
@@ -6,6 +6,7 @@
from flexget import plugin
from flexget.components.sites.urlrewriting import UrlRewritingError
from flexget.components.sites.utils import normalize_unicode, torrent_availability
+from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.plugin import PluginError
@@ -69,12 +70,12 @@
'type': 'object',
'properties': {
'rss_key': {'type': 'string'},
- 'uid': {'type': 'string'},
+ 'uid': {'oneOf': [{'type': 'integer'}, {'type': 'string'}]},
'passkey': {'type': 'string'},
'cfduid': {'type': 'string'},
- 'category': {
- 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]
- },
+ 'category': one_or_more(
+ {'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]}
+ ),
},
'required': ['rss_key', 'uid', 'passkey', 'cfduid'],
'additionalProperties': False,
@@ -125,7 +126,7 @@
url = 'https://www.torrentday.com/t'
params['q'] = normalize_unicode(search_string).replace(':', '')
cookies = {
- 'uid': config['uid'],
+ 'uid': str(config['uid']),
'pass': config['passkey'],
'__cfduid': config['cfduid'],
}
| {"golden_diff": "diff --git a/flexget/components/sites/sites/torrentday.py b/flexget/components/sites/sites/torrentday.py\n--- a/flexget/components/sites/sites/torrentday.py\n+++ b/flexget/components/sites/sites/torrentday.py\n@@ -6,6 +6,7 @@\n from flexget import plugin\n from flexget.components.sites.urlrewriting import UrlRewritingError\n from flexget.components.sites.utils import normalize_unicode, torrent_availability\n+from flexget.config_schema import one_or_more\n from flexget.entry import Entry\n from flexget.event import event\n from flexget.plugin import PluginError\n@@ -69,12 +70,12 @@\n 'type': 'object',\n 'properties': {\n 'rss_key': {'type': 'string'},\n- 'uid': {'type': 'string'},\n+ 'uid': {'oneOf': [{'type': 'integer'}, {'type': 'string'}]},\n 'passkey': {'type': 'string'},\n 'cfduid': {'type': 'string'},\n- 'category': {\n- 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]\n- },\n+ 'category': one_or_more(\n+ {'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]}\n+ ),\n },\n 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],\n 'additionalProperties': False,\n@@ -125,7 +126,7 @@\n url = 'https://www.torrentday.com/t'\n params['q'] = normalize_unicode(search_string).replace(':', '')\n cookies = {\n- 'uid': config['uid'],\n+ 'uid': str(config['uid']),\n 'pass': config['passkey'],\n '__cfduid': config['cfduid'],\n }\n", "issue": "torrentday pluging winth uid secret gets error expecting string\n### Expected behaviour:\r\n\r\nSetting uid from secrets file should work and update config\r\n\r\n### Actual behaviour:\r\n\r\nWhen setting the uid from a secret file, since uid is a number, it's converted to integer on the config, this makes a error in the config compilation of:\r\n\r\n/tasks/xTV_TorrentDay/discover/from/0/torrentday/uid] Got `1234567`, expected: string\r\n\r\n### Steps to reproduce:\r\n- Step 1: Add config and run flexget check\r\n\r\n#### Config:\r\n```yaml\r\n TV_TorrentDay:\r\n discover:\r\n what:\r\n - next_series_episodes: yes\r\n from:\r\n - torrentday:\r\n uid: \"{? torrentday.uid ?}\"\r\n passkey: '{? torrentday.passkey ?}'\r\n cfduid: '{? torrentday.cfduid ?}'\r\n rss_key: '{? 
torrentday.rss_key ?}'\r\n category: 'tvBRD'\r\n```\r\nsecrets.yaml\r\n```yaml\r\ntorrentday:\r\n uid: \"1234567\"\r\n passkey: \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\r\n cfduid: \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\r\n rss_key: \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\r\n```\r\n \r\n#### Log:\r\n<details>\r\n <summary>(click to expand)</summary>\r\n\r\n```\r\n2021-02-16 01:29:54 CRITICAL manager [/tasks/xTV_TorrentDay/discover/from/0/torrentday/uid] Got `1234567`, expected: string\r\n2021-02-16 01:29:54 CRITICAL manager Failed to load config file: Did not pass schema validation.\r\n\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.1.102\r\n- Python version: 3.7.3\r\n- Installation method: venv\r\n- Using daemon (yes/no): yes\r\n- OS and version: debian 10\n", "before_files": [{"content": "import re\n\nfrom loguru import logger\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin\nfrom flexget.components.sites.urlrewriting import UrlRewritingError\nfrom flexget.components.sites.utils import normalize_unicode, torrent_availability\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.plugin import PluginError\nfrom flexget.utils import requests\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils.tools import parse_filesize\n\nlogger = logger.bind(name='torrentday')\n\nCATEGORIES = {\n 'all': 0,\n # Movies\n 'mov4k': 96,\n 'mov480p': 25,\n 'movHD': 11,\n 'movBD': 5,\n 'movDVD': 3,\n 'movMP4': 21,\n 'movNonEnglish': 22,\n 'movPACKS': 13,\n 'movSDx264': 44,\n 'movX265': 48,\n 'movXVID': 1,\n # TV\n 'tv480p': 24,\n 'tvBRD': 32,\n 'tvDVD': 31,\n 'tvDVDrip': 33,\n 'tvMOBILE': 46,\n 'tvNonEnglish': 82,\n 'tvPACKS': 14,\n 'tvSDx264': 26,\n 'tvHDx264': 7,\n 'tvX265': 34,\n 'tvXVID': 2,\n}\n\n\nclass UrlRewriteTorrentday:\n \"\"\"\n Torrentday urlrewriter and search plugin.\n\n torrentday:\n uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies\n passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. 
see previous\n cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES\n rss_key: xxxxxxxxx (required) get this from your profile page\n category: xxxxxxxx\n\n Category can be one of\n ID from browsing site OR 'name'\n movies:\n mov4k, mov480p, movHD, movBD, movDVD,\n movMP4, movNonEnglish, movPACKS,\n movSDx264, movX265, movXVID\n tv:\n tv480p, tvBRD, tvDVD, tvDVDrip,\n tvMOBILE, tvNonEnglish, tvPACKS,\n tvSDx264, tvHDx264, tvX265, tvXVID\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'rss_key': {'type': 'string'},\n 'uid': {'type': 'string'},\n 'passkey': {'type': 'string'},\n 'cfduid': {'type': 'string'},\n 'category': {\n 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]\n },\n },\n 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],\n 'additionalProperties': False,\n }\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.find('.torrent'):\n return False\n if url.startswith('https://www.torrentday.com'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n if 'url' not in entry:\n logger.error('Didn\\'t actually get a URL...')\n else:\n logger.debug('Got the URL: {}', entry['url'])\n if entry['url'].startswith('https://www.torrentday.com/browse'):\n # use search\n results = self.search(task, entry)\n if not results:\n raise UrlRewritingError('No search results found')\n entry['url'] = results[0]['url']\n\n @plugin.internet(logger)\n def search(self, task, entry, config=None):\n \"\"\"\n Search for name from torrentday.\n \"\"\"\n\n categories = config.get('category', 'all')\n # Make sure categories is a list\n if not isinstance(categories, list):\n categories = [categories]\n # If there are any text categories, turn them into their id number\n categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]\n params = {'cata': 'yes', 'clear-new': 1}\n params.update({str(c): 1 for c in categories})\n\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n\n url = 'https://www.torrentday.com/t'\n params['q'] = normalize_unicode(search_string).replace(':', '')\n cookies = {\n 'uid': config['uid'],\n 'pass': config['passkey'],\n '__cfduid': config['cfduid'],\n }\n\n try:\n page = requests.get(url, params=params, cookies=cookies).content\n except RequestException as e:\n raise PluginError('Could not connect to torrentday: {}'.format(e))\n\n # the following should avoid table being None due to a malformed\n # html in td search results\n soup = get_soup(page).contents[1].contents[1].contents[1].next.nextSibling\n table = soup.find('table', {'id': 'torrentTable'})\n if table is None:\n raise PluginError(\n 'Search returned by torrentday appears to be empty or malformed.'\n )\n\n # the first row is the header so skip it\n for tr in table.find_all('tr')[1:]:\n entry = Entry()\n # find the torrent names\n td = tr.find('td', {'class': 'torrentNameInfo'})\n if not td:\n logger.warning('Could not find entry torrentNameInfo for {}.', search_string)\n continue\n title = td.find('a')\n if not title:\n logger.warning('Could not determine title for {}.', search_string)\n continue\n entry['title'] = title.contents[0]\n logger.debug('title: {}', title.contents[0])\n\n # find download link\n torrent_url = tr.find('td', {'class': 'ac'})\n if not torrent_url:\n logger.warning('Could not determine download link for {}.', search_string)\n continue\n torrent_url = torrent_url.find('a').get('href')\n\n # construct download URL\n torrent_url = (\n 
'https://www.torrentday.com/'\n + torrent_url\n + '?torrent_pass='\n + config['rss_key']\n )\n logger.debug('RSS-ified download link: {}', torrent_url)\n entry['url'] = torrent_url\n\n # us tr object for seeders/leechers\n seeders = tr.find('td', {'class': 'ac seedersInfo'})\n leechers = tr.find('td', {'class': 'ac leechersInfo'})\n entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))\n entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))\n entry['torrent_availability'] = torrent_availability(\n entry['torrent_seeds'], entry['torrent_leeches']\n )\n\n # use tr object for size\n size = tr.find('td', text=re.compile(r'([\\.\\d]+) ([TGMKk]?)B')).contents[0]\n size = re.search(r'([\\.\\d]+) ([TGMKk]?)B', str(size))\n\n entry['content_size'] = parse_filesize(size.group(0))\n\n entries.add(entry)\n\n return sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(\n UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2\n )\n", "path": "flexget/components/sites/sites/torrentday.py"}], "after_files": [{"content": "import re\n\nfrom loguru import logger\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin\nfrom flexget.components.sites.urlrewriting import UrlRewritingError\nfrom flexget.components.sites.utils import normalize_unicode, torrent_availability\nfrom flexget.config_schema import one_or_more\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.plugin import PluginError\nfrom flexget.utils import requests\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils.tools import parse_filesize\n\nlogger = logger.bind(name='torrentday')\n\nCATEGORIES = {\n 'all': 0,\n # Movies\n 'mov480p': 25,\n 'movHD': 11,\n 'movBD': 5,\n 'movDVD': 3,\n 'movMP4': 21,\n 'movNonEnglish': 22,\n 'movPACKS': 13,\n 'movSDx264': 44,\n 'movX265': 48,\n 'movXVID': 1,\n # TV\n 'tv480p': 24,\n 'tvBRD': 32,\n 'tvDVD': 31,\n 'tvDVDrip': 33,\n 'tvMOBILE': 46,\n 'tvPACKS': 14,\n 'tvSDx264': 26,\n 'tvHDx264': 7,\n 'tvX265': 34,\n 'tvXVID': 2,\n}\n\n\nclass UrlRewriteTorrentday:\n \"\"\"\n Torrentday urlrewriter and search plugin.\n\n torrentday:\n uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies\n passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. 
see previous\n cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES\n rss_key: xxxxxxxxx (required) get this from your profile page\n category: xxxxxxxx\n\n Category can be one of \n ID from browsing site OR 'name'\n movies:\n mov480p, movHD, movBD, movDVD,\n movMP4, movNonEnglish, movPACKS,\n movSDx264, movX265, movXVID\n tv:\n tv480p, tvBRD, tvDVD, tvDVDrip,\n tvMOBILE, tvPACKS, tvSDx264, \n tvHDx264, tvX265, tvXVID\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'rss_key': {'type': 'string'},\n 'uid': {'oneOf': [{'type': 'integer'}, {'type': 'string'}]},\n 'passkey': {'type': 'string'},\n 'cfduid': {'type': 'string'},\n 'category': one_or_more(\n {'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]}\n ),\n },\n 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],\n 'additionalProperties': False,\n }\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.find('.torrent'):\n return False\n if url.startswith('https://www.torrentday.com'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n if 'url' not in entry:\n logger.error('Didn\\'t actually get a URL...')\n else:\n logger.debug('Got the URL: {}', entry['url'])\n if entry['url'].startswith('https://www.torrentday.com/browse'):\n # use search\n results = self.search(task, entry)\n if not results:\n raise UrlRewritingError('No search results found')\n entry['url'] = results[0]['url']\n\n @plugin.internet(logger)\n def search(self, task, entry, config=None):\n \"\"\"\n Search for name from torrentday.\n \"\"\"\n\n categories = config.get('category', 'all')\n # Make sure categories is a list\n if not isinstance(categories, list):\n categories = [categories]\n # If there are any text categories, turn them into their id number\n categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]\n params = {\n 'cata': 'yes',\n 'c{}'.format(','.join(str(c) for c in categories)): 1,\n 'clear-new': 1,\n }\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n\n url = 'https://www.torrentday.com/t'\n params['q'] = normalize_unicode(search_string).replace(':', '')\n cookies = {\n 'uid': str(config['uid']),\n 'pass': config['passkey'],\n '__cfduid': config['cfduid'],\n }\n\n try:\n page = requests.get(url, params=params, cookies=cookies).content\n except RequestException as e:\n raise PluginError('Could not connect to torrentday: {}'.format(e))\n\n # the following should avoid table being None due to a malformed\n # html in td search results\n soup = get_soup(page).contents[1].contents[1].next.next.nextSibling\n table = soup.find('table', {'id': 'torrentTable'})\n if table is None:\n raise PluginError(\n 'Search returned by torrentday appears to be empty or malformed.'\n )\n\n # the first row is the header so skip it\n for tr in table.find_all('tr')[1:]:\n entry = Entry()\n # find the torrent names\n td = tr.find('td', {'class': 'torrentNameInfo'})\n if not td:\n logger.warning('Could not find entry torrentNameInfo for {}.', search_string)\n continue\n title = td.find('a')\n if not title:\n logger.warning('Could not determine title for {}.', search_string)\n continue\n entry['title'] = title.contents[0]\n logger.debug('title: {}', title.contents[0])\n\n # find download link\n torrent_url = tr.find('td', {'class': 'ac'})\n if not torrent_url:\n logger.warning('Could not determine download link for {}.', search_string)\n continue\n torrent_url = torrent_url.find('a').get('href')\n\n # 
construct download URL\n torrent_url = (\n 'https://www.torrentday.com/'\n + torrent_url\n + '?torrent_pass='\n + config['rss_key']\n )\n logger.debug('RSS-ified download link: {}', torrent_url)\n entry['url'] = torrent_url\n\n # us tr object for seeders/leechers\n seeders = tr.find('td', {'class': 'ac seedersInfo'})\n leechers = tr.find('td', {'class': 'ac leechersInfo'})\n entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))\n entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))\n entry['torrent_availability'] = torrent_availability(\n entry['torrent_seeds'], entry['torrent_leeches']\n )\n\n # use tr object for size\n size = tr.find('td', text=re.compile(r'([\\.\\d]+) ([TGMKk]?)B')).contents[0]\n size = re.search(r'([\\.\\d]+) ([TGMKk]?)B', str(size))\n\n entry['content_size'] = parse_filesize(size.group(0))\n\n entries.add(entry)\n\n return sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(\n UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2\n )\n", "path": "flexget/components/sites/sites/torrentday.py"}]} | 2,987 | 413 |
gh_patches_debug_20437 | rasdani/github-patches | git_diff | graspologic-org__graspologic-488 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove * imports in subgraph, utils
the `__init__.py` files in `graspy.subgraph` and `graspy.utils` shouldn't have `import *`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `graspy/utils/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation and contributors.
2 # Licensed under the MIT License.
3
4 from .utils import *
5 from .ptr import *
6
```
Path: `graspy/subgraph/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation and contributors.
2 # Licensed under the MIT License.
3
4 from .sg import *
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/graspy/subgraph/__init__.py b/graspy/subgraph/__init__.py
--- a/graspy/subgraph/__init__.py
+++ b/graspy/subgraph/__init__.py
@@ -1,4 +1,7 @@
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
-from .sg import *
+
+from .sg import SignalSubgraph
+
+__all__ = [SignalSubgraph]
diff --git a/graspy/utils/__init__.py b/graspy/utils/__init__.py
--- a/graspy/utils/__init__.py
+++ b/graspy/utils/__init__.py
@@ -1,5 +1,42 @@
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
-from .utils import *
-from .ptr import *
+from .utils import (
+ import_graph,
+ import_edgelist,
+ is_symmetric,
+ is_loopless,
+ is_unweighted,
+ is_almost_symmetric,
+ symmetrize,
+ remove_loops,
+ to_laplace,
+ is_fully_connected,
+ get_lcc,
+ get_multigraph_union_lcc,
+ get_multigraph_intersect_lcc,
+ augment_diagonal,
+ binarize,
+ cartprod,
+)
+from .ptr import pass_to_ranks
+
+__all__ = [
+ import_graph,
+ import_edgelist,
+ is_symmetric,
+ is_loopless,
+ is_unweighted,
+ is_almost_symmetric,
+ symmetrize,
+ remove_loops,
+ to_laplace,
+ is_fully_connected,
+ get_lcc,
+ get_multigraph_union_lcc,
+ get_multigraph_intersect_lcc,
+ augment_diagonal,
+ binarize,
+ cartprod,
+ pass_to_ranks,
+]
| {"golden_diff": "diff --git a/graspy/subgraph/__init__.py b/graspy/subgraph/__init__.py\n--- a/graspy/subgraph/__init__.py\n+++ b/graspy/subgraph/__init__.py\n@@ -1,4 +1,7 @@\n # Copyright (c) Microsoft Corporation and contributors.\n # Licensed under the MIT License.\n \n-from .sg import *\n+\n+from .sg import SignalSubgraph\n+\n+__all__ = [SignalSubgraph]\ndiff --git a/graspy/utils/__init__.py b/graspy/utils/__init__.py\n--- a/graspy/utils/__init__.py\n+++ b/graspy/utils/__init__.py\n@@ -1,5 +1,42 @@\n # Copyright (c) Microsoft Corporation and contributors.\n # Licensed under the MIT License.\n \n-from .utils import *\n-from .ptr import *\n+from .utils import (\n+ import_graph,\n+ import_edgelist,\n+ is_symmetric,\n+ is_loopless,\n+ is_unweighted,\n+ is_almost_symmetric,\n+ symmetrize,\n+ remove_loops,\n+ to_laplace,\n+ is_fully_connected,\n+ get_lcc,\n+ get_multigraph_union_lcc,\n+ get_multigraph_intersect_lcc,\n+ augment_diagonal,\n+ binarize,\n+ cartprod,\n+)\n+from .ptr import pass_to_ranks\n+\n+__all__ = [\n+ import_graph,\n+ import_edgelist,\n+ is_symmetric,\n+ is_loopless,\n+ is_unweighted,\n+ is_almost_symmetric,\n+ symmetrize,\n+ remove_loops,\n+ to_laplace,\n+ is_fully_connected,\n+ get_lcc,\n+ get_multigraph_union_lcc,\n+ get_multigraph_intersect_lcc,\n+ augment_diagonal,\n+ binarize,\n+ cartprod,\n+ pass_to_ranks,\n+]\n", "issue": "Remove * imports in subgraph, utils\nthe` __init__.py` files in `graspy.subgraph` and `graspy.utils` shouldn't have import *.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom .utils import *\nfrom .ptr import *\n", "path": "graspy/utils/__init__.py"}, {"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom .sg import *\n", "path": "graspy/subgraph/__init__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom .utils import (\n import_graph,\n import_edgelist,\n is_symmetric,\n is_loopless,\n is_unweighted,\n is_almost_symmetric,\n symmetrize,\n remove_loops,\n to_laplace,\n is_fully_connected,\n get_lcc,\n get_multigraph_union_lcc,\n get_multigraph_intersect_lcc,\n augment_diagonal,\n binarize,\n cartprod,\n)\nfrom .ptr import pass_to_ranks\n\n__all__ = [\n import_graph,\n import_edgelist,\n is_symmetric,\n is_loopless,\n is_unweighted,\n is_almost_symmetric,\n symmetrize,\n remove_loops,\n to_laplace,\n is_fully_connected,\n get_lcc,\n get_multigraph_union_lcc,\n get_multigraph_intersect_lcc,\n augment_diagonal,\n binarize,\n cartprod,\n pass_to_ranks,\n]\n", "path": "graspy/utils/__init__.py"}, {"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\n\nfrom .sg import SignalSubgraph\n\n__all__ = [SignalSubgraph]\n", "path": "graspy/subgraph/__init__.py"}]} | 382 | 429 |
gh_patches_debug_27136 | rasdani/github-patches | git_diff | getmoto__moto-923 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cloudformation doesn`t work with yaml templates
When i try to use moto to mock a call to `create_stack` the following happens:
Traceback:
```
File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py", line 107, in dispatch
return cls()._dispatch(*args, **kwargs)
File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py", line 167, in _dispatch
return self.call_action()
File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py", line 183, in call_action
response = method()
File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/responses.py", line 51, in create_stack
role_arn=role_arn,
File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py", line 126, in create_stack
role_arn=role_arn,
File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py", line 18, in __init__
self.template_dict = json.loads(self.template)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py", line 354, in loads
return _default_decoder.decode(s)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py", line 339, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py", line 357, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
```
#### How to reproduce
```python
# coding: utf-8
import unittest
from boto.cloudformation import connect_to_region
from moto import mock_cloudformation
class TestCase(unittest.TestCase):
@mock_cloudformation
def test_cloudformation_create_stack(self):
connection = connect_to_region('sa-east-1')
with open('ec2.yml') as fp:
template = fp.read()
connection.create_stack('test-stack', template_body=template)
if __name__ == '__main__':
unittest.main()
```
Template: ec2.yaml
```yaml
Resources:
BastionInstance:
Type: "AWS::EC2::Instance"
Properties:
AvailabilityZone: sa-east-1a
DisableApiTermination: false
ImageId: ami-37cfad5b
InstanceType: t2.micro
```
Moto version: 0.4.31
Boto Version: 2.46.1
Python Version: Python 3.6.0rc1
System: Darwin Iurys-MacBook-Pro.local 15.5.0 Darwin Kernel Version 15.5.0: Tue Apr 19 18:36:36 PDT 2016; root:xnu-3248.50.21~8/RELEASE_X86_64 x86_64
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import unicode_literals
3 from setuptools import setup, find_packages
4
5 install_requires = [
6 "Jinja2>=2.8",
7 "boto>=2.36.0",
8 "cookies",
9 "requests>=2.0",
10 "xmltodict",
11 "dicttoxml",
12 "six",
13 "werkzeug",
14 "pytz",
15 "python-dateutil",
16 ]
17
18 extras_require = {
19 'server': ['flask'],
20 }
21
22 setup(
23 name='moto',
24 version='0.4.31',
25 description='A library that allows your python tests to easily'
26 ' mock out the boto library',
27 author='Steve Pulec',
28 author_email='[email protected]',
29 url='https://github.com/spulec/moto',
30 entry_points={
31 'console_scripts': [
32 'moto_server = moto.server:main',
33 ],
34 },
35 packages=find_packages(exclude=("tests", "tests.*")),
36 install_requires=install_requires,
37 extras_require=extras_require,
38 license="Apache",
39 test_suite="tests",
40 classifiers=[
41 "Programming Language :: Python :: 2",
42 "Programming Language :: Python :: 2.7",
43 "Programming Language :: Python :: 3",
44 "Programming Language :: Python :: 3.3",
45 "License :: OSI Approved :: Apache Software License",
46 "Topic :: Software Development :: Testing",
47 ],
48 )
49
```
Path: `moto/cloudformation/models.py`
Content:
```
1 from __future__ import unicode_literals
2 from datetime import datetime
3 import json
4 import uuid
5
6 import boto.cloudformation
7 from moto.core import BaseBackend, BaseModel
8
9 from .parsing import ResourceMap, OutputMap
10 from .utils import generate_stack_id
11 from .exceptions import ValidationError
12
13
14 class FakeStack(BaseModel):
15
16 def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):
17 self.stack_id = stack_id
18 self.name = name
19 self.template = template
20 self.template_dict = json.loads(self.template)
21 self.parameters = parameters
22 self.region_name = region_name
23 self.notification_arns = notification_arns if notification_arns else []
24 self.role_arn = role_arn
25 self.tags = tags if tags else {}
26 self.events = []
27 self._add_stack_event("CREATE_IN_PROGRESS",
28 resource_status_reason="User Initiated")
29
30 self.description = self.template_dict.get('Description')
31 self.resource_map = self._create_resource_map()
32 self.output_map = self._create_output_map()
33 self._add_stack_event("CREATE_COMPLETE")
34 self.status = 'CREATE_COMPLETE'
35
36 def _create_resource_map(self):
37 resource_map = ResourceMap(
38 self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict)
39 resource_map.create()
40 return resource_map
41
42 def _create_output_map(self):
43 output_map = OutputMap(self.resource_map, self.template_dict)
44 output_map.create()
45 return output_map
46
47 def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):
48 self.events.append(FakeEvent(
49 stack_id=self.stack_id,
50 stack_name=self.name,
51 logical_resource_id=self.name,
52 physical_resource_id=self.stack_id,
53 resource_type="AWS::CloudFormation::Stack",
54 resource_status=resource_status,
55 resource_status_reason=resource_status_reason,
56 resource_properties=resource_properties,
57 ))
58
59 def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):
60 # not used yet... feel free to help yourself
61 resource = self.resource_map[logical_resource_id]
62 self.events.append(FakeEvent(
63 stack_id=self.stack_id,
64 stack_name=self.name,
65 logical_resource_id=logical_resource_id,
66 physical_resource_id=resource.physical_resource_id,
67 resource_type=resource.type,
68 resource_status=resource_status,
69 resource_status_reason=resource_status_reason,
70 resource_properties=resource_properties,
71 ))
72
73 @property
74 def stack_parameters(self):
75 return self.resource_map.resolved_parameters
76
77 @property
78 def stack_resources(self):
79 return self.resource_map.values()
80
81 @property
82 def stack_outputs(self):
83 return self.output_map.values()
84
85 def update(self, template, role_arn=None, parameters=None, tags=None):
86 self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated")
87 self.template = template
88 self.resource_map.update(json.loads(template), parameters)
89 self.output_map = self._create_output_map()
90 self._add_stack_event("UPDATE_COMPLETE")
91 self.status = "UPDATE_COMPLETE"
92 self.role_arn = role_arn
93 # only overwrite tags if passed
94 if tags is not None:
95 self.tags = tags
96 # TODO: update tags in the resource map
97
98 def delete(self):
99 self._add_stack_event("DELETE_IN_PROGRESS",
100 resource_status_reason="User Initiated")
101 self.resource_map.delete()
102 self._add_stack_event("DELETE_COMPLETE")
103 self.status = "DELETE_COMPLETE"
104
105
106 class FakeEvent(BaseModel):
107
108 def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):
109 self.stack_id = stack_id
110 self.stack_name = stack_name
111 self.logical_resource_id = logical_resource_id
112 self.physical_resource_id = physical_resource_id
113 self.resource_type = resource_type
114 self.resource_status = resource_status
115 self.resource_status_reason = resource_status_reason
116 self.resource_properties = resource_properties
117 self.timestamp = datetime.utcnow()
118 self.event_id = uuid.uuid4()
119
120
121 class CloudFormationBackend(BaseBackend):
122
123 def __init__(self):
124 self.stacks = {}
125 self.deleted_stacks = {}
126
127 def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):
128 stack_id = generate_stack_id(name)
129 new_stack = FakeStack(
130 stack_id=stack_id,
131 name=name,
132 template=template,
133 parameters=parameters,
134 region_name=region_name,
135 notification_arns=notification_arns,
136 tags=tags,
137 role_arn=role_arn,
138 )
139 self.stacks[stack_id] = new_stack
140 return new_stack
141
142 def describe_stacks(self, name_or_stack_id):
143 stacks = self.stacks.values()
144 if name_or_stack_id:
145 for stack in stacks:
146 if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:
147 return [stack]
148 if self.deleted_stacks:
149 deleted_stacks = self.deleted_stacks.values()
150 for stack in deleted_stacks:
151 if stack.stack_id == name_or_stack_id:
152 return [stack]
153 raise ValidationError(name_or_stack_id)
154 else:
155 return stacks
156
157 def list_stacks(self):
158 return self.stacks.values()
159
160 def get_stack(self, name_or_stack_id):
161 all_stacks = dict(self.deleted_stacks, **self.stacks)
162 if name_or_stack_id in all_stacks:
163 # Lookup by stack id - deleted stacks incldued
164 return all_stacks[name_or_stack_id]
165 else:
166 # Lookup by stack name - undeleted stacks only
167 for stack in self.stacks.values():
168 if stack.name == name_or_stack_id:
169 return stack
170
171 def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):
172 stack = self.get_stack(name)
173 stack.update(template, role_arn, parameters=parameters, tags=tags)
174 return stack
175
176 def list_stack_resources(self, stack_name_or_id):
177 stack = self.get_stack(stack_name_or_id)
178 return stack.stack_resources
179
180 def delete_stack(self, name_or_stack_id):
181 if name_or_stack_id in self.stacks:
182 # Delete by stack id
183 stack = self.stacks.pop(name_or_stack_id, None)
184 stack.delete()
185 self.deleted_stacks[stack.stack_id] = stack
186 return self.stacks.pop(name_or_stack_id, None)
187 else:
188 # Delete by stack name
189 for stack in list(self.stacks.values()):
190 if stack.name == name_or_stack_id:
191 self.delete_stack(stack.stack_id)
192
193
194 cloudformation_backends = {}
195 for region in boto.cloudformation.regions():
196 cloudformation_backends[region.name] = CloudFormationBackend()
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py
--- a/moto/cloudformation/models.py
+++ b/moto/cloudformation/models.py
@@ -1,6 +1,7 @@
from __future__ import unicode_literals
from datetime import datetime
import json
+import yaml
import uuid
import boto.cloudformation
@@ -17,7 +18,7 @@
self.stack_id = stack_id
self.name = name
self.template = template
- self.template_dict = json.loads(self.template)
+ self._parse_template()
self.parameters = parameters
self.region_name = region_name
self.notification_arns = notification_arns if notification_arns else []
@@ -70,6 +71,12 @@
resource_properties=resource_properties,
))
+ def _parse_template(self):
+ try:
+ self.template_dict = yaml.load(self.template)
+ except yaml.parser.ParserError:
+ self.template_dict = json.loads(self.template)
+
@property
def stack_parameters(self):
return self.resource_map.resolved_parameters
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -11,6 +11,7 @@
"dicttoxml",
"six",
"werkzeug",
+ "pyaml",
"pytz",
"python-dateutil",
]
| {"golden_diff": "diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py\n--- a/moto/cloudformation/models.py\n+++ b/moto/cloudformation/models.py\n@@ -1,6 +1,7 @@\n from __future__ import unicode_literals\n from datetime import datetime\n import json\n+import yaml\n import uuid\n \n import boto.cloudformation\n@@ -17,7 +18,7 @@\n self.stack_id = stack_id\n self.name = name\n self.template = template\n- self.template_dict = json.loads(self.template)\n+ self._parse_template()\n self.parameters = parameters\n self.region_name = region_name\n self.notification_arns = notification_arns if notification_arns else []\n@@ -70,6 +71,12 @@\n resource_properties=resource_properties,\n ))\n \n+ def _parse_template(self):\n+ try:\n+ self.template_dict = yaml.load(self.template)\n+ except yaml.parser.ParserError:\n+ self.template_dict = json.loads(self.template)\n+\n @property\n def stack_parameters(self):\n return self.resource_map.resolved_parameters\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,6 +11,7 @@\n \"dicttoxml\",\n \"six\",\n \"werkzeug\",\n+ \"pyaml\",\n \"pytz\",\n \"python-dateutil\",\n ]\n", "issue": "Cloudformation doesn`t work with yaml templates\nWhen i try to use moto to mock a call to `create_stack` the following happens:\r\n\r\nTraceback:\r\n```\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\", line 107, in dispatch\r\n return cls()._dispatch(*args, **kwargs)\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\", line 167, in _dispatch\r\n return self.call_action()\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\", line 183, in call_action\r\n response = method()\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/responses.py\", line 51, in create_stack\r\n role_arn=role_arn,\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py\", line 126, in create_stack\r\n role_arn=role_arn,\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py\", line 18, in __init__\r\n self.template_dict = json.loads(self.template)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py\", line 354, in loads\r\n return _default_decoder.decode(s)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py\", line 339, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py\", line 357, in raw_decode\r\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\r\njson.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\r\n```\r\n\r\n#### How to reproduce\r\n\r\n```python\r\n# coding: utf-8\r\n\r\nimport unittest\r\nfrom boto.cloudformation import connect_to_region\r\nfrom moto import mock_cloudformation\r\n\r\n\r\nclass TestCase(unittest.TestCase):\r\n\r\n @mock_cloudformation\r\n def test_cloudformation_create_stack(self):\r\n\t connection = connect_to_region('sa-east-1')\r\n\t with open('ec2.yml') as fp:\r\n template = fp.read()\r\n\r\n connection.create_stack('test-stack', template_body=template)\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n```\r\n\r\nTemplate: ec2.yaml\r\n```yaml\r\nResources:\r\n 
BastionInstance:\r\n Type: \"AWS::EC2::Instance\"\r\n Properties: \r\n AvailabilityZone: sa-east-1a\r\n DisableApiTermination: false\r\n ImageId: ami-37cfad5b\r\n InstanceType: t2.micro\r\n```\r\n\r\nMoto version: 0.4.31\r\nBoto Version: 2.46.1\r\nPython Version: Python 3.6.0rc1\r\nSystem: Darwin Iurys-MacBook-Pro.local 15.5.0 Darwin Kernel Version 15.5.0: Tue Apr 19 18:36:36 PDT 2016; root:xnu-3248.50.21~8/RELEASE_X86_64 x86_64\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nfrom setuptools import setup, find_packages\n\ninstall_requires = [\n \"Jinja2>=2.8\",\n \"boto>=2.36.0\",\n \"cookies\",\n \"requests>=2.0\",\n \"xmltodict\",\n \"dicttoxml\",\n \"six\",\n \"werkzeug\",\n \"pytz\",\n \"python-dateutil\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\nsetup(\n name='moto',\n version='0.4.31',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}, {"content": "from __future__ import unicode_literals\nfrom datetime import datetime\nimport json\nimport uuid\n\nimport boto.cloudformation\nfrom moto.core import BaseBackend, BaseModel\n\nfrom .parsing import ResourceMap, OutputMap\nfrom .utils import generate_stack_id\nfrom .exceptions import ValidationError\n\n\nclass FakeStack(BaseModel):\n\n def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):\n self.stack_id = stack_id\n self.name = name\n self.template = template\n self.template_dict = json.loads(self.template)\n self.parameters = parameters\n self.region_name = region_name\n self.notification_arns = notification_arns if notification_arns else []\n self.role_arn = role_arn\n self.tags = tags if tags else {}\n self.events = []\n self._add_stack_event(\"CREATE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n\n self.description = self.template_dict.get('Description')\n self.resource_map = self._create_resource_map()\n self.output_map = self._create_output_map()\n self._add_stack_event(\"CREATE_COMPLETE\")\n self.status = 'CREATE_COMPLETE'\n\n def _create_resource_map(self):\n resource_map = ResourceMap(\n self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict)\n resource_map.create()\n return resource_map\n\n def _create_output_map(self):\n output_map = OutputMap(self.resource_map, self.template_dict)\n output_map.create()\n return output_map\n\n def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=self.name,\n physical_resource_id=self.stack_id,\n resource_type=\"AWS::CloudFormation::Stack\",\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n 
resource_properties=resource_properties,\n ))\n\n def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):\n # not used yet... feel free to help yourself\n resource = self.resource_map[logical_resource_id]\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=logical_resource_id,\n physical_resource_id=resource.physical_resource_id,\n resource_type=resource.type,\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n resource_properties=resource_properties,\n ))\n\n @property\n def stack_parameters(self):\n return self.resource_map.resolved_parameters\n\n @property\n def stack_resources(self):\n return self.resource_map.values()\n\n @property\n def stack_outputs(self):\n return self.output_map.values()\n\n def update(self, template, role_arn=None, parameters=None, tags=None):\n self._add_stack_event(\"UPDATE_IN_PROGRESS\", resource_status_reason=\"User Initiated\")\n self.template = template\n self.resource_map.update(json.loads(template), parameters)\n self.output_map = self._create_output_map()\n self._add_stack_event(\"UPDATE_COMPLETE\")\n self.status = \"UPDATE_COMPLETE\"\n self.role_arn = role_arn\n # only overwrite tags if passed\n if tags is not None:\n self.tags = tags\n # TODO: update tags in the resource map\n\n def delete(self):\n self._add_stack_event(\"DELETE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n self.resource_map.delete()\n self._add_stack_event(\"DELETE_COMPLETE\")\n self.status = \"DELETE_COMPLETE\"\n\n\nclass FakeEvent(BaseModel):\n\n def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):\n self.stack_id = stack_id\n self.stack_name = stack_name\n self.logical_resource_id = logical_resource_id\n self.physical_resource_id = physical_resource_id\n self.resource_type = resource_type\n self.resource_status = resource_status\n self.resource_status_reason = resource_status_reason\n self.resource_properties = resource_properties\n self.timestamp = datetime.utcnow()\n self.event_id = uuid.uuid4()\n\n\nclass CloudFormationBackend(BaseBackend):\n\n def __init__(self):\n self.stacks = {}\n self.deleted_stacks = {}\n\n def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):\n stack_id = generate_stack_id(name)\n new_stack = FakeStack(\n stack_id=stack_id,\n name=name,\n template=template,\n parameters=parameters,\n region_name=region_name,\n notification_arns=notification_arns,\n tags=tags,\n role_arn=role_arn,\n )\n self.stacks[stack_id] = new_stack\n return new_stack\n\n def describe_stacks(self, name_or_stack_id):\n stacks = self.stacks.values()\n if name_or_stack_id:\n for stack in stacks:\n if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:\n return [stack]\n if self.deleted_stacks:\n deleted_stacks = self.deleted_stacks.values()\n for stack in deleted_stacks:\n if stack.stack_id == name_or_stack_id:\n return [stack]\n raise ValidationError(name_or_stack_id)\n else:\n return stacks\n\n def list_stacks(self):\n return self.stacks.values()\n\n def get_stack(self, name_or_stack_id):\n all_stacks = dict(self.deleted_stacks, **self.stacks)\n if name_or_stack_id in all_stacks:\n # Lookup by stack id - deleted stacks incldued\n return all_stacks[name_or_stack_id]\n else:\n # Lookup by stack name - undeleted stacks only\n 
for stack in self.stacks.values():\n if stack.name == name_or_stack_id:\n return stack\n\n def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):\n stack = self.get_stack(name)\n stack.update(template, role_arn, parameters=parameters, tags=tags)\n return stack\n\n def list_stack_resources(self, stack_name_or_id):\n stack = self.get_stack(stack_name_or_id)\n return stack.stack_resources\n\n def delete_stack(self, name_or_stack_id):\n if name_or_stack_id in self.stacks:\n # Delete by stack id\n stack = self.stacks.pop(name_or_stack_id, None)\n stack.delete()\n self.deleted_stacks[stack.stack_id] = stack\n return self.stacks.pop(name_or_stack_id, None)\n else:\n # Delete by stack name\n for stack in list(self.stacks.values()):\n if stack.name == name_or_stack_id:\n self.delete_stack(stack.stack_id)\n\n\ncloudformation_backends = {}\nfor region in boto.cloudformation.regions():\n cloudformation_backends[region.name] = CloudFormationBackend()\n", "path": "moto/cloudformation/models.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nfrom setuptools import setup, find_packages\n\ninstall_requires = [\n \"Jinja2>=2.8\",\n \"boto>=2.36.0\",\n \"cookies\",\n \"requests>=2.0\",\n \"xmltodict\",\n \"dicttoxml\",\n \"six\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\nsetup(\n name='moto',\n version='0.4.31',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}, {"content": "from __future__ import unicode_literals\nfrom datetime import datetime\nimport json\nimport yaml\nimport uuid\n\nimport boto.cloudformation\nfrom moto.core import BaseBackend, BaseModel\n\nfrom .parsing import ResourceMap, OutputMap\nfrom .utils import generate_stack_id\nfrom .exceptions import ValidationError\n\n\nclass FakeStack(BaseModel):\n\n def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):\n self.stack_id = stack_id\n self.name = name\n self.template = template\n self._parse_template()\n self.parameters = parameters\n self.region_name = region_name\n self.notification_arns = notification_arns if notification_arns else []\n self.role_arn = role_arn\n self.tags = tags if tags else {}\n self.events = []\n self._add_stack_event(\"CREATE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n\n self.description = self.template_dict.get('Description')\n self.resource_map = self._create_resource_map()\n self.output_map = self._create_output_map()\n self._add_stack_event(\"CREATE_COMPLETE\")\n self.status = 'CREATE_COMPLETE'\n\n def _create_resource_map(self):\n resource_map = ResourceMap(\n self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict)\n 
resource_map.create()\n return resource_map\n\n def _create_output_map(self):\n output_map = OutputMap(self.resource_map, self.template_dict)\n output_map.create()\n return output_map\n\n def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=self.name,\n physical_resource_id=self.stack_id,\n resource_type=\"AWS::CloudFormation::Stack\",\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n resource_properties=resource_properties,\n ))\n\n def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):\n # not used yet... feel free to help yourself\n resource = self.resource_map[logical_resource_id]\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=logical_resource_id,\n physical_resource_id=resource.physical_resource_id,\n resource_type=resource.type,\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n resource_properties=resource_properties,\n ))\n\n def _parse_template(self):\n try:\n self.template_dict = yaml.load(self.template)\n except yaml.parser.ParserError:\n self.template_dict = json.loads(self.template)\n\n @property\n def stack_parameters(self):\n return self.resource_map.resolved_parameters\n\n @property\n def stack_resources(self):\n return self.resource_map.values()\n\n @property\n def stack_outputs(self):\n return self.output_map.values()\n\n def update(self, template, role_arn=None, parameters=None, tags=None):\n self._add_stack_event(\"UPDATE_IN_PROGRESS\", resource_status_reason=\"User Initiated\")\n self.template = template\n self.resource_map.update(json.loads(template), parameters)\n self.output_map = self._create_output_map()\n self._add_stack_event(\"UPDATE_COMPLETE\")\n self.status = \"UPDATE_COMPLETE\"\n self.role_arn = role_arn\n # only overwrite tags if passed\n if tags is not None:\n self.tags = tags\n # TODO: update tags in the resource map\n\n def delete(self):\n self._add_stack_event(\"DELETE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n self.resource_map.delete()\n self._add_stack_event(\"DELETE_COMPLETE\")\n self.status = \"DELETE_COMPLETE\"\n\n\nclass FakeEvent(BaseModel):\n\n def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):\n self.stack_id = stack_id\n self.stack_name = stack_name\n self.logical_resource_id = logical_resource_id\n self.physical_resource_id = physical_resource_id\n self.resource_type = resource_type\n self.resource_status = resource_status\n self.resource_status_reason = resource_status_reason\n self.resource_properties = resource_properties\n self.timestamp = datetime.utcnow()\n self.event_id = uuid.uuid4()\n\n\nclass CloudFormationBackend(BaseBackend):\n\n def __init__(self):\n self.stacks = {}\n self.deleted_stacks = {}\n\n def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):\n stack_id = generate_stack_id(name)\n new_stack = FakeStack(\n stack_id=stack_id,\n name=name,\n template=template,\n parameters=parameters,\n region_name=region_name,\n notification_arns=notification_arns,\n tags=tags,\n role_arn=role_arn,\n )\n self.stacks[stack_id] = new_stack\n return new_stack\n\n def describe_stacks(self, 
name_or_stack_id):\n stacks = self.stacks.values()\n if name_or_stack_id:\n for stack in stacks:\n if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:\n return [stack]\n if self.deleted_stacks:\n deleted_stacks = self.deleted_stacks.values()\n for stack in deleted_stacks:\n if stack.stack_id == name_or_stack_id:\n return [stack]\n raise ValidationError(name_or_stack_id)\n else:\n return stacks\n\n def list_stacks(self):\n return self.stacks.values()\n\n def get_stack(self, name_or_stack_id):\n all_stacks = dict(self.deleted_stacks, **self.stacks)\n if name_or_stack_id in all_stacks:\n # Lookup by stack id - deleted stacks incldued\n return all_stacks[name_or_stack_id]\n else:\n # Lookup by stack name - undeleted stacks only\n for stack in self.stacks.values():\n if stack.name == name_or_stack_id:\n return stack\n\n def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):\n stack = self.get_stack(name)\n stack.update(template, role_arn, parameters=parameters, tags=tags)\n return stack\n\n def list_stack_resources(self, stack_name_or_id):\n stack = self.get_stack(stack_name_or_id)\n return stack.stack_resources\n\n def delete_stack(self, name_or_stack_id):\n if name_or_stack_id in self.stacks:\n # Delete by stack id\n stack = self.stacks.pop(name_or_stack_id, None)\n stack.delete()\n self.deleted_stacks[stack.stack_id] = stack\n return self.stacks.pop(name_or_stack_id, None)\n else:\n # Delete by stack name\n for stack in list(self.stacks.values()):\n if stack.name == name_or_stack_id:\n self.delete_stack(stack.stack_id)\n\n\ncloudformation_backends = {}\nfor region in boto.cloudformation.regions():\n cloudformation_backends[region.name] = CloudFormationBackend()\n", "path": "moto/cloudformation/models.py"}]} | 3,486 | 306 |
gh_patches_debug_19187 | rasdani/github-patches | git_diff | mozmeao__snippets-service-769 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create a button to copy to clipboard the preview link on click
Preview link for ASR Snippets cannot be directly clicked and instead must be copied and pasted to the URL bar. Create a `Copy` button next to the link to make the procedure easier.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/admin/adminmodels.py`
Content:
```
1 import re
2
3 from django.contrib import admin
4 from django.db.models import TextField, Q
5 from django.template.loader import get_template
6 from django.utils.safestring import mark_safe
7
8 from reversion.admin import VersionAdmin
9 from django_ace import AceWidget
10 from django_statsd.clients import statsd
11 from jinja2.meta import find_undeclared_variables
12 from django_admin_listfilter_dropdown.filters import RelatedDropdownFilter
13
14 from snippets.base import forms, models
15 from snippets.base.models import JINJA_ENV
16 from snippets.base.admin.filters import ModifiedFilter, ReleaseFilter
17
18
19 MATCH_LOCALE_REGEX = re.compile('(\w+(?:-\w+)*)')
20 RESERVED_VARIABLES = ('_', 'snippet_id')
21
22
23 class ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):
24 list_display = ('description', 'is_exclusion', 'startpage_version', 'name',
25 'version', 'locale', 'appbuildid', 'build_target',
26 'channel', 'os_version', 'distribution',
27 'distribution_version', 'modified')
28 list_filter = ('name', 'version', 'os_version', 'appbuildid',
29 'build_target', 'channel', 'distribution', 'locale')
30 save_on_top = True
31 search_fields = ('description',)
32
33
34 class LogEntryAdmin(admin.ModelAdmin):
35 list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')
36 list_filter = ('user', 'content_type')
37
38
39 class SnippetTemplateVariableInline(admin.TabularInline):
40 model = models.SnippetTemplateVariable
41 formset = forms.SnippetTemplateVariableInlineFormset
42 max_num = 0
43 can_delete = False
44 readonly_fields = ('name',)
45 fields = ('name', 'type', 'order', 'description')
46
47
48 class SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):
49 save_on_top = True
50 list_display = ('name', 'priority', 'hidden')
51 list_filter = ('hidden', 'startpage')
52 inlines = (SnippetTemplateVariableInline,)
53 formfield_overrides = {
54 TextField: {'widget': AceWidget(mode='html', theme='github',
55 width='1200px', height='500px')},
56 }
57
58 class Media:
59 css = {
60 'all': ('css/admin.css',)
61 }
62
63 def save_related(self, request, form, formsets, change):
64 """
65 After saving the related objects, remove and add
66 SnippetTemplateVariables depending on how the template code changed.
67 """
68 super(SnippetTemplateAdmin, self).save_related(request, form, formsets,
69 change)
70
71 # Parse the template code and find any undefined variables.
72 ast = JINJA_ENV.env.parse(form.instance.code)
73 new_vars = find_undeclared_variables(ast)
74 var_manager = form.instance.variable_set
75
76 # Filter out reserved variable names.
77 new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]
78
79 # Delete variables not in the new set.
80 var_manager.filter(~Q(name__in=new_vars)).delete()
81
82 # Create variables that don't exist.
83 for i, variable in enumerate(new_vars, start=1):
84 obj, _ = models.SnippetTemplateVariable.objects.get_or_create(
85 template=form.instance, name=variable)
86 if obj.order == 0:
87 obj.order = i * 10
88 obj.save()
89
90
91 class UploadedFileAdmin(admin.ModelAdmin):
92 readonly_fields = ('url', 'preview', 'snippets')
93 list_display = ('name', 'url', 'preview', 'modified')
94 prepopulated_fields = {'name': ('file',)}
95 form = forms.UploadedFileAdminForm
96
97 def preview(self, obj):
98 template = get_template('base/uploadedfile_preview.jinja')
99 return mark_safe(template.render({'file': obj}))
100
101 def snippets(self, obj):
102 """Snippets using this file."""
103 template = get_template('base/uploadedfile_snippets.jinja')
104 return mark_safe(template.render({'snippets': obj.snippets}))
105
106
107 class AddonAdmin(admin.ModelAdmin):
108 list_display = ('name', 'guid')
109
110
111 class ASRSnippetAdmin(admin.ModelAdmin):
112 form = forms.ASRSnippetAdminForm
113
114 list_display_links = (
115 'id',
116 'name',
117 )
118 list_display = (
119 'id',
120 'name',
121 'status',
122 'modified',
123 )
124 list_filter = (
125 ModifiedFilter,
126 'status',
127 ReleaseFilter,
128 ('template', RelatedDropdownFilter),
129 )
130 search_fields = (
131 'name',
132 )
133 autocomplete_fields = (
134 'campaign',
135 'target',
136 )
137 preserve_filters = True
138 readonly_fields = (
139 'created',
140 'modified',
141 'uuid',
142 'creator',
143 'preview_url',
144 )
145 filter_horizontal = ('locales',)
146 save_on_top = True
147 save_as = True
148 view_on_site = False
149
150 fieldsets = (
151 ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),
152 ('Content', {
153 'description': (
154 '''
155 <strong>Available deep links:</strong><br/>
156 <ol>
157 <li><code>special:accounts</code> to open Firefox Accounts</li>
158 <li><code>special:appMenu</code> to open the hamburger menu</li>
159 </ol><br/>
160 <strong>Automatically add Snippet ID:</strong><br/>
161 You can use <code>[[snippet_id]]</code> in any field and it
162 will be automatically replaced by Snippet ID when served to users.
163 <br/>
164 Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code> # noqa
165 <br/>
166 '''
167 ),
168 'fields': ('template', 'data'),
169 }),
170 ('Publishing Options', {
171 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)
172 }),
173 ('Other Info', {
174 'fields': ('uuid', ('created', 'modified')),
175 'classes': ('collapse',)
176 }),
177 )
178
179 class Media:
180 css = {
181 'all': ('css/admin/ASRSnippetAdmin.css',)
182 }
183
184 def save_model(self, request, obj, form, change):
185 obj.creator = request.user
186 statsd.incr('save.asrsnippet')
187 super().save_model(request, obj, form, change)
188
189 def preview_url(self, obj):
190 return obj.get_preview_url()
191
192
193 class CampaignAdmin(admin.ModelAdmin):
194 readonly_fields = ('created', 'modified', 'creator',)
195 prepopulated_fields = {'slug': ('name',)}
196
197 fieldsets = (
198 ('ID', {'fields': ('name', 'slug')}),
199 ('Other Info', {
200 'fields': ('creator', ('created', 'modified')),
201 }),
202 )
203 search_fields = (
204 'name',
205 )
206
207 def save_model(self, request, obj, form, change):
208 obj.creator = request.user
209 statsd.incr('save.campaign')
210 super().save_model(request, obj, form, change)
211
212
213 class TargetAdmin(admin.ModelAdmin):
214 form = forms.TargetAdminForm
215 readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')
216 search_fields = (
217 'name',
218 )
219 fieldsets = (
220 ('ID', {'fields': ('name',)}),
221 ('Product channels', {
222 'description': 'What channels will this snippet be available in?',
223 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)
224 }),
225 ('Targeting', {
226 'fields': (
227 'filtr_is_default_browser',
228 'filtr_updates_enabled',
229 'filtr_updates_autodownload_enabled',
230 'filtr_profile_age_created',
231 'filtr_firefox_version',
232 'filtr_previous_session_end',
233 'filtr_uses_firefox_sync',
234 'filtr_country',
235 'filtr_is_developer',
236 'filtr_current_search_engine',
237 'filtr_browser_addon',
238 'filtr_total_bookmarks_count',
239 )
240 }),
241 ('Other Info', {
242 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),
243 }),
244 )
245
246 def save_model(self, request, obj, form, change):
247 obj.creator = request.user
248 statsd.incr('save.target')
249 super().save_model(request, obj, form, change)
250
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py
--- a/snippets/base/admin/adminmodels.py
+++ b/snippets/base/admin/adminmodels.py
@@ -180,6 +180,10 @@
css = {
'all': ('css/admin/ASRSnippetAdmin.css',)
}
+ js = (
+ 'js/admin/clipboard.min.js',
+ 'js/admin/copy_preview.js',
+ )
def save_model(self, request, obj, form, change):
obj.creator = request.user
@@ -187,7 +191,15 @@
super().save_model(request, obj, form, change)
def preview_url(self, obj):
- return obj.get_preview_url()
+ text = f'''
+ <span id="previewLinkUrl">{obj.get_preview_url()}</span>
+ <button id="copyPreviewLink" class="btn"
+ data-clipboard-target="#previewLinkUrl"
+ originalText="Copy to Clipboard" type="button">
+ Copy to Clipboard
+ </button>
+ '''
+ return mark_safe(text)
class CampaignAdmin(admin.ModelAdmin):
| {"golden_diff": "diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py\n--- a/snippets/base/admin/adminmodels.py\n+++ b/snippets/base/admin/adminmodels.py\n@@ -180,6 +180,10 @@\n css = {\n 'all': ('css/admin/ASRSnippetAdmin.css',)\n }\n+ js = (\n+ 'js/admin/clipboard.min.js',\n+ 'js/admin/copy_preview.js',\n+ )\n \n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n@@ -187,7 +191,15 @@\n super().save_model(request, obj, form, change)\n \n def preview_url(self, obj):\n- return obj.get_preview_url()\n+ text = f'''\n+ <span id=\"previewLinkUrl\">{obj.get_preview_url()}</span>\n+ <button id=\"copyPreviewLink\" class=\"btn\"\n+ data-clipboard-target=\"#previewLinkUrl\"\n+ originalText=\"Copy to Clipboard\" type=\"button\">\n+ Copy to Clipboard\n+ </button>\n+ '''\n+ return mark_safe(text)\n \n \n class CampaignAdmin(admin.ModelAdmin):\n", "issue": "Create a button to copy to clipboard the preview link on click\nPreview link for ASR Snippets cannot be directly clicked and instead must be copied and pasted to the URL bar. Create a `Copy` button next to the link to make the procedure easier. \n", "before_files": [{"content": "import re\n\nfrom django.contrib import admin\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import RelatedDropdownFilter\n\nfrom snippets.base import forms, models\nfrom snippets.base.models import JINJA_ENV\nfrom snippets.base.admin.filters import ModifiedFilter, ReleaseFilter\n\n\nMATCH_LOCALE_REGEX = re.compile('(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n class Media:\n css = {\n 'all': ('css/admin.css',)\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = 
JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass UploadedFileAdmin(admin.ModelAdmin):\n readonly_fields = ('url', 'preview', 'snippets')\n list_display = ('name', 'url', 'preview', 'modified')\n prepopulated_fields = {'name': ('file',)}\n form = forms.UploadedFileAdminForm\n\n def preview(self, obj):\n template = get_template('base/uploadedfile_preview.jinja')\n return mark_safe(template.render({'file': obj}))\n\n def snippets(self, obj):\n \"\"\"Snippets using this file.\"\"\"\n template = get_template('base/uploadedfile_snippets.jinja')\n return mark_safe(template.render({'snippets': obj.snippets}))\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'modified',\n )\n list_filter = (\n ModifiedFilter,\n 'status',\n ReleaseFilter,\n ('template', RelatedDropdownFilter),\n )\n search_fields = (\n 'name',\n )\n autocomplete_fields = (\n 'campaign',\n 'target',\n )\n preserve_filters = True\n readonly_fields = (\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url',\n )\n filter_horizontal = ('locales',)\n save_on_top = True\n save_as = True\n view_on_site = False\n\n fieldsets = (\n ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n </ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code> # noqa\n <br/>\n '''\n ),\n 'fields': ('template', 'data'),\n }),\n ('Publishing Options', {\n 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified')),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': ('css/admin/ASRSnippetAdmin.css',)\n }\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url(self, obj):\n return obj.get_preview_url()\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass 
TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')\n search_fields = (\n 'name',\n )\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_uses_firefox_sync',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n", "path": "snippets/base/admin/adminmodels.py"}], "after_files": [{"content": "import re\n\nfrom django.contrib import admin\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import RelatedDropdownFilter\n\nfrom snippets.base import forms, models\nfrom snippets.base.models import JINJA_ENV\nfrom snippets.base.admin.filters import ModifiedFilter, ReleaseFilter\n\n\nMATCH_LOCALE_REGEX = re.compile('(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n class Media:\n css = {\n 'all': ('css/admin.css',)\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = JINJA_ENV.env.parse(form.instance.code)\n new_vars = 
find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass UploadedFileAdmin(admin.ModelAdmin):\n readonly_fields = ('url', 'preview', 'snippets')\n list_display = ('name', 'url', 'preview', 'modified')\n prepopulated_fields = {'name': ('file',)}\n form = forms.UploadedFileAdminForm\n\n def preview(self, obj):\n template = get_template('base/uploadedfile_preview.jinja')\n return mark_safe(template.render({'file': obj}))\n\n def snippets(self, obj):\n \"\"\"Snippets using this file.\"\"\"\n template = get_template('base/uploadedfile_snippets.jinja')\n return mark_safe(template.render({'snippets': obj.snippets}))\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'modified',\n )\n list_filter = (\n ModifiedFilter,\n 'status',\n ReleaseFilter,\n ('template', RelatedDropdownFilter),\n )\n search_fields = (\n 'name',\n )\n autocomplete_fields = (\n 'campaign',\n 'target',\n )\n preserve_filters = True\n readonly_fields = (\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url',\n )\n filter_horizontal = ('locales',)\n save_on_top = True\n save_as = True\n view_on_site = False\n\n fieldsets = (\n ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n </ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code> # noqa\n <br/>\n '''\n ),\n 'fields': ('template', 'data'),\n }),\n ('Publishing Options', {\n 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified')),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': ('css/admin/ASRSnippetAdmin.css',)\n }\n js = (\n 'js/admin/clipboard.min.js',\n 'js/admin/copy_preview.js',\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url(self, obj):\n text = f'''\n <span id=\"previewLinkUrl\">{obj.get_preview_url()}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrl\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 
'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')\n search_fields = (\n 'name',\n )\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_uses_firefox_sync',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n", "path": "snippets/base/admin/adminmodels.py"}]} | 2,811 | 260 |
gh_patches_debug_1928 | rasdani/github-patches | git_diff | goauthentik__authentik-3299 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Get username from mailcow source
**Is your feature request related to a problem? Please describe.**
I would like to get a username from mailcow. With a username, the enrollment for new users is simpler.
**Describe the solution you'd like**
Set the username to the full_name provided by the mailcow OAuth source.
**Additional context**
For other sources, the username is also set redundantly to another attribute when there is no dedicated source attribute:
azure_ad.py:
```
"username": info.get("displayName"),
"name": info.get("displayName"),
```
discord.py:
```
"username": info.get("username"),
"name": info.get("username"),
```
facebook.py:
```
"username": info.get("name"),
"name": info.get("name"),
```
reddit.py
```
"username": info.get("name"),
"name": info.get("name"),
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/oauth/types/mailcow.py`
Content:
```
1 """Mailcow OAuth Views"""
2 from typing import Any, Optional
3
4 from requests.exceptions import RequestException
5 from structlog.stdlib import get_logger
6
7 from authentik.sources.oauth.clients.oauth2 import OAuth2Client
8 from authentik.sources.oauth.types.manager import MANAGER, SourceType
9 from authentik.sources.oauth.views.callback import OAuthCallback
10 from authentik.sources.oauth.views.redirect import OAuthRedirect
11
12 LOGGER = get_logger()
13
14
15 class MailcowOAuthRedirect(OAuthRedirect):
16 """Mailcow OAuth2 Redirect"""
17
18 def get_additional_parameters(self, source): # pragma: no cover
19 return {
20 "scope": ["profile"],
21 }
22
23
24 class MailcowOAuth2Client(OAuth2Client):
25 """MailcowOAuth2Client, for some reason, mailcow does not like the default headers"""
26
27 def get_profile_info(self, token: dict[str, str]) -> Optional[dict[str, Any]]:
28 "Fetch user profile information."
29 profile_url = self.source.type.profile_url or ""
30 if self.source.type.urls_customizable and self.source.profile_url:
31 profile_url = self.source.profile_url
32 try:
33 response = self.session.request(
34 "get",
35 f"{profile_url}?access_token={token['access_token']}",
36 )
37 response.raise_for_status()
38 except RequestException as exc:
39 LOGGER.warning("Unable to fetch user profile", exc=exc, body=response.text)
40 return None
41 else:
42 return response.json()
43
44
45 class MailcowOAuth2Callback(OAuthCallback):
46 """Mailcow OAuth2 Callback"""
47
48 client_class = MailcowOAuth2Client
49
50 def get_user_enroll_context(
51 self,
52 info: dict[str, Any],
53 ) -> dict[str, Any]:
54 return {
55 "email": info.get("email"),
56 "name": info.get("full_name"),
57 }
58
59
60 @MANAGER.type()
61 class MailcowType(SourceType):
62 """Mailcow Type definition"""
63
64 callback_view = MailcowOAuth2Callback
65 redirect_view = MailcowOAuthRedirect
66 name = "Mailcow"
67 slug = "mailcow"
68
69 urls_customizable = True
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/sources/oauth/types/mailcow.py b/authentik/sources/oauth/types/mailcow.py
--- a/authentik/sources/oauth/types/mailcow.py
+++ b/authentik/sources/oauth/types/mailcow.py
@@ -52,6 +52,7 @@
info: dict[str, Any],
) -> dict[str, Any]:
return {
+ "username": info.get("full_name"),
"email": info.get("email"),
"name": info.get("full_name"),
}
| {"golden_diff": "diff --git a/authentik/sources/oauth/types/mailcow.py b/authentik/sources/oauth/types/mailcow.py\n--- a/authentik/sources/oauth/types/mailcow.py\n+++ b/authentik/sources/oauth/types/mailcow.py\n@@ -52,6 +52,7 @@\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n+ \"username\": info.get(\"full_name\"),\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"full_name\"),\n }\n", "issue": "Get username from mailcow source\n**Is your feature request related to a problem? Please describe.**\r\nI like to get a username from mailcow. With username the enrollment for new users is more simple.\r\n\r\n**Describe the solution you'd like**\r\nSet username to full_name provided by mailcow oauths source.\r\n\r\n**Additional context**\r\nFor other sources the username is also set redundant to another attribute if there is no special source attribute:\r\nazure_ad.py:\r\n```\r\n \"username\": info.get(\"displayName\"),\r\n \"name\": info.get(\"displayName\"),\r\n```\r\n\r\ndiscord.py:\r\n```\r\n \"username\": info.get(\"username\"),\r\n \"name\": info.get(\"username\"),\r\n```\r\n\r\nfacebook.py:\r\n```\r\n \"username\": info.get(\"name\"),\r\n \"name\": info.get(\"name\"),\r\n```\r\n\r\nreddit.py\r\n```\r\n \"username\": info.get(\"name\"),\r\n \"name\": info.get(\"name\"),\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Mailcow OAuth Views\"\"\"\nfrom typing import Any, Optional\n\nfrom requests.exceptions import RequestException\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.oauth.clients.oauth2 import OAuth2Client\nfrom authentik.sources.oauth.types.manager import MANAGER, SourceType\nfrom authentik.sources.oauth.views.callback import OAuthCallback\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass MailcowOAuthRedirect(OAuthRedirect):\n \"\"\"Mailcow OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"profile\"],\n }\n\n\nclass MailcowOAuth2Client(OAuth2Client):\n \"\"\"MailcowOAuth2Client, for some reason, mailcow does not like the default headers\"\"\"\n\n def get_profile_info(self, token: dict[str, str]) -> Optional[dict[str, Any]]:\n \"Fetch user profile information.\"\n profile_url = self.source.type.profile_url or \"\"\n if self.source.type.urls_customizable and self.source.profile_url:\n profile_url = self.source.profile_url\n try:\n response = self.session.request(\n \"get\",\n f\"{profile_url}?access_token={token['access_token']}\",\n )\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\"Unable to fetch user profile\", exc=exc, body=response.text)\n return None\n else:\n return response.json()\n\n\nclass MailcowOAuth2Callback(OAuthCallback):\n \"\"\"Mailcow OAuth2 Callback\"\"\"\n\n client_class = MailcowOAuth2Client\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"full_name\"),\n }\n\n\[email protected]()\nclass MailcowType(SourceType):\n \"\"\"Mailcow Type definition\"\"\"\n\n callback_view = MailcowOAuth2Callback\n redirect_view = MailcowOAuthRedirect\n name = \"Mailcow\"\n slug = \"mailcow\"\n\n urls_customizable = True\n", "path": "authentik/sources/oauth/types/mailcow.py"}], "after_files": [{"content": "\"\"\"Mailcow OAuth Views\"\"\"\nfrom typing import Any, Optional\n\nfrom requests.exceptions import RequestException\nfrom structlog.stdlib import get_logger\n\nfrom 
authentik.sources.oauth.clients.oauth2 import OAuth2Client\nfrom authentik.sources.oauth.types.manager import MANAGER, SourceType\nfrom authentik.sources.oauth.views.callback import OAuthCallback\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass MailcowOAuthRedirect(OAuthRedirect):\n \"\"\"Mailcow OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"profile\"],\n }\n\n\nclass MailcowOAuth2Client(OAuth2Client):\n \"\"\"MailcowOAuth2Client, for some reason, mailcow does not like the default headers\"\"\"\n\n def get_profile_info(self, token: dict[str, str]) -> Optional[dict[str, Any]]:\n \"Fetch user profile information.\"\n profile_url = self.source.type.profile_url or \"\"\n if self.source.type.urls_customizable and self.source.profile_url:\n profile_url = self.source.profile_url\n try:\n response = self.session.request(\n \"get\",\n f\"{profile_url}?access_token={token['access_token']}\",\n )\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\"Unable to fetch user profile\", exc=exc, body=response.text)\n return None\n else:\n return response.json()\n\n\nclass MailcowOAuth2Callback(OAuthCallback):\n \"\"\"Mailcow OAuth2 Callback\"\"\"\n\n client_class = MailcowOAuth2Client\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n \"username\": info.get(\"full_name\"),\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"full_name\"),\n }\n\n\[email protected]()\nclass MailcowType(SourceType):\n \"\"\"Mailcow Type definition\"\"\"\n\n callback_view = MailcowOAuth2Callback\n redirect_view = MailcowOAuthRedirect\n name = \"Mailcow\"\n slug = \"mailcow\"\n\n urls_customizable = True\n", "path": "authentik/sources/oauth/types/mailcow.py"}]} | 1,039 | 111 |
gh_patches_debug_26693 | rasdani/github-patches | git_diff | benoitc__gunicorn-1581 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--config doesn't work in GUNICORN_CMD_ARGS
Specifying `--config` in the `GUNICORN_CMD_ARGS` environment variable quietly fails as config file loading only happens when it is passed as an argument:
https://github.com/benoitc/gunicorn/blob/328e509260ae70de6c04c5ba885ee17960b3ced5/gunicorn/app/base.py#L137-L175
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn/app/base.py`
Content:
```
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5 from __future__ import print_function
6
7 import os
8 import sys
9 import traceback
10
11 from gunicorn._compat import execfile_
12 from gunicorn import util
13 from gunicorn.arbiter import Arbiter
14 from gunicorn.config import Config, get_default_config_file
15 from gunicorn import debug
16
17 class BaseApplication(object):
18 """
19 An application interface for configuring and loading
20 the various necessities for any given web framework.
21 """
22 def __init__(self, usage=None, prog=None):
23 self.usage = usage
24 self.cfg = None
25 self.callable = None
26 self.prog = prog
27 self.logger = None
28 self.do_load_config()
29
30 def do_load_config(self):
31 """
32 Loads the configuration
33 """
34 try:
35 self.load_default_config()
36 self.load_config()
37 except Exception as e:
38 print("\nError: %s" % str(e), file=sys.stderr)
39 sys.stderr.flush()
40 sys.exit(1)
41
42 def load_default_config(self):
43 # init configuration
44 self.cfg = Config(self.usage, prog=self.prog)
45
46 def init(self, parser, opts, args):
47 raise NotImplementedError
48
49 def load(self):
50 raise NotImplementedError
51
52 def load_config(self):
53 """
54 This method is used to load the configuration from one or several input(s).
55 Custom Command line, configuration file.
56 You have to override this method in your class.
57 """
58 raise NotImplementedError
59
60 def reload(self):
61 self.do_load_config()
62 if self.cfg.spew:
63 debug.spew()
64
65 def wsgi(self):
66 if self.callable is None:
67 self.callable = self.load()
68 return self.callable
69
70 def run(self):
71 try:
72 Arbiter(self).run()
73 except RuntimeError as e:
74 print("\nError: %s\n" % e, file=sys.stderr)
75 sys.stderr.flush()
76 sys.exit(1)
77
78 class Application(BaseApplication):
79
80 def get_config_from_filename(self, filename):
81
82 if not os.path.exists(filename):
83 raise RuntimeError("%r doesn't exist" % filename)
84
85 cfg = {
86 "__builtins__": __builtins__,
87 "__name__": "__config__",
88 "__file__": filename,
89 "__doc__": None,
90 "__package__": None
91 }
92 try:
93 execfile_(filename, cfg, cfg)
94 except Exception:
95 print("Failed to read config file: %s" % filename, file=sys.stderr)
96 traceback.print_exc()
97 sys.stderr.flush()
98 sys.exit(1)
99
100 return cfg
101
102 def get_config_from_module_name(self, module_name):
103 return vars(util.import_module(module_name))
104
105 def load_config_from_module_name_or_filename(self, location):
106 """
107 Loads the configuration file: the file is a python file, otherwise raise an RuntimeError
108 Exception or stop the process if the configuration file contains a syntax error.
109 """
110
111 if location.startswith("python:"):
112 module_name = location[len("python:"):]
113 cfg = self.get_config_from_module_name(module_name)
114 else:
115 if location.startswith("file:"):
116 filename = location[len("file:"):]
117 else:
118 filename = location
119 cfg = self.get_config_from_filename(filename)
120
121 for k, v in cfg.items():
122 # Ignore unknown names
123 if k not in self.cfg.settings:
124 continue
125 try:
126 self.cfg.set(k.lower(), v)
127 except:
128 print("Invalid value for %s: %s\n" % (k, v), file=sys.stderr)
129 sys.stderr.flush()
130 raise
131
132 return cfg
133
134 def load_config_from_file(self, filename):
135 return self.load_config_from_module_name_or_filename(location=filename)
136
137 def load_config(self):
138 # parse console args
139 parser = self.cfg.parser()
140 args = parser.parse_args()
141
142 # optional settings from apps
143 cfg = self.init(parser, args, args.args)
144
145 # Load up the any app specific configuration
146 if cfg:
147 for k, v in cfg.items():
148 self.cfg.set(k.lower(), v)
149
150 if args.config:
151 self.load_config_from_file(args.config)
152 else:
153 default_config = get_default_config_file()
154 if default_config is not None:
155 self.load_config_from_file(default_config)
156
157 # Load up environment configuration
158 env_vars = self.cfg.get_cmd_args_from_env()
159 if env_vars:
160 env_args = parser.parse_args(env_vars)
161 for k, v in vars(env_args).items():
162 if v is None:
163 continue
164 if k == "args":
165 continue
166 self.cfg.set(k.lower(), v)
167
168 # Lastly, update the configuration with any command line
169 # settings.
170 for k, v in vars(args).items():
171 if v is None:
172 continue
173 if k == "args":
174 continue
175 self.cfg.set(k.lower(), v)
176
177 def run(self):
178 if self.cfg.check_config:
179 try:
180 self.load()
181 except:
182 msg = "\nError while loading the application:\n"
183 print(msg, file=sys.stderr)
184 traceback.print_exc()
185 sys.stderr.flush()
186 sys.exit(1)
187 sys.exit(0)
188
189 if self.cfg.spew:
190 debug.spew()
191
192 if self.cfg.daemon:
193 util.daemonize(self.cfg.enable_stdio_inheritance)
194
195 # set python paths
196 if self.cfg.pythonpath:
197 paths = self.cfg.pythonpath.split(",")
198 for path in paths:
199 pythonpath = os.path.abspath(path)
200 if pythonpath not in sys.path:
201 sys.path.insert(0, pythonpath)
202
203 super(Application, self).run()
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gunicorn/app/base.py b/gunicorn/app/base.py
--- a/gunicorn/app/base.py
+++ b/gunicorn/app/base.py
@@ -147,26 +147,26 @@
for k, v in cfg.items():
self.cfg.set(k.lower(), v)
+ env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())
+
if args.config:
self.load_config_from_file(args.config)
+ elif env_args.config:
+ self.load_config_from_file(env_args.config)
else:
default_config = get_default_config_file()
if default_config is not None:
self.load_config_from_file(default_config)
# Load up environment configuration
- env_vars = self.cfg.get_cmd_args_from_env()
- if env_vars:
- env_args = parser.parse_args(env_vars)
- for k, v in vars(env_args).items():
- if v is None:
- continue
- if k == "args":
- continue
- self.cfg.set(k.lower(), v)
+ for k, v in vars(env_args).items():
+ if v is None:
+ continue
+ if k == "args":
+ continue
+ self.cfg.set(k.lower(), v)
- # Lastly, update the configuration with any command line
- # settings.
+ # Lastly, update the configuration with any command line settings.
for k, v in vars(args).items():
if v is None:
continue
| {"golden_diff": "diff --git a/gunicorn/app/base.py b/gunicorn/app/base.py\n--- a/gunicorn/app/base.py\n+++ b/gunicorn/app/base.py\n@@ -147,26 +147,26 @@\n for k, v in cfg.items():\n self.cfg.set(k.lower(), v)\n \n+ env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())\n+\n if args.config:\n self.load_config_from_file(args.config)\n+ elif env_args.config:\n+ self.load_config_from_file(env_args.config)\n else:\n default_config = get_default_config_file()\n if default_config is not None:\n self.load_config_from_file(default_config)\n \n # Load up environment configuration\n- env_vars = self.cfg.get_cmd_args_from_env()\n- if env_vars:\n- env_args = parser.parse_args(env_vars)\n- for k, v in vars(env_args).items():\n- if v is None:\n- continue\n- if k == \"args\":\n- continue\n- self.cfg.set(k.lower(), v)\n+ for k, v in vars(env_args).items():\n+ if v is None:\n+ continue\n+ if k == \"args\":\n+ continue\n+ self.cfg.set(k.lower(), v)\n \n- # Lastly, update the configuration with any command line\n- # settings.\n+ # Lastly, update the configuration with any command line settings.\n for k, v in vars(args).items():\n if v is None:\n continue\n", "issue": "--config doesn't work in GUNICORN_CMD_ARGS\nSpecifying `--config` in the `GUNICORN_CMD_ARGS` environment variable quietly fails as config file loading only happens when it is passed as an argument:\r\nhttps://github.com/benoitc/gunicorn/blob/328e509260ae70de6c04c5ba885ee17960b3ced5/gunicorn/app/base.py#L137-L175\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport traceback\n\nfrom gunicorn._compat import execfile_\nfrom gunicorn import util\nfrom gunicorn.arbiter import Arbiter\nfrom gunicorn.config import Config, get_default_config_file\nfrom gunicorn import debug\n\nclass BaseApplication(object):\n \"\"\"\n An application interface for configuring and loading\n the various necessities for any given web framework.\n \"\"\"\n def __init__(self, usage=None, prog=None):\n self.usage = usage\n self.cfg = None\n self.callable = None\n self.prog = prog\n self.logger = None\n self.do_load_config()\n\n def do_load_config(self):\n \"\"\"\n Loads the configuration\n \"\"\"\n try:\n self.load_default_config()\n self.load_config()\n except Exception as e:\n print(\"\\nError: %s\" % str(e), file=sys.stderr)\n sys.stderr.flush()\n sys.exit(1)\n\n def load_default_config(self):\n # init configuration\n self.cfg = Config(self.usage, prog=self.prog)\n\n def init(self, parser, opts, args):\n raise NotImplementedError\n\n def load(self):\n raise NotImplementedError\n\n def load_config(self):\n \"\"\"\n This method is used to load the configuration from one or several input(s).\n Custom Command line, configuration file.\n You have to override this method in your class.\n \"\"\"\n raise NotImplementedError\n\n def reload(self):\n self.do_load_config()\n if self.cfg.spew:\n debug.spew()\n\n def wsgi(self):\n if self.callable is None:\n self.callable = self.load()\n return self.callable\n\n def run(self):\n try:\n Arbiter(self).run()\n except RuntimeError as e:\n print(\"\\nError: %s\\n\" % e, file=sys.stderr)\n sys.stderr.flush()\n sys.exit(1)\n\nclass Application(BaseApplication):\n\n def get_config_from_filename(self, filename):\n\n if not os.path.exists(filename):\n raise RuntimeError(\"%r doesn't exist\" % filename)\n\n cfg = {\n \"__builtins__\": __builtins__,\n 
\"__name__\": \"__config__\",\n \"__file__\": filename,\n \"__doc__\": None,\n \"__package__\": None\n }\n try:\n execfile_(filename, cfg, cfg)\n except Exception:\n print(\"Failed to read config file: %s\" % filename, file=sys.stderr)\n traceback.print_exc()\n sys.stderr.flush()\n sys.exit(1)\n\n return cfg\n\n def get_config_from_module_name(self, module_name):\n return vars(util.import_module(module_name))\n\n def load_config_from_module_name_or_filename(self, location):\n \"\"\"\n Loads the configuration file: the file is a python file, otherwise raise an RuntimeError\n Exception or stop the process if the configuration file contains a syntax error.\n \"\"\"\n\n if location.startswith(\"python:\"):\n module_name = location[len(\"python:\"):]\n cfg = self.get_config_from_module_name(module_name)\n else:\n if location.startswith(\"file:\"):\n filename = location[len(\"file:\"):]\n else:\n filename = location\n cfg = self.get_config_from_filename(filename)\n\n for k, v in cfg.items():\n # Ignore unknown names\n if k not in self.cfg.settings:\n continue\n try:\n self.cfg.set(k.lower(), v)\n except:\n print(\"Invalid value for %s: %s\\n\" % (k, v), file=sys.stderr)\n sys.stderr.flush()\n raise\n\n return cfg\n\n def load_config_from_file(self, filename):\n return self.load_config_from_module_name_or_filename(location=filename)\n\n def load_config(self):\n # parse console args\n parser = self.cfg.parser()\n args = parser.parse_args()\n\n # optional settings from apps\n cfg = self.init(parser, args, args.args)\n\n # Load up the any app specific configuration\n if cfg:\n for k, v in cfg.items():\n self.cfg.set(k.lower(), v)\n\n if args.config:\n self.load_config_from_file(args.config)\n else:\n default_config = get_default_config_file()\n if default_config is not None:\n self.load_config_from_file(default_config)\n\n # Load up environment configuration\n env_vars = self.cfg.get_cmd_args_from_env()\n if env_vars:\n env_args = parser.parse_args(env_vars)\n for k, v in vars(env_args).items():\n if v is None:\n continue\n if k == \"args\":\n continue\n self.cfg.set(k.lower(), v)\n\n # Lastly, update the configuration with any command line\n # settings.\n for k, v in vars(args).items():\n if v is None:\n continue\n if k == \"args\":\n continue\n self.cfg.set(k.lower(), v)\n\n def run(self):\n if self.cfg.check_config:\n try:\n self.load()\n except:\n msg = \"\\nError while loading the application:\\n\"\n print(msg, file=sys.stderr)\n traceback.print_exc()\n sys.stderr.flush()\n sys.exit(1)\n sys.exit(0)\n\n if self.cfg.spew:\n debug.spew()\n\n if self.cfg.daemon:\n util.daemonize(self.cfg.enable_stdio_inheritance)\n\n # set python paths\n if self.cfg.pythonpath:\n paths = self.cfg.pythonpath.split(\",\")\n for path in paths:\n pythonpath = os.path.abspath(path)\n if pythonpath not in sys.path:\n sys.path.insert(0, pythonpath)\n\n super(Application, self).run()\n", "path": "gunicorn/app/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport traceback\n\nfrom gunicorn._compat import execfile_\nfrom gunicorn import util\nfrom gunicorn.arbiter import Arbiter\nfrom gunicorn.config import Config, get_default_config_file\nfrom gunicorn import debug\n\nclass BaseApplication(object):\n \"\"\"\n An application interface for configuring and loading\n the various necessities for any given web framework.\n \"\"\"\n def 
__init__(self, usage=None, prog=None):\n self.usage = usage\n self.cfg = None\n self.callable = None\n self.prog = prog\n self.logger = None\n self.do_load_config()\n\n def do_load_config(self):\n \"\"\"\n Loads the configuration\n \"\"\"\n try:\n self.load_default_config()\n self.load_config()\n except Exception as e:\n print(\"\\nError: %s\" % str(e), file=sys.stderr)\n sys.stderr.flush()\n sys.exit(1)\n\n def load_default_config(self):\n # init configuration\n self.cfg = Config(self.usage, prog=self.prog)\n\n def init(self, parser, opts, args):\n raise NotImplementedError\n\n def load(self):\n raise NotImplementedError\n\n def load_config(self):\n \"\"\"\n This method is used to load the configuration from one or several input(s).\n Custom Command line, configuration file.\n You have to override this method in your class.\n \"\"\"\n raise NotImplementedError\n\n def reload(self):\n self.do_load_config()\n if self.cfg.spew:\n debug.spew()\n\n def wsgi(self):\n if self.callable is None:\n self.callable = self.load()\n return self.callable\n\n def run(self):\n try:\n Arbiter(self).run()\n except RuntimeError as e:\n print(\"\\nError: %s\\n\" % e, file=sys.stderr)\n sys.stderr.flush()\n sys.exit(1)\n\nclass Application(BaseApplication):\n\n def get_config_from_filename(self, filename):\n\n if not os.path.exists(filename):\n raise RuntimeError(\"%r doesn't exist\" % filename)\n\n cfg = {\n \"__builtins__\": __builtins__,\n \"__name__\": \"__config__\",\n \"__file__\": filename,\n \"__doc__\": None,\n \"__package__\": None\n }\n try:\n execfile_(filename, cfg, cfg)\n except Exception:\n print(\"Failed to read config file: %s\" % filename, file=sys.stderr)\n traceback.print_exc()\n sys.stderr.flush()\n sys.exit(1)\n\n return cfg\n\n def get_config_from_module_name(self, module_name):\n return vars(util.import_module(module_name))\n\n def load_config_from_module_name_or_filename(self, location):\n \"\"\"\n Loads the configuration file: the file is a python file, otherwise raise an RuntimeError\n Exception or stop the process if the configuration file contains a syntax error.\n \"\"\"\n\n if location.startswith(\"python:\"):\n module_name = location[len(\"python:\"):]\n cfg = self.get_config_from_module_name(module_name)\n else:\n if location.startswith(\"file:\"):\n filename = location[len(\"file:\"):]\n else:\n filename = location\n cfg = self.get_config_from_filename(filename)\n\n for k, v in cfg.items():\n # Ignore unknown names\n if k not in self.cfg.settings:\n continue\n try:\n self.cfg.set(k.lower(), v)\n except:\n print(\"Invalid value for %s: %s\\n\" % (k, v), file=sys.stderr)\n sys.stderr.flush()\n raise\n\n return cfg\n\n def load_config_from_file(self, filename):\n return self.load_config_from_module_name_or_filename(location=filename)\n\n def load_config(self):\n # parse console args\n parser = self.cfg.parser()\n args = parser.parse_args()\n\n # optional settings from apps\n cfg = self.init(parser, args, args.args)\n\n # Load up the any app specific configuration\n if cfg:\n for k, v in cfg.items():\n self.cfg.set(k.lower(), v)\n\n env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())\n\n if args.config:\n self.load_config_from_file(args.config)\n elif env_args.config:\n self.load_config_from_file(env_args.config)\n else:\n default_config = get_default_config_file()\n if default_config is not None:\n self.load_config_from_file(default_config)\n\n # Load up environment configuration\n for k, v in vars(env_args).items():\n if v is None:\n continue\n if k == \"args\":\n 
continue\n self.cfg.set(k.lower(), v)\n\n # Lastly, update the configuration with any command line settings.\n for k, v in vars(args).items():\n if v is None:\n continue\n if k == \"args\":\n continue\n self.cfg.set(k.lower(), v)\n\n def run(self):\n if self.cfg.check_config:\n try:\n self.load()\n except:\n msg = \"\\nError while loading the application:\\n\"\n print(msg, file=sys.stderr)\n traceback.print_exc()\n sys.stderr.flush()\n sys.exit(1)\n sys.exit(0)\n\n if self.cfg.spew:\n debug.spew()\n\n if self.cfg.daemon:\n util.daemonize(self.cfg.enable_stdio_inheritance)\n\n # set python paths\n if self.cfg.pythonpath:\n paths = self.cfg.pythonpath.split(\",\")\n for path in paths:\n pythonpath = os.path.abspath(path)\n if pythonpath not in sys.path:\n sys.path.insert(0, pythonpath)\n\n super(Application, self).run()\n", "path": "gunicorn/app/base.py"}]} | 2,122 | 328 |
gh_patches_debug_51516 | rasdani/github-patches | git_diff | pulp__pulpcore-4156 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait`, and Python 3.11 now [raises an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the `BaseDownloader.fetch` call to fail on Python 3.11 (see https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185).
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`
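A minimal sketch of that suggestion for `fetch()`, assuming the module's existing `import asyncio` (an illustration of the idea, not necessarily the final patch):

```python
def fetch(self):
    loop = asyncio.get_event_loop()
    # Wrap the coroutine in a Task explicitly: on Python 3.11 asyncio.wait()
    # only accepts tasks/futures, not bare coroutines.
    task = loop.create_task(self.run())
    done, _ = loop.run_until_complete(asyncio.wait([task]))
    return done.pop().result()
```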
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/download/base.py`
Content:
```
1 from gettext import gettext as _
2
3 import asyncio
4 from collections import namedtuple
5 import logging
6 import os
7 import tempfile
8 from urllib.parse import urlsplit
9
10 from pulpcore.app import pulp_hashlib
11 from pulpcore.app.models import Artifact
12 from pulpcore.exceptions import (
13 DigestValidationError,
14 SizeValidationError,
15 TimeoutException,
16 UnsupportedDigestValidationError,
17 )
18
19
20 log = logging.getLogger(__name__)
21
22
23 DownloadResult = namedtuple("DownloadResult", ["url", "artifact_attributes", "path", "headers"])
24 """
25 Args:
26 url (str): The url corresponding with the download.
27 path (str): The absolute path to the saved file
28 artifact_attributes (dict): Contains keys corresponding with
29 :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values
30 along with size information.
31 headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The
32 values are header content. None when not using the HttpDownloader or sublclass.
33 """
34
35
36 class BaseDownloader:
37 """
38 The base class of all downloaders, providing digest calculation, validation, and file handling.
39
40 This is an abstract class and is meant to be subclassed. Subclasses are required to implement
41 the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:
42
43 1. Pass all downloaded data to
44 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.
45
46 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has
47 been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
48
49 Passing all downloaded data the into
50 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to
51 be computed while data is written to disk. The digests computed are required if the download is
52 to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the
53 data later.
54
55 The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default
56 writes to a random file in the current working directory.
57
58 The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all
59 data written to the file-like object is quiesced to disk before the file-like object has
60 `close()` called on it.
61
62 Attributes:
63 url (str): The url to download.
64 expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the
65 value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}
66 expected_size (int): The number of bytes the download is expected to have.
67 path (str): The full path to the file containing the downloaded data.
68 """
69
70 def __init__(
71 self,
72 url,
73 expected_digests=None,
74 expected_size=None,
75 semaphore=None,
76 *args,
77 **kwargs,
78 ):
79 """
80 Create a BaseDownloader object. This is expected to be called by all subclasses.
81
82 Args:
83 url (str): The url to download.
84 expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the
85 value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}
86 expected_size (int): The number of bytes the download is expected to have.
87 semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.
88 Useful for limiting the number of outstanding downloaders in various ways.
89 """
90
91 self.url = url
92 self._writer = None
93 self.path = None
94 self.expected_digests = expected_digests
95 self.expected_size = expected_size
96 if semaphore:
97 self.semaphore = semaphore
98 else:
99 self.semaphore = asyncio.Semaphore() # This will always be acquired
100 self._digests = {}
101 self._size = 0
102 if self.expected_digests:
103 if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):
104 raise UnsupportedDigestValidationError(
105 _(
106 "Content at the URL '{}' does not contain at least one trusted hasher which"
107 " is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The"
108 " downloader expected one of the following hashers: {}"
109 ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))
110 )
111
112 def _ensure_writer_has_open_file(self):
113 """
114 Create a temporary file on demand.
115
116 Create a temporary file when it's actually used,
117 allowing plugin writers to instantiate many downloaders in memory.
118 """
119 if not self._writer:
120 filename = urlsplit(self.url).path.split("/")[-1]
121 # linux allows any character except NUL or / in a filename and has a length limit of
122 # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded
123 # paths should be OK
124 is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length
125 # if the filename isn't legal then we just fall back to no suffix (random name)
126 suffix = "-" + filename if is_legal_filename else None
127 # write the file to the current working directory with a random prefix and the
128 # desired suffix. we always want the random prefix as it is possible to download
129 # the same filename from two different URLs, and the files may not be the same.
130 self._writer = tempfile.NamedTemporaryFile(dir=".", suffix=suffix, delete=False)
131 self.path = self._writer.name
132 self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}
133 self._size = 0
134
135 async def handle_data(self, data):
136 """
137 A coroutine that writes data to the file object and compute its digests.
138
139 All subclassed downloaders are expected to pass all data downloaded to this method. Similar
140 to the hashlib docstring, repeated calls are equivalent to a single call with
141 the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to
142 m.handle_data(a+b).
143
144 Args:
145 data (bytes): The data to be handled by the downloader.
146 """
147 self._ensure_writer_has_open_file()
148 self._writer.write(data)
149 self._record_size_and_digests_for_data(data)
150
151 async def finalize(self):
152 """
153 A coroutine to flush downloaded data, close the file writer, and validate the data.
154
155 All subclasses are required to call this method after all data has been passed to
156 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
157
158 Raises:
159 :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``
160 values don't match the digest of the data passed to
161 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
162 :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value
163 doesn't match the size of the data passed to
164 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
165 """
166 self._ensure_writer_has_open_file()
167 self._writer.flush()
168 os.fsync(self._writer.fileno())
169 self._writer.close()
170 self._writer = None
171 self.validate_digests()
172 self.validate_size()
173 log.debug(f"Downloaded file from {self.url}")
174
175 def fetch(self):
176 """
177 Run the download synchronously and return the `DownloadResult`.
178
179 Returns:
180 :class:`~pulpcore.plugin.download.DownloadResult`
181
182 Raises:
183 Exception: Any fatal exception emitted during downloading
184 """
185 done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
186 return done.pop().result()
187
188 def _record_size_and_digests_for_data(self, data):
189 """
190 Record the size and digest for an available chunk of data.
191
192 Args:
193 data (bytes): The data to have its size and digest values recorded.
194 """
195 for algorithm in self._digests.values():
196 algorithm.update(data)
197 self._size += len(data)
198
199 @property
200 def artifact_attributes(self):
201 """
202 A property that returns a dictionary with size and digest information. The keys of this
203 dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.
204 """
205 attributes = {"size": self._size}
206 for algorithm in self._digests:
207 attributes[algorithm] = self._digests[algorithm].hexdigest()
208 return attributes
209
210 def validate_digests(self):
211 """
212 Validate all digests validate if ``expected_digests`` is set
213
214 Raises:
215 :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``
216 values don't match the digest of the data passed to
217 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
218 """
219 if self.expected_digests:
220 for algorithm, expected_digest in self.expected_digests.items():
221 actual_digest = self._digests[algorithm].hexdigest()
222 if actual_digest != expected_digest:
223 raise DigestValidationError(actual_digest, expected_digest, url=self.url)
224
225 def validate_size(self):
226 """
227 Validate the size if ``expected_size`` is set
228
229 Raises:
230 :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value
231 doesn't match the size of the data passed to
232 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
233 """
234 if self.expected_size:
235 actual_size = self._size
236 expected_size = self.expected_size
237 if actual_size != expected_size:
238 raise SizeValidationError(actual_size, expected_size, url=self.url)
239
240 async def run(self, extra_data=None):
241 """
242 Run the downloader with concurrency restriction.
243
244 This method acquires `self.semaphore` before calling the actual download implementation
245 contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`
246 decorator on `_run()`, handles backoff-and-retry logic.
247
248 Args:
249 extra_data (dict): Extra data passed to the downloader.
250
251 Returns:
252 :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.
253
254 """
255 async with self.semaphore:
256 try:
257 return await self._run(extra_data=extra_data)
258 except asyncio.TimeoutError:
259 raise TimeoutException(self.url)
260
261 async def _run(self, extra_data=None):
262 """
263 Run the downloader.
264
265 This is a coroutine that asyncio can schedule to complete downloading. Subclasses are
266 required to implement this method and do two things:
267
268 1. Pass all downloaded data to
269 :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
270
271 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has
272 been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
273
274 It is also expected that the subclass implementation return a
275 :class:`~pulpcore.plugin.download.DownloadResult` object. The
276 ``artifact_attributes`` value of the
277 :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the
278 :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.
279
280 This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which
281 handles concurrency restriction. Thus, by the time this method is called, the download can
282 occur without violating the concurrency restriction.
283
284 Args:
285 extra_data (dict): Extra data passed to the downloader.
286
287 Returns:
288 :class:`~pulpcore.plugin.download.DownloadResult`
289
290 Raises:
291 Validation errors could be emitted when subclassed implementations call
292 :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.
293 """
294 raise NotImplementedError("Subclasses must define a _run() method that returns a coroutine")
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -182,8 +182,8 @@
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| {"golden_diff": "diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py\n--- a/pulpcore/download/base.py\n+++ b/pulpcore/download/base.py\n@@ -182,8 +182,8 @@\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))\n- return done.pop().result()\n+ result = asyncio.get_event_loop().run_until_complete(self.run())\n+ return result\n \n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n", "issue": "BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11\nPython 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .\r\n\r\nPython provides the solution in the error message: \"Passing coroutines is forbidden, use tasks explicitly.\"\r\n\r\nI believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`\n", "before_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\nfrom collections import namedtuple\nimport logging\nimport os\nimport tempfile\nfrom urllib.parse import urlsplit\n\nfrom pulpcore.app import pulp_hashlib\nfrom pulpcore.app.models import Artifact\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n UnsupportedDigestValidationError,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nDownloadResult = namedtuple(\"DownloadResult\", [\"url\", \"artifact_attributes\", \"path\", \"headers\"])\n\"\"\"\nArgs:\n url (str): The url corresponding with the download.\n path (str): The absolute path to the saved file\n artifact_attributes (dict): Contains keys corresponding with\n :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values\n along with size information.\n headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The\n values are header content. None when not using the HttpDownloader or sublclass.\n\"\"\"\n\n\nclass BaseDownloader:\n \"\"\"\n The base class of all downloaders, providing digest calculation, validation, and file handling.\n\n This is an abstract class and is meant to be subclassed. Subclasses are required to implement\n the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.\n\n 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Passing all downloaded data the into\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to\n be computed while data is written to disk. 
The digests computed are required if the download is\n to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the\n data later.\n\n The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default\n writes to a random file in the current working directory.\n\n The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all\n data written to the file-like object is quiesced to disk before the file-like object has\n `close()` called on it.\n\n Attributes:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n path (str): The full path to the file containing the downloaded data.\n \"\"\"\n\n def __init__(\n self,\n url,\n expected_digests=None,\n expected_size=None,\n semaphore=None,\n *args,\n **kwargs,\n ):\n \"\"\"\n Create a BaseDownloader object. This is expected to be called by all subclasses.\n\n Args:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.\n Useful for limiting the number of outstanding downloaders in various ways.\n \"\"\"\n\n self.url = url\n self._writer = None\n self.path = None\n self.expected_digests = expected_digests\n self.expected_size = expected_size\n if semaphore:\n self.semaphore = semaphore\n else:\n self.semaphore = asyncio.Semaphore() # This will always be acquired\n self._digests = {}\n self._size = 0\n if self.expected_digests:\n if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):\n raise UnsupportedDigestValidationError(\n _(\n \"Content at the URL '{}' does not contain at least one trusted hasher which\"\n \" is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The\"\n \" downloader expected one of the following hashers: {}\"\n ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))\n )\n\n def _ensure_writer_has_open_file(self):\n \"\"\"\n Create a temporary file on demand.\n\n Create a temporary file when it's actually used,\n allowing plugin writers to instantiate many downloaders in memory.\n \"\"\"\n if not self._writer:\n filename = urlsplit(self.url).path.split(\"/\")[-1]\n # linux allows any character except NUL or / in a filename and has a length limit of\n # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded\n # paths should be OK\n is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length\n # if the filename isn't legal then we just fall back to no suffix (random name)\n suffix = \"-\" + filename if is_legal_filename else None\n # write the file to the current working directory with a random prefix and the\n # desired suffix. 
we always want the random prefix as it is possible to download\n # the same filename from two different URLs, and the files may not be the same.\n self._writer = tempfile.NamedTemporaryFile(dir=\".\", suffix=suffix, delete=False)\n self.path = self._writer.name\n self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}\n self._size = 0\n\n async def handle_data(self, data):\n \"\"\"\n A coroutine that writes data to the file object and compute its digests.\n\n All subclassed downloaders are expected to pass all data downloaded to this method. Similar\n to the hashlib docstring, repeated calls are equivalent to a single call with\n the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to\n m.handle_data(a+b).\n\n Args:\n data (bytes): The data to be handled by the downloader.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.write(data)\n self._record_size_and_digests_for_data(data)\n\n async def finalize(self):\n \"\"\"\n A coroutine to flush downloaded data, close the file writer, and validate the data.\n\n All subclasses are required to call this method after all data has been passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.flush()\n os.fsync(self._writer.fileno())\n self._writer.close()\n self._writer = None\n self.validate_digests()\n self.validate_size()\n log.debug(f\"Downloaded file from {self.url}\")\n\n def fetch(self):\n \"\"\"\n Run the download synchronously and return the `DownloadResult`.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))\n return done.pop().result()\n\n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n Record the size and digest for an available chunk of data.\n\n Args:\n data (bytes): The data to have its size and digest values recorded.\n \"\"\"\n for algorithm in self._digests.values():\n algorithm.update(data)\n self._size += len(data)\n\n @property\n def artifact_attributes(self):\n \"\"\"\n A property that returns a dictionary with size and digest information. 
The keys of this\n dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.\n \"\"\"\n attributes = {\"size\": self._size}\n for algorithm in self._digests:\n attributes[algorithm] = self._digests[algorithm].hexdigest()\n return attributes\n\n def validate_digests(self):\n \"\"\"\n Validate all digests validate if ``expected_digests`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_digests:\n for algorithm, expected_digest in self.expected_digests.items():\n actual_digest = self._digests[algorithm].hexdigest()\n if actual_digest != expected_digest:\n raise DigestValidationError(actual_digest, expected_digest, url=self.url)\n\n def validate_size(self):\n \"\"\"\n Validate the size if ``expected_size`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_size:\n actual_size = self._size\n expected_size = self.expected_size\n if actual_size != expected_size:\n raise SizeValidationError(actual_size, expected_size, url=self.url)\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\n decorator on `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n async with self.semaphore:\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Run the downloader.\n\n This is a coroutine that asyncio can schedule to complete downloading. Subclasses are\n required to implement this method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n It is also expected that the subclass implementation return a\n :class:`~pulpcore.plugin.download.DownloadResult` object. The\n ``artifact_attributes`` value of the\n :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the\n :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.\n\n This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which\n handles concurrency restriction. 
Thus, by the time this method is called, the download can\n occur without violating the concurrency restriction.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Validation errors could be emitted when subclassed implementations call\n :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.\n \"\"\"\n raise NotImplementedError(\"Subclasses must define a _run() method that returns a coroutine\")\n", "path": "pulpcore/download/base.py"}], "after_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\nfrom collections import namedtuple\nimport logging\nimport os\nimport tempfile\nfrom urllib.parse import urlsplit\n\nfrom pulpcore.app import pulp_hashlib\nfrom pulpcore.app.models import Artifact\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n UnsupportedDigestValidationError,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nDownloadResult = namedtuple(\"DownloadResult\", [\"url\", \"artifact_attributes\", \"path\", \"headers\"])\n\"\"\"\nArgs:\n url (str): The url corresponding with the download.\n path (str): The absolute path to the saved file\n artifact_attributes (dict): Contains keys corresponding with\n :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values\n along with size information.\n headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The\n values are header content. None when not using the HttpDownloader or sublclass.\n\"\"\"\n\n\nclass BaseDownloader:\n \"\"\"\n The base class of all downloaders, providing digest calculation, validation, and file handling.\n\n This is an abstract class and is meant to be subclassed. Subclasses are required to implement\n the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.\n\n 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Passing all downloaded data the into\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to\n be computed while data is written to disk. The digests computed are required if the download is\n to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the\n data later.\n\n The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default\n writes to a random file in the current working directory.\n\n The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all\n data written to the file-like object is quiesced to disk before the file-like object has\n `close()` called on it.\n\n Attributes:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n path (str): The full path to the file containing the downloaded data.\n \"\"\"\n\n def __init__(\n self,\n url,\n expected_digests=None,\n expected_size=None,\n semaphore=None,\n *args,\n **kwargs,\n ):\n \"\"\"\n Create a BaseDownloader object. 
This is expected to be called by all subclasses.\n\n Args:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.\n Useful for limiting the number of outstanding downloaders in various ways.\n \"\"\"\n\n self.url = url\n self._writer = None\n self.path = None\n self.expected_digests = expected_digests\n self.expected_size = expected_size\n if semaphore:\n self.semaphore = semaphore\n else:\n self.semaphore = asyncio.Semaphore() # This will always be acquired\n self._digests = {}\n self._size = 0\n if self.expected_digests:\n if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):\n raise UnsupportedDigestValidationError(\n _(\n \"Content at the URL '{}' does not contain at least one trusted hasher which\"\n \" is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The\"\n \" downloader expected one of the following hashers: {}\"\n ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))\n )\n\n def _ensure_writer_has_open_file(self):\n \"\"\"\n Create a temporary file on demand.\n\n Create a temporary file when it's actually used,\n allowing plugin writers to instantiate many downloaders in memory.\n \"\"\"\n if not self._writer:\n filename = urlsplit(self.url).path.split(\"/\")[-1]\n # linux allows any character except NUL or / in a filename and has a length limit of\n # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded\n # paths should be OK\n is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length\n # if the filename isn't legal then we just fall back to no suffix (random name)\n suffix = \"-\" + filename if is_legal_filename else None\n # write the file to the current working directory with a random prefix and the\n # desired suffix. we always want the random prefix as it is possible to download\n # the same filename from two different URLs, and the files may not be the same.\n self._writer = tempfile.NamedTemporaryFile(dir=\".\", suffix=suffix, delete=False)\n self.path = self._writer.name\n self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}\n self._size = 0\n\n async def handle_data(self, data):\n \"\"\"\n A coroutine that writes data to the file object and compute its digests.\n\n All subclassed downloaders are expected to pass all data downloaded to this method. 
Similar\n to the hashlib docstring, repeated calls are equivalent to a single call with\n the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to\n m.handle_data(a+b).\n\n Args:\n data (bytes): The data to be handled by the downloader.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.write(data)\n self._record_size_and_digests_for_data(data)\n\n async def finalize(self):\n \"\"\"\n A coroutine to flush downloaded data, close the file writer, and validate the data.\n\n All subclasses are required to call this method after all data has been passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.flush()\n os.fsync(self._writer.fileno())\n self._writer.close()\n self._writer = None\n self.validate_digests()\n self.validate_size()\n log.debug(f\"Downloaded file from {self.url}\")\n\n def fetch(self):\n \"\"\"\n Run the download synchronously and return the `DownloadResult`.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n result = asyncio.get_event_loop().run_until_complete(self.run())\n return result\n\n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n Record the size and digest for an available chunk of data.\n\n Args:\n data (bytes): The data to have its size and digest values recorded.\n \"\"\"\n for algorithm in self._digests.values():\n algorithm.update(data)\n self._size += len(data)\n\n @property\n def artifact_attributes(self):\n \"\"\"\n A property that returns a dictionary with size and digest information. 
The keys of this\n dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.\n \"\"\"\n attributes = {\"size\": self._size}\n for algorithm in self._digests:\n attributes[algorithm] = self._digests[algorithm].hexdigest()\n return attributes\n\n def validate_digests(self):\n \"\"\"\n Validate all digests validate if ``expected_digests`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_digests:\n for algorithm, expected_digest in self.expected_digests.items():\n actual_digest = self._digests[algorithm].hexdigest()\n if actual_digest != expected_digest:\n raise DigestValidationError(actual_digest, expected_digest, url=self.url)\n\n def validate_size(self):\n \"\"\"\n Validate the size if ``expected_size`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_size:\n actual_size = self._size\n expected_size = self.expected_size\n if actual_size != expected_size:\n raise SizeValidationError(actual_size, expected_size, url=self.url)\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\n decorator on `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n async with self.semaphore:\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Run the downloader.\n\n This is a coroutine that asyncio can schedule to complete downloading. Subclasses are\n required to implement this method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n It is also expected that the subclass implementation return a\n :class:`~pulpcore.plugin.download.DownloadResult` object. The\n ``artifact_attributes`` value of the\n :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the\n :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.\n\n This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which\n handles concurrency restriction. Thus, by the time this method is called, the download can\n occur without violating the concurrency restriction.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Validation errors could be emitted when subclassed implementations call\n :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.\n \"\"\"\n raise NotImplementedError(\"Subclasses must define a _run() method that returns a coroutine\")\n", "path": "pulpcore/download/base.py"}]} | 3,894 | 131 |
gh_patches_debug_6594 | rasdani/github-patches | git_diff | pytorch__ignite-1192 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DistributedProxySampler RuntimeError when indices are padded
## 🐛 Bug description
The RuntimeError raised by `DistributedProxySampler` on [line 241](https://github.com/pytorch/ignite/blob/master/ignite/distributed/auto.py#L241) shouldn't be there, since the indices are padded with the full sample, a behaviour that was updated because of [this comment](https://github.com/pytorch/pytorch/issues/23430#issuecomment-597191137).
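A hand-built illustration of the failure mode (the numbers are made up, and the padding loop below only paraphrases the sampler's behaviour):

```python
world_size = 3
sample = [0, 1, 2, 3]             # the wrapped sampler yields 4 indices per pass
total_size = 2 * world_size       # 2 samples per replica -> 6 indices expected

indices = []
while len(indices) < total_size:  # padding appends the *full* sample each pass
    indices += sample

print(len(indices))               # 8, so a strict len(indices) != total_size check raises
```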
## Environment
- PyTorch Version (e.g., 1.4):
- Ignite Version (e.g., 0.3.0):
- OS (e.g., Linux):
- How you installed Ignite (`conda`, `pip`, source):
- Python version:
- Any other relevant information:
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/distributed/auto.py`
Content:
```
1 import warnings
2
3 import torch
4 import torch.nn as nn
5 from torch.optim.optimizer import Optimizer
6 from torch.utils.data import DataLoader, Dataset
7 from torch.utils.data.distributed import DistributedSampler
8 from torch.utils.data.sampler import Sampler
9
10 from ignite.distributed import utils as idist
11 from ignite.distributed.comp_models import native as idist_native
12 from ignite.distributed.comp_models import xla as idist_xla
13 from ignite.utils import setup_logger
14
15 __all__ = ["auto_dataloader", "auto_model", "auto_optim", "DistributedProxySampler"]
16
17
18 def auto_dataloader(dataset, **kwargs):
19 """Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting
20 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
21
22 Internally, we create a dataloader with provided kwargs while applying the following updates:
23
24 - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.
25 - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.
26 - if no sampler provided by user, `torch DistributedSampler` is setup.
27 - if a sampler is provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.
28 - if the default device is 'cuda', `pin_memory` is automatically set to `True`.
29
30 .. warning::
31
32 Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch
33 sampler is compatible with distributed configuration.
34
35 Examples:
36
37 .. code-block:: python
38
39 import ignite.distribted as idist
40
41 train_loader = idist.auto_dataloader(
42 train_dataset,
43 batch_size=32,
44 num_workers=4,
45 shuffle=True,
46 pin_memory="cuda" in idist.device().type,
47 drop_last=True,
48 )
49
50 Args:
51 dataset (Dataset): input torch dataset
52 **kwargs: keyword arguments for `torch DataLoader`_.
53
54 Returns:
55 `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices
56
57 .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader
58 .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178
59 .. _torch DistributedSampler:
60 https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
61 """
62 rank = idist.get_rank()
63 world_size = idist.get_world_size()
64
65 logger = setup_logger(__name__ + ".auto_dataloader")
66 if world_size > 1:
67 if "batch_size" in kwargs and kwargs["batch_size"] >= world_size:
68 kwargs["batch_size"] //= world_size
69
70 nproc = idist.get_nproc_per_node()
71 if "num_workers" in kwargs and kwargs["num_workers"] >= nproc:
72 kwargs["num_workers"] = (kwargs["num_workers"] + nproc - 1) // nproc
73
74 if "batch_sampler" not in kwargs:
75 if kwargs.get("sampler", None) is not None:
76 sampler = DistributedProxySampler(kwargs["sampler"], num_replicas=world_size, rank=rank)
77 else:
78 sampler = DistributedSampler(
79 dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get("shuffle", True)
80 )
81 # we need to remove "shuffle" from kwargs if sampler is used
82 if "shuffle" in kwargs:
83 del kwargs["shuffle"]
84
85 kwargs["sampler"] = sampler
86 else:
87 warnings.warn(
88 "Found batch_sampler in provided kwargs. Please, make sure that it is compatible "
89 "with distributed configuration"
90 )
91
92 if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get("pin_memory", False):
93 # TODO: How about XLA GPU ?
94 warnings.warn(
95 "Found incompatible options: xla support and pin_memory args equal True. "
96 "Argument `pin_memory=False` will be used to construct data loader."
97 )
98 kwargs["pin_memory"] = False
99 else:
100 kwargs["pin_memory"] = kwargs.get("pin_memory", "cuda" in idist.device().type)
101
102 logger.info("Use data loader kwargs for dataset '{}': \n\t{}".format(repr(dataset)[:20].strip(), kwargs))
103 dataloader = DataLoader(dataset, **kwargs)
104
105 if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:
106
107 logger.info("DataLoader is wrapped by `MpDeviceLoader` on XLA")
108
109 mp_device_loader_cls = _MpDeviceLoader
110 try:
111 from torch_xla.distributed.parallel_loader import MpDeviceLoader
112
113 mp_device_loader_cls = MpDeviceLoader
114 except ImportError:
115 pass
116
117 sampler = dataloader.sampler
118 dataloader = mp_device_loader_cls(dataloader, idist.device())
119 dataloader.sampler = sampler
120
121 return dataloader
122
123
124 def auto_model(model: nn.Module) -> nn.Module:
125 """Helper method to adapt provided model for non-distributed and distributed configurations (supporting
126 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
127
128 Internally, we perform to following:
129
130 - send model to current :meth:`~ignite.distributed.utils.device()`.
131 - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1
132 - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.
133
134 Examples:
135
136 .. code-block:: python
137
138 import ignite.distribted as idist
139
140 model = idist.auto_model(model)
141
142 Args:
143 model (torch.nn.Module): model to adapt.
144
145 Returns:
146 torch.nn.Module
147
148 .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel
149 .. _torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel
150 """
151 logger = setup_logger(__name__ + ".auto_model")
152
153 model.to(idist.device())
154
155 # distributed data parallel model
156 if idist.get_world_size() > 1:
157 if idist.backend() == idist_native.NCCL:
158 lrank = idist.get_local_rank()
159 logger.info("Apply torch DistributedDataParallel on model, device id: {}".format(lrank))
160 model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])
161 elif idist.backend() == idist_native.GLOO:
162 logger.info("Apply torch DistributedDataParallel on model")
163 model = torch.nn.parallel.DistributedDataParallel(model)
164
165 # not distributed but multiple GPUs reachable so data parallel model
166 elif torch.cuda.device_count() > 1 and "cuda" in idist.device().type:
167 logger.info("Apply torch DataParallel on model")
168 model = torch.nn.parallel.DataParallel(model)
169
170 return model
171
172
173 def auto_optim(optimizer: Optimizer) -> Optimizer:
174 """Helper method to adapt optimizer for non-distributed and distributed configurations (supporting
175 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
176
177 Internally, this method is no-op for non-distributed and torch native distributed configuration.
178 For XLA distributed configuration, we create a new class that inherits from provided optimizer.
179 The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.
180
181 Examples:
182
183 .. code-block:: python
184
185 import ignite.distribted as idist
186
187 optimizer = idist.auto_optim(optimizer)
188
189
190 Args:
191 optimizer (Optimizer): input torch optimizer
192
193 Returns:
194 Optimizer
195
196 .. _xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step
197
198 """
199 if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):
200 return optimizer
201
202 cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))
203 return cls(optimizer)
204
205
206 class DistributedProxySampler(DistributedSampler):
207 """Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.
208
209 Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407
210
211
212 .. note::
213 Input sampler is assumed to have a constant size.
214
215 Args:
216 sampler (Sampler): Input torch data sampler.
217 num_replicas (int, optional): Number of processes participating in distributed training.
218 rank (int, optional): Rank of the current process within ``num_replicas``.
219
220 """
221
222 def __init__(self, sampler: Sampler, num_replicas=None, rank=None):
223
224 if not isinstance(sampler, Sampler):
225 raise TypeError("Argument sampler should be instance of torch Sampler, but given: {}".format(type(sampler)))
226
227 if not hasattr(sampler, "__len__"):
228 raise TypeError("Argument sampler should have length")
229
230 super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)
231 self.sampler = sampler
232
233 def __iter__(self):
234 # deterministically shuffle based on epoch
235 torch.manual_seed(self.epoch)
236
237 indices = []
238 while len(indices) < self.total_size:
239 indices += list(self.sampler)
240
241 if len(indices) != self.total_size:
242 raise RuntimeError("{} vs {}".format(len(indices), self.total_size))
243
244 # subsample
245 indices = indices[self.rank : self.total_size : self.num_replicas]
246 if len(indices) != self.num_samples:
247 raise RuntimeError("{} vs {}".format(len(indices), self.num_samples))
248
249 return iter(indices)
250
251
252 if idist.has_xla_support:
253
254 import torch_xla.core.xla_model as xm
255 from torch_xla.distributed.parallel_loader import ParallelLoader
256
257 class _MpDeviceLoader:
258 # https://github.com/pytorch/xla/pull/2117
259 # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available
260 def __init__(self, loader, device, **kwargs):
261 self._loader = loader
262 self._device = device
263 self._parallel_loader_kwargs = kwargs
264
265 def __iter__(self):
266 parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)
267 return parallel_loader.per_device_loader(self._device)
268
269 def __len__(self):
270 return len(self._loader)
271
272 class _XLADistributedOptimizer(Optimizer):
273 def __init__(self, optimizer):
274 super(self.__class__, self).__init__(optimizer.param_groups)
275 self.wrapped_optimizer = optimizer
276
277 def step(self, closure=None):
278 xm.optimizer_step(self.wrapped_optimizer, barrier=True)
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py
--- a/ignite/distributed/auto.py
+++ b/ignite/distributed/auto.py
@@ -238,8 +238,8 @@
while len(indices) < self.total_size:
indices += list(self.sampler)
- if len(indices) != self.total_size:
- raise RuntimeError("{} vs {}".format(len(indices), self.total_size))
+ if len(indices) > self.total_size:
+ indices = indices[: self.total_size]
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
| {"golden_diff": "diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py\n--- a/ignite/distributed/auto.py\n+++ b/ignite/distributed/auto.py\n@@ -238,8 +238,8 @@\n while len(indices) < self.total_size:\n indices += list(self.sampler)\n \n- if len(indices) != self.total_size:\n- raise RuntimeError(\"{} vs {}\".format(len(indices), self.total_size))\n+ if len(indices) > self.total_size:\n+ indices = indices[: self.total_size]\n \n # subsample\n indices = indices[self.rank : self.total_size : self.num_replicas]\n", "issue": "DistributedProxySampler RuntimeError when indices are padded\n## \ud83d\udc1b Bug description\r\n\r\nThe RuntimeError that occurs in the `DistributedProxySampler` on [line 241](https://github.com/pytorch/ignite/blob/master/ignite/distributed/auto.py#L241) shouldn't be there since the indices are padded with the full sample which was updated because of [this comment](https://github.com/pytorch/pytorch/issues/23430#issuecomment-597191137).\r\n\r\n## Environment\r\n\r\n - PyTorch Version (e.g., 1.4):\r\n - Ignite Version (e.g., 0.3.0):\r\n - OS (e.g., Linux):\r\n - How you installed Ignite (`conda`, `pip`, source):\r\n - Python version:\r\n - Any other relevant information:\r\n\n", "before_files": [{"content": "import warnings\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import Sampler\n\nfrom ignite.distributed import utils as idist\nfrom ignite.distributed.comp_models import native as idist_native\nfrom ignite.distributed.comp_models import xla as idist_xla\nfrom ignite.utils import setup_logger\n\n__all__ = [\"auto_dataloader\", \"auto_model\", \"auto_optim\", \"DistributedProxySampler\"]\n\n\ndef auto_dataloader(dataset, **kwargs):\n \"\"\"Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we create a dataloader with provided kwargs while applying the following updates:\n\n - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.\n - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.\n - if no sampler provided by user, `torch DistributedSampler` is setup.\n - if a sampler is provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.\n - if the default device is 'cuda', `pin_memory` is automatically set to `True`.\n\n .. warning::\n\n Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch\n sampler is compatible with distributed configuration.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n train_loader = idist.auto_dataloader(\n train_dataset,\n batch_size=32,\n num_workers=4,\n shuffle=True,\n pin_memory=\"cuda\" in idist.device().type,\n drop_last=True,\n )\n\n Args:\n dataset (Dataset): input torch dataset\n **kwargs: keyword arguments for `torch DataLoader`_.\n\n Returns:\n `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices\n\n .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\n .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178\n .. 
_torch DistributedSampler:\n https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler\n \"\"\"\n rank = idist.get_rank()\n world_size = idist.get_world_size()\n\n logger = setup_logger(__name__ + \".auto_dataloader\")\n if world_size > 1:\n if \"batch_size\" in kwargs and kwargs[\"batch_size\"] >= world_size:\n kwargs[\"batch_size\"] //= world_size\n\n nproc = idist.get_nproc_per_node()\n if \"num_workers\" in kwargs and kwargs[\"num_workers\"] >= nproc:\n kwargs[\"num_workers\"] = (kwargs[\"num_workers\"] + nproc - 1) // nproc\n\n if \"batch_sampler\" not in kwargs:\n if kwargs.get(\"sampler\", None) is not None:\n sampler = DistributedProxySampler(kwargs[\"sampler\"], num_replicas=world_size, rank=rank)\n else:\n sampler = DistributedSampler(\n dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get(\"shuffle\", True)\n )\n # we need to remove \"shuffle\" from kwargs if sampler is used\n if \"shuffle\" in kwargs:\n del kwargs[\"shuffle\"]\n\n kwargs[\"sampler\"] = sampler\n else:\n warnings.warn(\n \"Found batch_sampler in provided kwargs. Please, make sure that it is compatible \"\n \"with distributed configuration\"\n )\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get(\"pin_memory\", False):\n # TODO: How about XLA GPU ?\n warnings.warn(\n \"Found incompatible options: xla support and pin_memory args equal True. \"\n \"Argument `pin_memory=False` will be used to construct data loader.\"\n )\n kwargs[\"pin_memory\"] = False\n else:\n kwargs[\"pin_memory\"] = kwargs.get(\"pin_memory\", \"cuda\" in idist.device().type)\n\n logger.info(\"Use data loader kwargs for dataset '{}': \\n\\t{}\".format(repr(dataset)[:20].strip(), kwargs))\n dataloader = DataLoader(dataset, **kwargs)\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:\n\n logger.info(\"DataLoader is wrapped by `MpDeviceLoader` on XLA\")\n\n mp_device_loader_cls = _MpDeviceLoader\n try:\n from torch_xla.distributed.parallel_loader import MpDeviceLoader\n\n mp_device_loader_cls = MpDeviceLoader\n except ImportError:\n pass\n\n sampler = dataloader.sampler\n dataloader = mp_device_loader_cls(dataloader, idist.device())\n dataloader.sampler = sampler\n\n return dataloader\n\n\ndef auto_model(model: nn.Module) -> nn.Module:\n \"\"\"Helper method to adapt provided model for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we perform to following:\n\n - send model to current :meth:`~ignite.distributed.utils.device()`.\n - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1\n - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n model = idist.auto_model(model)\n\n Args:\n model (torch.nn.Module): model to adapt.\n\n Returns:\n torch.nn.Module\n\n .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel\n .. 
_torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel\n \"\"\"\n logger = setup_logger(__name__ + \".auto_model\")\n\n model.to(idist.device())\n\n # distributed data parallel model\n if idist.get_world_size() > 1:\n if idist.backend() == idist_native.NCCL:\n lrank = idist.get_local_rank()\n logger.info(\"Apply torch DistributedDataParallel on model, device id: {}\".format(lrank))\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])\n elif idist.backend() == idist_native.GLOO:\n logger.info(\"Apply torch DistributedDataParallel on model\")\n model = torch.nn.parallel.DistributedDataParallel(model)\n\n # not distributed but multiple GPUs reachable so data parallel model\n elif torch.cuda.device_count() > 1 and \"cuda\" in idist.device().type:\n logger.info(\"Apply torch DataParallel on model\")\n model = torch.nn.parallel.DataParallel(model)\n\n return model\n\n\ndef auto_optim(optimizer: Optimizer) -> Optimizer:\n \"\"\"Helper method to adapt optimizer for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, this method is no-op for non-distributed and torch native distributed configuration.\n For XLA distributed configuration, we create a new class that inherits from provided optimizer.\n The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n optimizer = idist.auto_optim(optimizer)\n\n\n Args:\n optimizer (Optimizer): input torch optimizer\n\n Returns:\n Optimizer\n\n .. _xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step\n\n \"\"\"\n if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):\n return optimizer\n\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))\n return cls(optimizer)\n\n\nclass DistributedProxySampler(DistributedSampler):\n \"\"\"Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.\n\n Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407\n\n\n .. 
note::\n Input sampler is assumed to have a constant size.\n\n Args:\n sampler (Sampler): Input torch data sampler.\n num_replicas (int, optional): Number of processes participating in distributed training.\n rank (int, optional): Rank of the current process within ``num_replicas``.\n\n \"\"\"\n\n def __init__(self, sampler: Sampler, num_replicas=None, rank=None):\n\n if not isinstance(sampler, Sampler):\n raise TypeError(\"Argument sampler should be instance of torch Sampler, but given: {}\".format(type(sampler)))\n\n if not hasattr(sampler, \"__len__\"):\n raise TypeError(\"Argument sampler should have length\")\n\n super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)\n self.sampler = sampler\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n torch.manual_seed(self.epoch)\n\n indices = []\n while len(indices) < self.total_size:\n indices += list(self.sampler)\n\n if len(indices) != self.total_size:\n raise RuntimeError(\"{} vs {}\".format(len(indices), self.total_size))\n\n # subsample\n indices = indices[self.rank : self.total_size : self.num_replicas]\n if len(indices) != self.num_samples:\n raise RuntimeError(\"{} vs {}\".format(len(indices), self.num_samples))\n\n return iter(indices)\n\n\nif idist.has_xla_support:\n\n import torch_xla.core.xla_model as xm\n from torch_xla.distributed.parallel_loader import ParallelLoader\n\n class _MpDeviceLoader:\n # https://github.com/pytorch/xla/pull/2117\n # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available\n def __init__(self, loader, device, **kwargs):\n self._loader = loader\n self._device = device\n self._parallel_loader_kwargs = kwargs\n\n def __iter__(self):\n parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)\n return parallel_loader.per_device_loader(self._device)\n\n def __len__(self):\n return len(self._loader)\n\n class _XLADistributedOptimizer(Optimizer):\n def __init__(self, optimizer):\n super(self.__class__, self).__init__(optimizer.param_groups)\n self.wrapped_optimizer = optimizer\n\n def step(self, closure=None):\n xm.optimizer_step(self.wrapped_optimizer, barrier=True)\n", "path": "ignite/distributed/auto.py"}], "after_files": [{"content": "import warnings\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import Sampler\n\nfrom ignite.distributed import utils as idist\nfrom ignite.distributed.comp_models import native as idist_native\nfrom ignite.distributed.comp_models import xla as idist_xla\nfrom ignite.utils import setup_logger\n\n__all__ = [\"auto_dataloader\", \"auto_model\", \"auto_optim\", \"DistributedProxySampler\"]\n\n\ndef auto_dataloader(dataset, **kwargs):\n \"\"\"Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we create a dataloader with provided kwargs while applying the following updates:\n\n - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.\n - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.\n - if no sampler provided by user, `torch DistributedSampler` is setup.\n - if a sampler is provided by user, it is 
wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.\n - if the default device is 'cuda', `pin_memory` is automatically set to `True`.\n\n .. warning::\n\n Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch\n sampler is compatible with distributed configuration.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n train_loader = idist.auto_dataloader(\n train_dataset,\n batch_size=32,\n num_workers=4,\n shuffle=True,\n pin_memory=\"cuda\" in idist.device().type,\n drop_last=True,\n )\n\n Args:\n dataset (Dataset): input torch dataset\n **kwargs: keyword arguments for `torch DataLoader`_.\n\n Returns:\n `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices\n\n .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\n .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178\n .. _torch DistributedSampler:\n https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler\n \"\"\"\n rank = idist.get_rank()\n world_size = idist.get_world_size()\n\n logger = setup_logger(__name__ + \".auto_dataloader\")\n if world_size > 1:\n if \"batch_size\" in kwargs and kwargs[\"batch_size\"] >= world_size:\n kwargs[\"batch_size\"] //= world_size\n\n nproc = idist.get_nproc_per_node()\n if \"num_workers\" in kwargs and kwargs[\"num_workers\"] >= nproc:\n kwargs[\"num_workers\"] = (kwargs[\"num_workers\"] + nproc - 1) // nproc\n\n if \"batch_sampler\" not in kwargs:\n if kwargs.get(\"sampler\", None) is not None:\n sampler = DistributedProxySampler(kwargs[\"sampler\"], num_replicas=world_size, rank=rank)\n else:\n sampler = DistributedSampler(\n dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get(\"shuffle\", True)\n )\n # we need to remove \"shuffle\" from kwargs if sampler is used\n if \"shuffle\" in kwargs:\n del kwargs[\"shuffle\"]\n\n kwargs[\"sampler\"] = sampler\n else:\n warnings.warn(\n \"Found batch_sampler in provided kwargs. Please, make sure that it is compatible \"\n \"with distributed configuration\"\n )\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get(\"pin_memory\", False):\n # TODO: How about XLA GPU ?\n warnings.warn(\n \"Found incompatible options: xla support and pin_memory args equal True. 
\"\n \"Argument `pin_memory=False` will be used to construct data loader.\"\n )\n kwargs[\"pin_memory\"] = False\n else:\n kwargs[\"pin_memory\"] = kwargs.get(\"pin_memory\", \"cuda\" in idist.device().type)\n\n logger.info(\"Use data loader kwargs for dataset '{}': \\n\\t{}\".format(repr(dataset)[:20].strip(), kwargs))\n dataloader = DataLoader(dataset, **kwargs)\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:\n\n logger.info(\"DataLoader is wrapped by `MpDeviceLoader` on XLA\")\n\n mp_device_loader_cls = _MpDeviceLoader\n try:\n from torch_xla.distributed.parallel_loader import MpDeviceLoader\n\n mp_device_loader_cls = MpDeviceLoader\n except ImportError:\n pass\n\n sampler = dataloader.sampler\n dataloader = mp_device_loader_cls(dataloader, idist.device())\n dataloader.sampler = sampler\n\n return dataloader\n\n\ndef auto_model(model: nn.Module) -> nn.Module:\n \"\"\"Helper method to adapt provided model for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we perform to following:\n\n - send model to current :meth:`~ignite.distributed.utils.device()`.\n - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1\n - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n model = idist.auto_model(model)\n\n Args:\n model (torch.nn.Module): model to adapt.\n\n Returns:\n torch.nn.Module\n\n .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel\n .. _torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel\n \"\"\"\n logger = setup_logger(__name__ + \".auto_model\")\n\n model.to(idist.device())\n\n # distributed data parallel model\n if idist.get_world_size() > 1:\n if idist.backend() == idist_native.NCCL:\n lrank = idist.get_local_rank()\n logger.info(\"Apply torch DistributedDataParallel on model, device id: {}\".format(lrank))\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])\n elif idist.backend() == idist_native.GLOO:\n logger.info(\"Apply torch DistributedDataParallel on model\")\n model = torch.nn.parallel.DistributedDataParallel(model)\n\n # not distributed but multiple GPUs reachable so data parallel model\n elif torch.cuda.device_count() > 1 and \"cuda\" in idist.device().type:\n logger.info(\"Apply torch DataParallel on model\")\n model = torch.nn.parallel.DataParallel(model)\n\n return model\n\n\ndef auto_optim(optimizer: Optimizer) -> Optimizer:\n \"\"\"Helper method to adapt optimizer for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, this method is no-op for non-distributed and torch native distributed configuration.\n For XLA distributed configuration, we create a new class that inherits from provided optimizer.\n The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n optimizer = idist.auto_optim(optimizer)\n\n\n Args:\n optimizer (Optimizer): input torch optimizer\n\n Returns:\n Optimizer\n\n .. 
_xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step\n\n \"\"\"\n if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):\n return optimizer\n\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))\n return cls(optimizer)\n\n\nclass DistributedProxySampler(DistributedSampler):\n \"\"\"Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.\n\n Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407\n\n\n .. note::\n Input sampler is assumed to have a constant size.\n\n Args:\n sampler (Sampler): Input torch data sampler.\n num_replicas (int, optional): Number of processes participating in distributed training.\n rank (int, optional): Rank of the current process within ``num_replicas``.\n\n \"\"\"\n\n def __init__(self, sampler: Sampler, num_replicas=None, rank=None):\n\n if not isinstance(sampler, Sampler):\n raise TypeError(\"Argument sampler should be instance of torch Sampler, but given: {}\".format(type(sampler)))\n\n if not hasattr(sampler, \"__len__\"):\n raise TypeError(\"Argument sampler should have length\")\n\n super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)\n self.sampler = sampler\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n torch.manual_seed(self.epoch)\n\n indices = []\n while len(indices) < self.total_size:\n indices += list(self.sampler)\n\n if len(indices) > self.total_size:\n indices = indices[: self.total_size]\n\n # subsample\n indices = indices[self.rank : self.total_size : self.num_replicas]\n if len(indices) != self.num_samples:\n raise RuntimeError(\"{} vs {}\".format(len(indices), self.num_samples))\n\n return iter(indices)\n\n\nif idist.has_xla_support:\n\n import torch_xla.core.xla_model as xm\n from torch_xla.distributed.parallel_loader import ParallelLoader\n\n class _MpDeviceLoader:\n # https://github.com/pytorch/xla/pull/2117\n # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available\n def __init__(self, loader, device, **kwargs):\n self._loader = loader\n self._device = device\n self._parallel_loader_kwargs = kwargs\n\n def __iter__(self):\n parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)\n return parallel_loader.per_device_loader(self._device)\n\n def __len__(self):\n return len(self._loader)\n\n class _XLADistributedOptimizer(Optimizer):\n def __init__(self, optimizer):\n super(self.__class__, self).__init__(optimizer.param_groups)\n self.wrapped_optimizer = optimizer\n\n def step(self, closure=None):\n xm.optimizer_step(self.wrapped_optimizer, barrier=True)\n", "path": "ignite/distributed/auto.py"}]} | 3,608 | 138 |
gh_patches_debug_3662 | rasdani/github-patches | git_diff | scikit-hep__awkward-2169 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
run cpp tests in CI
### Version of Awkward Array
2.0.6
### Description and code to reproduce
@agoose77 and @jpivarski - I think we need to have at least one node to run the cpp tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dev/copy-cpp-headers.py`
Content:
```
1 """Copy the header-only cpp headers into the various package directories that they are required"""
2 import pathlib
3 import shutil
4
5 root_path = pathlib.Path(__file__).absolute().parents[1]
6 source_path = root_path / "header-only"
7 dest_paths = (
8 root_path / "awkward-cpp" / "header-only",
9 root_path / "src" / "awkward" / "_connect" / "header-only",
10 )
11
12 if __name__ == "__main__":
13 for path in dest_paths:
14 if path.exists():
15 shutil.rmtree(path)
16 shutil.copytree(source_path, path)
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dev/copy-cpp-headers.py b/dev/copy-cpp-headers.py
--- a/dev/copy-cpp-headers.py
+++ b/dev/copy-cpp-headers.py
@@ -3,7 +3,7 @@
import shutil
root_path = pathlib.Path(__file__).absolute().parents[1]
-source_path = root_path / "header-only"
+source_path = root_path / "header-only" / "include"
dest_paths = (
root_path / "awkward-cpp" / "header-only",
root_path / "src" / "awkward" / "_connect" / "header-only",
| {"golden_diff": "diff --git a/dev/copy-cpp-headers.py b/dev/copy-cpp-headers.py\n--- a/dev/copy-cpp-headers.py\n+++ b/dev/copy-cpp-headers.py\n@@ -3,7 +3,7 @@\n import shutil\n \n root_path = pathlib.Path(__file__).absolute().parents[1]\n-source_path = root_path / \"header-only\"\n+source_path = root_path / \"header-only\" / \"include\"\n dest_paths = (\n root_path / \"awkward-cpp\" / \"header-only\",\n root_path / \"src\" / \"awkward\" / \"_connect\" / \"header-only\",\n", "issue": "run cpp tests in CI \n### Version of Awkward Array\n\n2.0.6\n\n### Description and code to reproduce\n\n@agoose77 and @jpivarski - I think, we need to have at least one node to run the cpp tests.\n", "before_files": [{"content": "\"\"\"Copy the header-only cpp headers into the various package directories that they are required\"\"\"\nimport pathlib\nimport shutil\n\nroot_path = pathlib.Path(__file__).absolute().parents[1]\nsource_path = root_path / \"header-only\"\ndest_paths = (\n root_path / \"awkward-cpp\" / \"header-only\",\n root_path / \"src\" / \"awkward\" / \"_connect\" / \"header-only\",\n)\n\nif __name__ == \"__main__\":\n for path in dest_paths:\n if path.exists():\n shutil.rmtree(path)\n shutil.copytree(source_path, path)\n", "path": "dev/copy-cpp-headers.py"}], "after_files": [{"content": "\"\"\"Copy the header-only cpp headers into the various package directories that they are required\"\"\"\nimport pathlib\nimport shutil\n\nroot_path = pathlib.Path(__file__).absolute().parents[1]\nsource_path = root_path / \"header-only\" / \"include\"\ndest_paths = (\n root_path / \"awkward-cpp\" / \"header-only\",\n root_path / \"src\" / \"awkward\" / \"_connect\" / \"header-only\",\n)\n\nif __name__ == \"__main__\":\n for path in dest_paths:\n if path.exists():\n shutil.rmtree(path)\n shutil.copytree(source_path, path)\n", "path": "dev/copy-cpp-headers.py"}]} | 467 | 136 |
gh_patches_debug_23142 | rasdani/github-patches | git_diff | netbox-community__netbox-12244 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hide WIFI password
### NetBox version
v3.4.3
### Feature type
Change to existing functionality
### Proposed functionality
Is it possible to hide the WiFi password as well? Right now, when you add the password, it is shown as plain text.
Can this be a hidden password displayed as "*********", with an icon on the side to make it visible?
Or does it need to be moved to the secret(store) plugin?
### Use case
Well, I think passwords don't need to be visible until you want them to be.
The best part is that you of course need to be logged in to see this.
But some think this needs to be secure, and I'm not sure if this is also stored as plain text in the database itself.
### Database changes
Probably yes, encrypted and hidden
### External dependencies
Maybe the secret plugin.
--- END ISSUE ---
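
For background on the kind of change this would involve, Django can mask a form field purely at the UI level by swapping its widget. The snippet below is a generic sketch of that pattern, not the actual NetBox code: the form class is a placeholder, while `auth_psk` is the field name that appears in the NetBox form shown below. Note this only changes how the value is rendered in the browser, not how it is stored in the database.

```python
from django import forms

class ExampleWirelessForm(forms.Form):
    # render_value=True re-displays the saved value on edit,
    # but the browser shows it masked as "********".
    auth_psk = forms.CharField(
        required=False,
        widget=forms.PasswordInput(render_value=True),
    )
```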
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/wireless/forms/model_forms.py`
Content:
```
1 from django.utils.translation import gettext as _
2 from dcim.models import Device, Interface, Location, Region, Site, SiteGroup
3 from ipam.models import VLAN, VLANGroup
4 from netbox.forms import NetBoxModelForm
5 from tenancy.forms import TenancyForm
6 from utilities.forms import CommentField, DynamicModelChoiceField, SlugField, StaticSelect
7 from wireless.models import *
8
9 __all__ = (
10 'WirelessLANForm',
11 'WirelessLANGroupForm',
12 'WirelessLinkForm',
13 )
14
15
16 class WirelessLANGroupForm(NetBoxModelForm):
17 parent = DynamicModelChoiceField(
18 queryset=WirelessLANGroup.objects.all(),
19 required=False
20 )
21 slug = SlugField()
22
23 fieldsets = (
24 ('Wireless LAN Group', (
25 'parent', 'name', 'slug', 'description', 'tags',
26 )),
27 )
28
29 class Meta:
30 model = WirelessLANGroup
31 fields = [
32 'parent', 'name', 'slug', 'description', 'tags',
33 ]
34
35
36 class WirelessLANForm(TenancyForm, NetBoxModelForm):
37 group = DynamicModelChoiceField(
38 queryset=WirelessLANGroup.objects.all(),
39 required=False
40 )
41 region = DynamicModelChoiceField(
42 queryset=Region.objects.all(),
43 required=False,
44 initial_params={
45 'sites': '$site'
46 }
47 )
48 site_group = DynamicModelChoiceField(
49 queryset=SiteGroup.objects.all(),
50 required=False,
51 initial_params={
52 'sites': '$site'
53 }
54 )
55 site = DynamicModelChoiceField(
56 queryset=Site.objects.all(),
57 required=False,
58 null_option='None',
59 query_params={
60 'region_id': '$region',
61 'group_id': '$site_group',
62 }
63 )
64 vlan_group = DynamicModelChoiceField(
65 queryset=VLANGroup.objects.all(),
66 required=False,
67 label=_('VLAN group'),
68 null_option='None',
69 query_params={
70 'site': '$site'
71 },
72 initial_params={
73 'vlans': '$vlan'
74 }
75 )
76 vlan = DynamicModelChoiceField(
77 queryset=VLAN.objects.all(),
78 required=False,
79 label=_('VLAN'),
80 query_params={
81 'site_id': '$site',
82 'group_id': '$vlan_group',
83 }
84 )
85 comments = CommentField()
86
87 fieldsets = (
88 ('Wireless LAN', ('ssid', 'group', 'status', 'description', 'tags')),
89 ('VLAN', ('region', 'site_group', 'site', 'vlan_group', 'vlan',)),
90 ('Tenancy', ('tenant_group', 'tenant')),
91 ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),
92 )
93
94 class Meta:
95 model = WirelessLAN
96 fields = [
97 'ssid', 'group', 'region', 'site_group', 'site', 'status', 'vlan_group', 'vlan', 'tenant_group', 'tenant',
98 'auth_type', 'auth_cipher', 'auth_psk', 'description', 'comments', 'tags',
99 ]
100 widgets = {
101 'status': StaticSelect,
102 'auth_type': StaticSelect,
103 'auth_cipher': StaticSelect,
104 }
105
106
107 class WirelessLinkForm(TenancyForm, NetBoxModelForm):
108 site_a = DynamicModelChoiceField(
109 queryset=Site.objects.all(),
110 required=False,
111 label=_('Site'),
112 initial_params={
113 'devices': '$device_a',
114 }
115 )
116 location_a = DynamicModelChoiceField(
117 queryset=Location.objects.all(),
118 query_params={
119 'site_id': '$site_a',
120 },
121 required=False,
122 label=_('Location'),
123 initial_params={
124 'devices': '$device_a',
125 }
126 )
127 device_a = DynamicModelChoiceField(
128 queryset=Device.objects.all(),
129 query_params={
130 'site_id': '$site_a',
131 'location_id': '$location_a',
132 },
133 required=False,
134 label=_('Device'),
135 initial_params={
136 'interfaces': '$interface_a'
137 }
138 )
139 interface_a = DynamicModelChoiceField(
140 queryset=Interface.objects.all(),
141 query_params={
142 'kind': 'wireless',
143 'device_id': '$device_a',
144 },
145 disabled_indicator='_occupied',
146 label=_('Interface')
147 )
148 site_b = DynamicModelChoiceField(
149 queryset=Site.objects.all(),
150 required=False,
151 label=_('Site'),
152 initial_params={
153 'devices': '$device_b',
154 }
155 )
156 location_b = DynamicModelChoiceField(
157 queryset=Location.objects.all(),
158 query_params={
159 'site_id': '$site_b',
160 },
161 required=False,
162 label=_('Location'),
163 initial_params={
164 'devices': '$device_b',
165 }
166 )
167 device_b = DynamicModelChoiceField(
168 queryset=Device.objects.all(),
169 query_params={
170 'site_id': '$site_b',
171 'location_id': '$location_b',
172 },
173 required=False,
174 label=_('Device'),
175 initial_params={
176 'interfaces': '$interface_b'
177 }
178 )
179 interface_b = DynamicModelChoiceField(
180 queryset=Interface.objects.all(),
181 query_params={
182 'kind': 'wireless',
183 'device_id': '$device_b',
184 },
185 disabled_indicator='_occupied',
186 label=_('Interface')
187 )
188 comments = CommentField()
189
190 fieldsets = (
191 ('Side A', ('site_a', 'location_a', 'device_a', 'interface_a')),
192 ('Side B', ('site_b', 'location_b', 'device_b', 'interface_b')),
193 ('Link', ('status', 'ssid', 'description', 'tags')),
194 ('Tenancy', ('tenant_group', 'tenant')),
195 ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),
196 )
197
198 class Meta:
199 model = WirelessLink
200 fields = [
201 'site_a', 'location_a', 'device_a', 'interface_a', 'site_b', 'location_b', 'device_b', 'interface_b',
202 'status', 'ssid', 'tenant_group', 'tenant', 'auth_type', 'auth_cipher', 'auth_psk', 'description',
203 'comments', 'tags',
204 ]
205 widgets = {
206 'status': StaticSelect,
207 'auth_type': StaticSelect,
208 'auth_cipher': StaticSelect,
209 }
210 labels = {
211 'auth_type': 'Type',
212 'auth_cipher': 'Cipher',
213 }
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/wireless/forms/model_forms.py b/netbox/wireless/forms/model_forms.py
--- a/netbox/wireless/forms/model_forms.py
+++ b/netbox/wireless/forms/model_forms.py
@@ -1,3 +1,4 @@
+from django.forms import PasswordInput
from django.utils.translation import gettext as _
from dcim.models import Device, Interface, Location, Region, Site, SiteGroup
from ipam.models import VLAN, VLANGroup
@@ -101,6 +102,10 @@
'status': StaticSelect,
'auth_type': StaticSelect,
'auth_cipher': StaticSelect,
+ 'auth_psk': PasswordInput(
+ render_value=True,
+ attrs={'data-toggle': 'password'}
+ ),
}
@@ -206,6 +211,10 @@
'status': StaticSelect,
'auth_type': StaticSelect,
'auth_cipher': StaticSelect,
+ 'auth_psk': PasswordInput(
+ render_value=True,
+ attrs={'data-toggle': 'password'}
+ ),
}
labels = {
'auth_type': 'Type',
| {"golden_diff": "diff --git a/netbox/wireless/forms/model_forms.py b/netbox/wireless/forms/model_forms.py\n--- a/netbox/wireless/forms/model_forms.py\n+++ b/netbox/wireless/forms/model_forms.py\n@@ -1,3 +1,4 @@\n+from django.forms import PasswordInput\n from django.utils.translation import gettext as _\n from dcim.models import Device, Interface, Location, Region, Site, SiteGroup\n from ipam.models import VLAN, VLANGroup\n@@ -101,6 +102,10 @@\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n+ 'auth_psk': PasswordInput(\n+ render_value=True,\n+ attrs={'data-toggle': 'password'}\n+ ),\n }\n \n \n@@ -206,6 +211,10 @@\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n+ 'auth_psk': PasswordInput(\n+ render_value=True,\n+ attrs={'data-toggle': 'password'}\n+ ),\n }\n labels = {\n 'auth_type': 'Type',\n", "issue": "Hide WIFI password\n### NetBox version\n\nv3.4.3\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nIs it possible to hide the WIFI password as well? As now when you add the password it is added as plain-text.\r\nCan this be a hidden password with \"*********\" and icon on the side to make it visible?\r\n\r\nOr does it needs to be moved to \"secret(store) plugin?\r\n\r\n\n\n### Use case\n\nWell i think passwords dont need to be visible until you want it to be. \r\nThe best part is that you ofcourse need to be logged in to see this. \r\nBut some think this needs to be secure and im not sure if this is also plain text in the database itself? \n\n### Database changes\n\nProbally yes encrypted and hidden\n\n### External dependencies\n\nMaybe the secret plugin.\n", "before_files": [{"content": "from django.utils.translation import gettext as _\nfrom dcim.models import Device, Interface, Location, Region, Site, SiteGroup\nfrom ipam.models import VLAN, VLANGroup\nfrom netbox.forms import NetBoxModelForm\nfrom tenancy.forms import TenancyForm\nfrom utilities.forms import CommentField, DynamicModelChoiceField, SlugField, StaticSelect\nfrom wireless.models import *\n\n__all__ = (\n 'WirelessLANForm',\n 'WirelessLANGroupForm',\n 'WirelessLinkForm',\n)\n\n\nclass WirelessLANGroupForm(NetBoxModelForm):\n parent = DynamicModelChoiceField(\n queryset=WirelessLANGroup.objects.all(),\n required=False\n )\n slug = SlugField()\n\n fieldsets = (\n ('Wireless LAN Group', (\n 'parent', 'name', 'slug', 'description', 'tags',\n )),\n )\n\n class Meta:\n model = WirelessLANGroup\n fields = [\n 'parent', 'name', 'slug', 'description', 'tags',\n ]\n\n\nclass WirelessLANForm(TenancyForm, NetBoxModelForm):\n group = DynamicModelChoiceField(\n queryset=WirelessLANGroup.objects.all(),\n required=False\n )\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n null_option='None',\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n vlan_group = DynamicModelChoiceField(\n queryset=VLANGroup.objects.all(),\n required=False,\n label=_('VLAN group'),\n null_option='None',\n query_params={\n 'site': '$site'\n },\n initial_params={\n 'vlans': '$vlan'\n }\n )\n vlan = DynamicModelChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n label=_('VLAN'),\n query_params={\n 
'site_id': '$site',\n 'group_id': '$vlan_group',\n }\n )\n comments = CommentField()\n\n fieldsets = (\n ('Wireless LAN', ('ssid', 'group', 'status', 'description', 'tags')),\n ('VLAN', ('region', 'site_group', 'site', 'vlan_group', 'vlan',)),\n ('Tenancy', ('tenant_group', 'tenant')),\n ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),\n )\n\n class Meta:\n model = WirelessLAN\n fields = [\n 'ssid', 'group', 'region', 'site_group', 'site', 'status', 'vlan_group', 'vlan', 'tenant_group', 'tenant',\n 'auth_type', 'auth_cipher', 'auth_psk', 'description', 'comments', 'tags',\n ]\n widgets = {\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n }\n\n\nclass WirelessLinkForm(TenancyForm, NetBoxModelForm):\n site_a = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n label=_('Site'),\n initial_params={\n 'devices': '$device_a',\n }\n )\n location_a = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n query_params={\n 'site_id': '$site_a',\n },\n required=False,\n label=_('Location'),\n initial_params={\n 'devices': '$device_a',\n }\n )\n device_a = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n query_params={\n 'site_id': '$site_a',\n 'location_id': '$location_a',\n },\n required=False,\n label=_('Device'),\n initial_params={\n 'interfaces': '$interface_a'\n }\n )\n interface_a = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n query_params={\n 'kind': 'wireless',\n 'device_id': '$device_a',\n },\n disabled_indicator='_occupied',\n label=_('Interface')\n )\n site_b = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n label=_('Site'),\n initial_params={\n 'devices': '$device_b',\n }\n )\n location_b = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n query_params={\n 'site_id': '$site_b',\n },\n required=False,\n label=_('Location'),\n initial_params={\n 'devices': '$device_b',\n }\n )\n device_b = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n query_params={\n 'site_id': '$site_b',\n 'location_id': '$location_b',\n },\n required=False,\n label=_('Device'),\n initial_params={\n 'interfaces': '$interface_b'\n }\n )\n interface_b = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n query_params={\n 'kind': 'wireless',\n 'device_id': '$device_b',\n },\n disabled_indicator='_occupied',\n label=_('Interface')\n )\n comments = CommentField()\n\n fieldsets = (\n ('Side A', ('site_a', 'location_a', 'device_a', 'interface_a')),\n ('Side B', ('site_b', 'location_b', 'device_b', 'interface_b')),\n ('Link', ('status', 'ssid', 'description', 'tags')),\n ('Tenancy', ('tenant_group', 'tenant')),\n ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),\n )\n\n class Meta:\n model = WirelessLink\n fields = [\n 'site_a', 'location_a', 'device_a', 'interface_a', 'site_b', 'location_b', 'device_b', 'interface_b',\n 'status', 'ssid', 'tenant_group', 'tenant', 'auth_type', 'auth_cipher', 'auth_psk', 'description',\n 'comments', 'tags',\n ]\n widgets = {\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n }\n labels = {\n 'auth_type': 'Type',\n 'auth_cipher': 'Cipher',\n }\n", "path": "netbox/wireless/forms/model_forms.py"}], "after_files": [{"content": "from django.forms import PasswordInput\nfrom django.utils.translation import gettext as _\nfrom dcim.models import Device, Interface, Location, Region, Site, SiteGroup\nfrom ipam.models import VLAN, VLANGroup\nfrom netbox.forms import NetBoxModelForm\nfrom 
tenancy.forms import TenancyForm\nfrom utilities.forms import CommentField, DynamicModelChoiceField, SlugField, StaticSelect\nfrom wireless.models import *\n\n__all__ = (\n 'WirelessLANForm',\n 'WirelessLANGroupForm',\n 'WirelessLinkForm',\n)\n\n\nclass WirelessLANGroupForm(NetBoxModelForm):\n parent = DynamicModelChoiceField(\n queryset=WirelessLANGroup.objects.all(),\n required=False\n )\n slug = SlugField()\n\n fieldsets = (\n ('Wireless LAN Group', (\n 'parent', 'name', 'slug', 'description', 'tags',\n )),\n )\n\n class Meta:\n model = WirelessLANGroup\n fields = [\n 'parent', 'name', 'slug', 'description', 'tags',\n ]\n\n\nclass WirelessLANForm(TenancyForm, NetBoxModelForm):\n group = DynamicModelChoiceField(\n queryset=WirelessLANGroup.objects.all(),\n required=False\n )\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n null_option='None',\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n vlan_group = DynamicModelChoiceField(\n queryset=VLANGroup.objects.all(),\n required=False,\n label=_('VLAN group'),\n null_option='None',\n query_params={\n 'site': '$site'\n },\n initial_params={\n 'vlans': '$vlan'\n }\n )\n vlan = DynamicModelChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n label=_('VLAN'),\n query_params={\n 'site_id': '$site',\n 'group_id': '$vlan_group',\n }\n )\n comments = CommentField()\n\n fieldsets = (\n ('Wireless LAN', ('ssid', 'group', 'status', 'description', 'tags')),\n ('VLAN', ('region', 'site_group', 'site', 'vlan_group', 'vlan',)),\n ('Tenancy', ('tenant_group', 'tenant')),\n ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),\n )\n\n class Meta:\n model = WirelessLAN\n fields = [\n 'ssid', 'group', 'region', 'site_group', 'site', 'status', 'vlan_group', 'vlan', 'tenant_group', 'tenant',\n 'auth_type', 'auth_cipher', 'auth_psk', 'description', 'comments', 'tags',\n ]\n widgets = {\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n 'auth_psk': PasswordInput(\n render_value=True,\n attrs={'data-toggle': 'password'}\n ),\n }\n\n\nclass WirelessLinkForm(TenancyForm, NetBoxModelForm):\n site_a = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n label=_('Site'),\n initial_params={\n 'devices': '$device_a',\n }\n )\n location_a = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n query_params={\n 'site_id': '$site_a',\n },\n required=False,\n label=_('Location'),\n initial_params={\n 'devices': '$device_a',\n }\n )\n device_a = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n query_params={\n 'site_id': '$site_a',\n 'location_id': '$location_a',\n },\n required=False,\n label=_('Device'),\n initial_params={\n 'interfaces': '$interface_a'\n }\n )\n interface_a = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n query_params={\n 'kind': 'wireless',\n 'device_id': '$device_a',\n },\n disabled_indicator='_occupied',\n label=_('Interface')\n )\n site_b = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n label=_('Site'),\n initial_params={\n 'devices': '$device_b',\n }\n )\n location_b = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n query_params={\n 'site_id': '$site_b',\n },\n 
required=False,\n label=_('Location'),\n initial_params={\n 'devices': '$device_b',\n }\n )\n device_b = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n query_params={\n 'site_id': '$site_b',\n 'location_id': '$location_b',\n },\n required=False,\n label=_('Device'),\n initial_params={\n 'interfaces': '$interface_b'\n }\n )\n interface_b = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n query_params={\n 'kind': 'wireless',\n 'device_id': '$device_b',\n },\n disabled_indicator='_occupied',\n label=_('Interface')\n )\n comments = CommentField()\n\n fieldsets = (\n ('Side A', ('site_a', 'location_a', 'device_a', 'interface_a')),\n ('Side B', ('site_b', 'location_b', 'device_b', 'interface_b')),\n ('Link', ('status', 'ssid', 'description', 'tags')),\n ('Tenancy', ('tenant_group', 'tenant')),\n ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),\n )\n\n class Meta:\n model = WirelessLink\n fields = [\n 'site_a', 'location_a', 'device_a', 'interface_a', 'site_b', 'location_b', 'device_b', 'interface_b',\n 'status', 'ssid', 'tenant_group', 'tenant', 'auth_type', 'auth_cipher', 'auth_psk', 'description',\n 'comments', 'tags',\n ]\n widgets = {\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n 'auth_psk': PasswordInput(\n render_value=True,\n attrs={'data-toggle': 'password'}\n ),\n }\n labels = {\n 'auth_type': 'Type',\n 'auth_cipher': 'Cipher',\n }\n", "path": "netbox/wireless/forms/model_forms.py"}]} | 2,337 | 253 |
gh_patches_debug_28272 | rasdani/github-patches | git_diff | googleapis__python-bigquery-89 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove test_utils directory
Now that test utils are available in a GitHub repo, a local `test_utils` copy is not needed anymore and should be removed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import os
18 import shutil
19
20 import nox
21
22
23 BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py")
24
25
26 def default(session):
27 """Default unit test session.
28
29 This is intended to be run **without** an interpreter set, so
30 that the current ``python`` (on the ``PATH``) or the version of
31 Python corresponding to the ``nox`` binary the ``PATH`` can
32 run the tests.
33 """
34 # Install all test dependencies, then install local packages in-place.
35 session.install("mock", "pytest", "pytest-cov", "freezegun")
36 session.install("grpcio")
37 session.install("-e", "test_utils")
38
39 # fastparquet is not included in .[all] because, in general, it's redundant
40 # with pyarrow. We still want to run some unit tests with fastparquet
41 # serialization, though.
42 session.install("-e", ".[all,fastparquet]")
43
44 # IPython does not support Python 2 after version 5.x
45 if session.python == "2.7":
46 session.install("ipython==5.5")
47 else:
48 session.install("ipython")
49
50 # Run py.test against the unit tests.
51 session.run(
52 "py.test",
53 "--quiet",
54 "--cov=google.cloud.bigquery",
55 "--cov=tests.unit",
56 "--cov-append",
57 "--cov-config=.coveragerc",
58 "--cov-report=",
59 "--cov-fail-under=0",
60 os.path.join("tests", "unit"),
61 *session.posargs,
62 )
63
64
65 @nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"])
66 def unit(session):
67 """Run the unit test suite."""
68 default(session)
69
70
71 @nox.session(python=["2.7", "3.8"])
72 def system(session):
73 """Run the system test suite."""
74
75 # Sanity check: Only run system tests if the environment variable is set.
76 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
77 session.skip("Credentials must be set via environment variable.")
78
79 # Use pre-release gRPC for system tests.
80 session.install("--pre", "grpcio")
81
82 # Install all test dependencies, then install local packages in place.
83 session.install("mock", "pytest", "psutil")
84 session.install("google-cloud-storage")
85 session.install("fastavro")
86 session.install("-e", "test_utils")
87 session.install("-e", ".[all]")
88
89 # IPython does not support Python 2 after version 5.x
90 if session.python == "2.7":
91 session.install("ipython==5.5")
92 else:
93 session.install("ipython")
94
95 # Run py.test against the system tests.
96 session.run(
97 "py.test", "--quiet", os.path.join("tests", "system.py"), *session.posargs
98 )
99
100
101 @nox.session(python=["2.7", "3.8"])
102 def snippets(session):
103 """Run the snippets test suite."""
104
105 # Sanity check: Only run snippets tests if the environment variable is set.
106 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
107 session.skip("Credentials must be set via environment variable.")
108
109 # Install all test dependencies, then install local packages in place.
110 session.install("mock", "pytest")
111 session.install("google-cloud-storage")
112 session.install("grpcio")
113 session.install("-e", "test_utils")
114 session.install("-e", ".[all]")
115
116 # Run py.test against the snippets tests.
117 session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs)
118 session.run("py.test", "samples", *session.posargs)
119
120
121 @nox.session(python="3.8")
122 def cover(session):
123 """Run the final coverage report.
124
125 This outputs the coverage report aggregating coverage from the unit
126 test runs (not system test runs), and then erases coverage data.
127 """
128 session.install("coverage", "pytest-cov")
129 session.run("coverage", "report", "--show-missing", "--fail-under=100")
130 session.run("coverage", "erase")
131
132
133 @nox.session(python="3.8")
134 def lint(session):
135 """Run linters.
136
137 Returns a failure if the linters find linting errors or sufficiently
138 serious code quality issues.
139 """
140
141 session.install("black", "flake8")
142 session.install("-e", ".")
143 session.run("flake8", os.path.join("google", "cloud", "bigquery"))
144 session.run("flake8", "tests")
145 session.run("flake8", os.path.join("docs", "samples"))
146 session.run("flake8", os.path.join("docs", "snippets.py"))
147 session.run("black", "--check", *BLACK_PATHS)
148
149
150 @nox.session(python="3.8")
151 def lint_setup_py(session):
152 """Verify that setup.py is valid (including RST check)."""
153
154 session.install("docutils", "Pygments")
155 session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
156
157
158 @nox.session(python="3.6")
159 def blacken(session):
160 """Run black.
161 Format code to uniform standard.
162
163 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
164 That run uses an image that doesn't have 3.6 installed. Before updating this
165 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
166 """
167 session.install("black")
168 session.run("black", *BLACK_PATHS)
169
170
171 @nox.session(python="3.8")
172 def docs(session):
173 """Build the docs."""
174
175 session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
176 session.install("google-cloud-storage")
177 session.install("-e", ".[all]")
178
179 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
180 session.run(
181 "sphinx-build",
182 "-W", # warnings as errors
183 "-T", # show full traceback on exception
184 "-N", # no colors
185 "-b",
186 "html",
187 "-d",
188 os.path.join("docs", "_build", "doctrees", ""),
189 os.path.join("docs", ""),
190 os.path.join("docs", "_build", "html", ""),
191 )
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -34,7 +34,7 @@
# Install all test dependencies, then install local packages in-place.
session.install("mock", "pytest", "pytest-cov", "freezegun")
session.install("grpcio")
- session.install("-e", "test_utils")
+ session.install("git+https://github.com/googleapis/python-test-utils")
# fastparquet is not included in .[all] because, in general, it's redundant
# with pyarrow. We still want to run some unit tests with fastparquet
@@ -83,7 +83,7 @@
session.install("mock", "pytest", "psutil")
session.install("google-cloud-storage")
session.install("fastavro")
- session.install("-e", "test_utils")
+ session.install("git+https://github.com/googleapis/python-test-utils")
session.install("-e", ".[all]")
# IPython does not support Python 2 after version 5.x
@@ -110,7 +110,7 @@
session.install("mock", "pytest")
session.install("google-cloud-storage")
session.install("grpcio")
- session.install("-e", "test_utils")
+ session.install("git+https://github.com/googleapis/python-test-utils")
session.install("-e", ".[all]")
# Run py.test against the snippets tests.
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -34,7 +34,7 @@\n # Install all test dependencies, then install local packages in-place.\n session.install(\"mock\", \"pytest\", \"pytest-cov\", \"freezegun\")\n session.install(\"grpcio\")\n- session.install(\"-e\", \"test_utils\")\n+ session.install(\"git+https://github.com/googleapis/python-test-utils\")\n \n # fastparquet is not included in .[all] because, in general, it's redundant\n # with pyarrow. We still want to run some unit tests with fastparquet\n@@ -83,7 +83,7 @@\n session.install(\"mock\", \"pytest\", \"psutil\")\n session.install(\"google-cloud-storage\")\n session.install(\"fastavro\")\n- session.install(\"-e\", \"test_utils\")\n+ session.install(\"git+https://github.com/googleapis/python-test-utils\")\n session.install(\"-e\", \".[all]\")\n \n # IPython does not support Python 2 after version 5.x\n@@ -110,7 +110,7 @@\n session.install(\"mock\", \"pytest\")\n session.install(\"google-cloud-storage\")\n session.install(\"grpcio\")\n- session.install(\"-e\", \"test_utils\")\n+ session.install(\"git+https://github.com/googleapis/python-test-utils\")\n session.install(\"-e\", \".[all]\")\n \n # Run py.test against the snippets tests.\n", "issue": "Remove test_utils directory\nNow that test utils are available in a GitHub repo, a local `test_utils` copy is not needed anymore and should be removed.\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n # Install all test dependencies, then install local packages in-place.\n session.install(\"mock\", \"pytest\", \"pytest-cov\", \"freezegun\")\n session.install(\"grpcio\")\n session.install(\"-e\", \"test_utils\")\n\n # fastparquet is not included in .[all] because, in general, it's redundant\n # with pyarrow. 
We still want to run some unit tests with fastparquet\n # serialization, though.\n session.install(\"-e\", \".[all,fastparquet]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"psutil\")\n session.install(\"google-cloud-storage\")\n session.install(\"fastavro\")\n session.install(\"-e\", \"test_utils\")\n session.install(\"-e\", \".[all]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the system tests.\n session.run(\n \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n )\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\")\n session.install(\"google-cloud-storage\")\n session.install(\"grpcio\")\n session.install(\"-e\", \"test_utils\")\n session.install(\"-e\", \".[all]\")\n\n # Run py.test against the snippets tests.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\"py.test\", \"samples\", *session.posargs)\n\n\[email protected](python=\"3.8\")\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=\"3.8\")\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"black\", \"flake8\")\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email 
protected](python=\"3.8\")\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(\"black\")\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}], "after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n # Install all test dependencies, then install local packages in-place.\n session.install(\"mock\", \"pytest\", \"pytest-cov\", \"freezegun\")\n session.install(\"grpcio\")\n session.install(\"git+https://github.com/googleapis/python-test-utils\")\n\n # fastparquet is not included in .[all] because, in general, it's redundant\n # with pyarrow. 
We still want to run some unit tests with fastparquet\n # serialization, though.\n session.install(\"-e\", \".[all,fastparquet]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"psutil\")\n session.install(\"google-cloud-storage\")\n session.install(\"fastavro\")\n session.install(\"git+https://github.com/googleapis/python-test-utils\")\n session.install(\"-e\", \".[all]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the system tests.\n session.run(\n \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n )\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\")\n session.install(\"google-cloud-storage\")\n session.install(\"grpcio\")\n session.install(\"git+https://github.com/googleapis/python-test-utils\")\n session.install(\"-e\", \".[all]\")\n\n # Run py.test against the snippets tests.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\"py.test\", \"samples\", *session.posargs)\n\n\[email protected](python=\"3.8\")\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=\"3.8\")\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"black\", \"flake8\")\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n 
session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(\"black\")\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}]} | 2,323 | 334 |
gh_patches_debug_42985 | rasdani/github-patches | git_diff | pytorch__vision-914 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
STL-10 Testing Protocol
Class STL10 does not support recommended testing protocol.
See STL-10 official page: https://cs.stanford.edu/~acoates/stl10/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/stl10.py`
Content:
```
1 from __future__ import print_function
2 from PIL import Image
3 import os
4 import os.path
5 import numpy as np
6 from .cifar import CIFAR10
7
8
9 class STL10(CIFAR10):
10 """`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.
11
12 Args:
13 root (string): Root directory of dataset where directory
14 ``stl10_binary`` exists.
15 split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.
16 Accordingly dataset is selected.
17 transform (callable, optional): A function/transform that takes in an PIL image
18 and returns a transformed version. E.g, ``transforms.RandomCrop``
19 target_transform (callable, optional): A function/transform that takes in the
20 target and transforms it.
21 download (bool, optional): If true, downloads the dataset from the internet and
22 puts it in root directory. If dataset is already downloaded, it is not
23 downloaded again.
24
25 """
26 base_folder = 'stl10_binary'
27 url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz"
28 filename = "stl10_binary.tar.gz"
29 tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'
30 class_names_file = 'class_names.txt'
31 train_list = [
32 ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],
33 ['train_y.bin', '5a34089d4802c674881badbb80307741'],
34 ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']
35 ]
36
37 test_list = [
38 ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'],
39 ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']
40 ]
41 splits = ('train', 'train+unlabeled', 'unlabeled', 'test')
42
43 def __init__(self, root, split='train',
44 transform=None, target_transform=None, download=False):
45 if split not in self.splits:
46 raise ValueError('Split "{}" not found. Valid splits are: {}'.format(
47 split, ', '.join(self.splits),
48 ))
49 self.root = os.path.expanduser(root)
50 self.transform = transform
51 self.target_transform = target_transform
52 self.split = split # train/test/unlabeled set
53
54 if download:
55 self.download()
56
57 if not self._check_integrity():
58 raise RuntimeError(
59 'Dataset not found or corrupted. '
60 'You can use download=True to download it')
61
62 # now load the picked numpy arrays
63 if self.split == 'train':
64 self.data, self.labels = self.__loadfile(
65 self.train_list[0][0], self.train_list[1][0])
66 elif self.split == 'train+unlabeled':
67 self.data, self.labels = self.__loadfile(
68 self.train_list[0][0], self.train_list[1][0])
69 unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
70 self.data = np.concatenate((self.data, unlabeled_data))
71 self.labels = np.concatenate(
72 (self.labels, np.asarray([-1] * unlabeled_data.shape[0])))
73
74 elif self.split == 'unlabeled':
75 self.data, _ = self.__loadfile(self.train_list[2][0])
76 self.labels = np.asarray([-1] * self.data.shape[0])
77 else: # self.split == 'test':
78 self.data, self.labels = self.__loadfile(
79 self.test_list[0][0], self.test_list[1][0])
80
81 class_file = os.path.join(
82 self.root, self.base_folder, self.class_names_file)
83 if os.path.isfile(class_file):
84 with open(class_file) as f:
85 self.classes = f.read().splitlines()
86
87 def __getitem__(self, index):
88 """
89 Args:
90 index (int): Index
91
92 Returns:
93 tuple: (image, target) where target is index of the target class.
94 """
95 if self.labels is not None:
96 img, target = self.data[index], int(self.labels[index])
97 else:
98 img, target = self.data[index], None
99
100 # doing this so that it is consistent with all other datasets
101 # to return a PIL Image
102 img = Image.fromarray(np.transpose(img, (1, 2, 0)))
103
104 if self.transform is not None:
105 img = self.transform(img)
106
107 if self.target_transform is not None:
108 target = self.target_transform(target)
109
110 return img, target
111
112 def __len__(self):
113 return self.data.shape[0]
114
115 def __loadfile(self, data_file, labels_file=None):
116 labels = None
117 if labels_file:
118 path_to_labels = os.path.join(
119 self.root, self.base_folder, labels_file)
120 with open(path_to_labels, 'rb') as f:
121 labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based
122
123 path_to_data = os.path.join(self.root, self.base_folder, data_file)
124 with open(path_to_data, 'rb') as f:
125 # read whole file in uint8 chunks
126 everything = np.fromfile(f, dtype=np.uint8)
127 images = np.reshape(everything, (-1, 3, 96, 96))
128 images = np.transpose(images, (0, 1, 3, 2))
129
130 return images, labels
131
132 def extra_repr(self):
133 return "Split: {split}".format(**self.__dict__)
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/datasets/stl10.py b/torchvision/datasets/stl10.py
--- a/torchvision/datasets/stl10.py
+++ b/torchvision/datasets/stl10.py
@@ -14,6 +14,9 @@
``stl10_binary`` exists.
split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.
Accordingly dataset is selected.
+ folds (int, optional): One of {0-9} or None.
+ For training, loads one of the 10 pre-defined folds of 1k samples for the
+ standard evaluation procedure. If no value is passed, loads the 5k samples.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
@@ -28,6 +31,7 @@
filename = "stl10_binary.tar.gz"
tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'
class_names_file = 'class_names.txt'
+ folds_list_file = 'fold_indices.txt'
train_list = [
['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],
['train_y.bin', '5a34089d4802c674881badbb80307741'],
@@ -40,7 +44,7 @@
]
splits = ('train', 'train+unlabeled', 'unlabeled', 'test')
- def __init__(self, root, split='train',
+ def __init__(self, root, split='train', folds=None,
transform=None, target_transform=None, download=False):
if split not in self.splits:
raise ValueError('Split "{}" not found. Valid splits are: {}'.format(
@@ -50,6 +54,7 @@
self.transform = transform
self.target_transform = target_transform
self.split = split # train/test/unlabeled set
+ self.folds = folds # one of the 10 pre-defined folds or the full dataset
if download:
self.download()
@@ -63,9 +68,12 @@
if self.split == 'train':
self.data, self.labels = self.__loadfile(
self.train_list[0][0], self.train_list[1][0])
+ self.__load_folds(folds)
+
elif self.split == 'train+unlabeled':
self.data, self.labels = self.__loadfile(
self.train_list[0][0], self.train_list[1][0])
+ self.__load_folds(folds)
unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
self.data = np.concatenate((self.data, unlabeled_data))
self.labels = np.concatenate(
@@ -131,3 +139,16 @@
def extra_repr(self):
return "Split: {split}".format(**self.__dict__)
+
+ def __load_folds(self, folds):
+ # loads one of the folds if specified
+ if isinstance(folds, int):
+ if folds >= 0 and folds < 10:
+ path_to_folds = os.path.join(
+ self.root, self.base_folder, self.folds_list_file)
+ with open(path_to_folds, 'r') as f:
+ str_idx = f.read().splitlines()[folds]
+ list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ')
+ self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]
+ else:
+ raise ValueError('Folds "{}" not found. Valid splits are: 0-9.'.format(folds))
| {"golden_diff": "diff --git a/torchvision/datasets/stl10.py b/torchvision/datasets/stl10.py\n--- a/torchvision/datasets/stl10.py\n+++ b/torchvision/datasets/stl10.py\n@@ -14,6 +14,9 @@\n ``stl10_binary`` exists.\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\n Accordingly dataset is selected.\n+ folds (int, optional): One of {0-9} or None.\n+ For training, loads one of the 10 pre-defined folds of 1k samples for the\n+ standard evaluation procedure. If no value is passed, loads the 5k samples.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n@@ -28,6 +31,7 @@\n filename = \"stl10_binary.tar.gz\"\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\n class_names_file = 'class_names.txt'\n+ folds_list_file = 'fold_indices.txt'\n train_list = [\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\n@@ -40,7 +44,7 @@\n ]\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\n \n- def __init__(self, root, split='train',\n+ def __init__(self, root, split='train', folds=None,\n transform=None, target_transform=None, download=False):\n if split not in self.splits:\n raise ValueError('Split \"{}\" not found. Valid splits are: {}'.format(\n@@ -50,6 +54,7 @@\n self.transform = transform\n self.target_transform = target_transform\n self.split = split # train/test/unlabeled set\n+ self.folds = folds # one of the 10 pre-defined folds or the full dataset\n \n if download:\n self.download()\n@@ -63,9 +68,12 @@\n if self.split == 'train':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n+ self.__load_folds(folds)\n+\n elif self.split == 'train+unlabeled':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n+ self.__load_folds(folds)\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\n self.data = np.concatenate((self.data, unlabeled_data))\n self.labels = np.concatenate(\n@@ -131,3 +139,16 @@\n \n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n+\n+ def __load_folds(self, folds):\n+ # loads one of the folds if specified\n+ if isinstance(folds, int):\n+ if folds >= 0 and folds < 10:\n+ path_to_folds = os.path.join(\n+ self.root, self.base_folder, self.folds_list_file)\n+ with open(path_to_folds, 'r') as f:\n+ str_idx = f.read().splitlines()[folds]\n+ list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ')\n+ self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]\n+ else:\n+ raise ValueError('Folds \"{}\" not found. Valid splits are: 0-9.'.format(folds))\n", "issue": "STL-10 Testing Protocol\nClass STL10 does not support recommended testing protocol. \r\nSee STL-10 official page: https://cs.stanford.edu/~acoates/stl10/\n", "before_files": [{"content": "from __future__ import print_function\nfrom PIL import Image\nimport os\nimport os.path\nimport numpy as np\nfrom .cifar import CIFAR10\n\n\nclass STL10(CIFAR10):\n \"\"\"`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where directory\n ``stl10_binary`` exists.\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\n Accordingly dataset is selected.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n base_folder = 'stl10_binary'\n url = \"http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz\"\n filename = \"stl10_binary.tar.gz\"\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\n class_names_file = 'class_names.txt'\n train_list = [\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\n ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']\n ]\n\n test_list = [\n ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'],\n ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']\n ]\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\n\n def __init__(self, root, split='train',\n transform=None, target_transform=None, download=False):\n if split not in self.splits:\n raise ValueError('Split \"{}\" not found. Valid splits are: {}'.format(\n split, ', '.join(self.splits),\n ))\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.target_transform = target_transform\n self.split = split # train/test/unlabeled set\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\n 'Dataset not found or corrupted. '\n 'You can use download=True to download it')\n\n # now load the picked numpy arrays\n if self.split == 'train':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n elif self.split == 'train+unlabeled':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\n self.data = np.concatenate((self.data, unlabeled_data))\n self.labels = np.concatenate(\n (self.labels, np.asarray([-1] * unlabeled_data.shape[0])))\n\n elif self.split == 'unlabeled':\n self.data, _ = self.__loadfile(self.train_list[2][0])\n self.labels = np.asarray([-1] * self.data.shape[0])\n else: # self.split == 'test':\n self.data, self.labels = self.__loadfile(\n self.test_list[0][0], self.test_list[1][0])\n\n class_file = os.path.join(\n self.root, self.base_folder, self.class_names_file)\n if os.path.isfile(class_file):\n with open(class_file) as f:\n self.classes = f.read().splitlines()\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n if self.labels is not None:\n img, target = self.data[index], int(self.labels[index])\n else:\n img, target = self.data[index], None\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(np.transpose(img, (1, 2, 0)))\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return self.data.shape[0]\n\n def __loadfile(self, data_file, labels_file=None):\n labels = None\n if labels_file:\n path_to_labels = os.path.join(\n self.root, self.base_folder, labels_file)\n with open(path_to_labels, 'rb') as f:\n labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based\n\n path_to_data = os.path.join(self.root, self.base_folder, data_file)\n with open(path_to_data, 'rb') as f:\n # read whole file in uint8 chunks\n everything = 
np.fromfile(f, dtype=np.uint8)\n images = np.reshape(everything, (-1, 3, 96, 96))\n images = np.transpose(images, (0, 1, 3, 2))\n\n return images, labels\n\n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n", "path": "torchvision/datasets/stl10.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom PIL import Image\nimport os\nimport os.path\nimport numpy as np\nfrom .cifar import CIFAR10\n\n\nclass STL10(CIFAR10):\n \"\"\"`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where directory\n ``stl10_binary`` exists.\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\n Accordingly dataset is selected.\n folds (int, optional): One of {0-9} or None.\n For training, loads one of the 10 pre-defined folds of 1k samples for the\n standard evaluation procedure. If no value is passed, loads the 5k samples.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n base_folder = 'stl10_binary'\n url = \"http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz\"\n filename = \"stl10_binary.tar.gz\"\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\n class_names_file = 'class_names.txt'\n folds_list_file = 'fold_indices.txt'\n train_list = [\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\n ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']\n ]\n\n test_list = [\n ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'],\n ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']\n ]\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\n\n def __init__(self, root, split='train', folds=None,\n transform=None, target_transform=None, download=False):\n if split not in self.splits:\n raise ValueError('Split \"{}\" not found. Valid splits are: {}'.format(\n split, ', '.join(self.splits),\n ))\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.target_transform = target_transform\n self.split = split # train/test/unlabeled set\n self.folds = folds # one of the 10 pre-defined folds or the full dataset\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\n 'Dataset not found or corrupted. 
'\n 'You can use download=True to download it')\n\n # now load the picked numpy arrays\n if self.split == 'train':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n self.__load_folds(folds)\n\n elif self.split == 'train+unlabeled':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n self.__load_folds(folds)\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\n self.data = np.concatenate((self.data, unlabeled_data))\n self.labels = np.concatenate(\n (self.labels, np.asarray([-1] * unlabeled_data.shape[0])))\n\n elif self.split == 'unlabeled':\n self.data, _ = self.__loadfile(self.train_list[2][0])\n self.labels = np.asarray([-1] * self.data.shape[0])\n else: # self.split == 'test':\n self.data, self.labels = self.__loadfile(\n self.test_list[0][0], self.test_list[1][0])\n\n class_file = os.path.join(\n self.root, self.base_folder, self.class_names_file)\n if os.path.isfile(class_file):\n with open(class_file) as f:\n self.classes = f.read().splitlines()\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n if self.labels is not None:\n img, target = self.data[index], int(self.labels[index])\n else:\n img, target = self.data[index], None\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(np.transpose(img, (1, 2, 0)))\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return self.data.shape[0]\n\n def __loadfile(self, data_file, labels_file=None):\n labels = None\n if labels_file:\n path_to_labels = os.path.join(\n self.root, self.base_folder, labels_file)\n with open(path_to_labels, 'rb') as f:\n labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based\n\n path_to_data = os.path.join(self.root, self.base_folder, data_file)\n with open(path_to_data, 'rb') as f:\n # read whole file in uint8 chunks\n everything = np.fromfile(f, dtype=np.uint8)\n images = np.reshape(everything, (-1, 3, 96, 96))\n images = np.transpose(images, (0, 1, 3, 2))\n\n return images, labels\n\n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n\n def __load_folds(self, folds):\n # loads one of the folds if specified\n if isinstance(folds, int):\n if folds >= 0 and folds < 10:\n path_to_folds = os.path.join(\n self.root, self.base_folder, self.folds_list_file)\n with open(path_to_folds, 'r') as f:\n str_idx = f.read().splitlines()[folds]\n list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ')\n self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]\n else:\n raise ValueError('Folds \"{}\" not found. Valid splits are: 0-9.'.format(folds))\n", "path": "torchvision/datasets/stl10.py"}]} | 1,951 | 910 |
gh_patches_debug_15580 | rasdani/github-patches | git_diff | Azure__azure-cli-extensions-590 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Help documentation returns error for "min_profile" and "max_profile"
### Extension name (the extension in question)
storage-preview
### Description of issue (in as much detail as possible)
Returns the following error when prompting for help via `az storage account -h`
```
Help entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'.
Traceback (most recent call last):
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\cli.py", line 206, in invoke
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\commands\__init__.py", line 276, in execute
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1734, in parse_args
args, argv = self.parse_known_args(args, namespace)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1766, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1954, in _parse_known_args
positionals_end_index = consume_positionals(start_index)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1931, in consume_positionals
take_action(action, args)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1840, in take_action
action(self, namespace, argument_values, option_string)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1137, in __call__
subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1766, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1954, in _parse_known_args
positionals_end_index = consume_positionals(start_index)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1931, in consume_positionals
take_action(action, args)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1840, in take_action
action(self, namespace, argument_values, option_string)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1137, in __call__
subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1766, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1972, in _parse_known_args
start_index = consume_optional(start_index)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1912, in consume_optional
take_action(action, args, option_string)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1840, in take_action
action(self, namespace, argument_values, option_string)
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1024, in __call__
parser.print_help()
File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 2366, in print_help
self._print_message(self.format_help(), file)
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\parser.py", line 154, in format_help
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\parser.py", line 225, in format_help
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\_help.py", line 146, in show_help
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\help.py", line 664, in show_help
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\help.py", line 219, in __init__
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\_help.py", line 240, in load
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\_help_loaders.py", line 152, in versioned_load
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\help.py", line 178, in load
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\help.py", line 183, in _load_from_file
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\_help.py", line 234, in _load_from_data
File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\_help.py", line 201, in _should_include_example
knack.help.HelpAuthoringException: Help entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'.
```
-----
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/storage-preview/azext_storage_preview/_help.py`
Content:
```
1 # coding=utf-8
2 # --------------------------------------------------------------------------------------------
3 # Copyright (c) Microsoft Corporation. All rights reserved.
4 # Licensed under the MIT License. See License.txt in the project root for license information.
5 # --------------------------------------------------------------------------------------------
6
7 from knack.help_files import helps
8
9 # pylint: disable=line-too-long, too-many-lines
10
11 helps['storage account create'] = """
12 type: command
13 short-summary: Create a storage account.
14 long-summary: >
15 The SKU of the storage account defaults to 'Standard_RAGRS'.
16 examples:
17 - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
18 text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS
19 min_profile: latest
20 - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
21 text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS
22 max_profile: 2017-03-09-profile
23 """
24
25 helps['storage account update'] = """
26 type: command
27 short-summary: Update the properties of a storage account.
28 """
29
30 helps['storage blob service-properties'] = """
31 type: group
32 short-summary: Manage storage blob service properties.
33 """
34
35 helps['storage blob service-properties update'] = """
36 type: command
37 short-summary: Update storage blob service properties.
38 """
39
40 helps['storage account management-policy'] = """
41 type: group
42 short-summary: Manage storage account management policies.
43 """
44
45 helps['storage account management-policy create'] = """
46 type: command
47 short-summary: Creates the data policy rules associated with the specified storage account.
48 """
49
50 helps['storage account management-policy update'] = """
51 type: command
52 short-summary: Updates the data policy rules associated with the specified storage account.
53 """
54
55 helps['storage azcopy'] = """
56 type: group
57 short-summary: |
58 [EXPERIMENTAL] Manage storage operations utilizing AzCopy.
59 long-summary: |
60 Open issues here: https://github.com/Azure/azure-storage-azcopy
61 """
62
63 helps['storage azcopy blob'] = """
64 type: group
65 short-summary: Manage object storage for unstructured data (blobs) using AzCopy.
66 """
67
68 helps['storage azcopy blob upload'] = """
69 type: command
70 short-summary: Upload blobs to a storage blob container using AzCopy.
71 examples:
72 - name: Upload a single blob to a container.
73 text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s "path/to/file" -d NewBlob
74 - name: Upload a directory to a container.
75 text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s "path/to/directory" --recursive
76 - name: Upload the contents of a directory to a container.
77 text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s "path/to/directory/*" --recursive
78 """
79
80 helps['storage azcopy blob download'] = """
81 type: command
82 short-summary: Download blobs from a storage blob container using AzCopy.
83 examples:
84 - name: Download a single blob from a container.
85 text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s "path/to/blob" -d "path/to/file"
86 - name: Download a virtual directory from a container.
87 text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s "path/to/virtual_directory" -d "download/path" --recursive
88 - name: Download the contents of a container onto a local file system.
89 text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s * -d "download/path" --recursive
90 """
91
92 helps['storage azcopy blob delete'] = """
93 type: command
94 short-summary: Delete blobs from a storage blob container using AzCopy.
95 examples:
96 - name: Delete a single blob from a container.
97 text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t TargetBlob
98 - name: Delete all blobs from a container.
99 text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount --recursive
100 - name: Delete all blobs in a virtual directory.
101 text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t "path/to/virtual_directory" --recursive
102 """
103
104 helps['storage azcopy run-command'] = """
105 type: command
106 short-summary: Run a command directly using the AzCopy CLI. Please use SAS tokens for authentication.
107 """
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/storage-preview/azext_storage_preview/_help.py b/src/storage-preview/azext_storage_preview/_help.py
--- a/src/storage-preview/azext_storage_preview/_help.py
+++ b/src/storage-preview/azext_storage_preview/_help.py
@@ -16,10 +16,6 @@
examples:
- name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS
- min_profile: latest
- - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
- text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS
- max_profile: 2017-03-09-profile
"""
helps['storage account update'] = """
| {"golden_diff": "diff --git a/src/storage-preview/azext_storage_preview/_help.py b/src/storage-preview/azext_storage_preview/_help.py\n--- a/src/storage-preview/azext_storage_preview/_help.py\n+++ b/src/storage-preview/azext_storage_preview/_help.py\n@@ -16,10 +16,6 @@\n examples:\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS\n- min_profile: latest\n- - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n- text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS\n- max_profile: 2017-03-09-profile\n \"\"\"\n \n helps['storage account update'] = \"\"\"\n", "issue": "Help documentation returns error for \"min_profile\" and \"max_profile\"\n### Extension name (the extension in question)\r\nstorage-preview\r\n\r\n### Description of issue (in as much detail as possible)\r\nReturns the following error when prompting for help via `az storage account -h`\r\n\r\n```\r\nHelp entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\cli.py\", line 206, in invoke\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\commands\\__init__.py\", line 276, in execute\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1734, in parse_args\r\n args, argv = self.parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1766, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1954, in _parse_known_args\r\n positionals_end_index = consume_positionals(start_index)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1931, in consume_positionals\r\n take_action(action, args)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1840, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1137, in __call__\r\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1766, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1954, in _parse_known_args\r\n positionals_end_index = consume_positionals(start_index)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1931, in consume_positionals\r\n take_action(action, args)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1840, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1137, in __call__\r\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\r\n 
File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1766, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1972, in _parse_known_args\r\n start_index = consume_optional(start_index)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1912, in consume_optional\r\n take_action(action, args, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1840, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1024, in __call__\r\n parser.print_help()\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 2366, in print_help\r\n self._print_message(self.format_help(), file)\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\parser.py\", line 154, in format_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\parser.py\", line 225, in format_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 146, in show_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 664, in show_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 219, in __init__\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 240, in load\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help_loaders.py\", line 152, in versioned_load\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 178, in load\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 183, in _load_from_file\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 234, in _load_from_data\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 201, in _should_include_example\r\nknack.help.HelpAuthoringException: Help entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'.\r\n```\r\n-----\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps\n\n# pylint: disable=line-too-long, too-many-lines\n\nhelps['storage account create'] = \"\"\"\n type: command\n short-summary: Create a storage account.\n long-summary: >\n The SKU of the storage account defaults to 'Standard_RAGRS'.\n examples:\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS\n min_profile: latest\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS\n max_profile: 2017-03-09-profile\n\"\"\"\n\nhelps['storage account update'] = \"\"\"\n type: command\n short-summary: Update the properties of a storage account.\n\"\"\"\n\nhelps['storage blob service-properties'] = \"\"\"\n type: group\n short-summary: Manage storage blob service properties.\n\"\"\"\n\nhelps['storage blob service-properties update'] = \"\"\"\n type: command\n short-summary: Update storage blob service properties.\n\"\"\"\n\nhelps['storage account management-policy'] = \"\"\"\n type: group\n short-summary: Manage storage account management policies.\n\"\"\"\n\nhelps['storage account management-policy create'] = \"\"\"\n type: command\n short-summary: Creates the data policy rules associated with the specified storage account.\n\"\"\"\n\nhelps['storage account management-policy update'] = \"\"\"\n type: command\n short-summary: Updates the data policy rules associated with the specified storage account.\n\"\"\"\n\nhelps['storage azcopy'] = \"\"\"\n type: group\n short-summary: |\n [EXPERIMENTAL] Manage storage operations utilizing AzCopy.\n long-summary: |\n Open issues here: https://github.com/Azure/azure-storage-azcopy\n\"\"\"\n\nhelps['storage azcopy blob'] = \"\"\"\n type: group\n short-summary: Manage object storage for unstructured data (blobs) using AzCopy.\n\"\"\"\n\nhelps['storage azcopy blob upload'] = \"\"\"\n type: command\n short-summary: Upload blobs to a storage blob container using AzCopy.\n examples:\n - name: Upload a single blob to a container.\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/file\" -d NewBlob\n - name: Upload a directory to a container.\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/directory\" --recursive\n - name: Upload the contents of a directory to a container.\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/directory/*\" --recursive\n\"\"\"\n\nhelps['storage azcopy blob download'] = \"\"\"\n type: command\n short-summary: Download blobs from a storage blob container using AzCopy.\n examples:\n - name: Download a single blob from a container.\n text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s \"path/to/blob\" -d \"path/to/file\"\n - name: Download a virtual directory from a container.\n text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s \"path/to/virtual_directory\" -d \"download/path\" --recursive\n - name: Download the contents of a container onto a local file system.\n text: 
storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s * -d \"download/path\" --recursive\n\"\"\"\n\nhelps['storage azcopy blob delete'] = \"\"\"\n type: command\n short-summary: Delete blobs from a storage blob container using AzCopy.\n examples:\n - name: Delete a single blob from a container.\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t TargetBlob\n - name: Delete all blobs from a container.\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount --recursive\n - name: Delete all blobs in a virtual directory.\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t \"path/to/virtual_directory\" --recursive\n\"\"\"\n\nhelps['storage azcopy run-command'] = \"\"\"\n type: command\n short-summary: Run a command directly using the AzCopy CLI. Please use SAS tokens for authentication.\n\"\"\"\n", "path": "src/storage-preview/azext_storage_preview/_help.py"}], "after_files": [{"content": "# coding=utf-8\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps\n\n# pylint: disable=line-too-long, too-many-lines\n\nhelps['storage account create'] = \"\"\"\n type: command\n short-summary: Create a storage account.\n long-summary: >\n The SKU of the storage account defaults to 'Standard_RAGRS'.\n examples:\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS\n\"\"\"\n\nhelps['storage account update'] = \"\"\"\n type: command\n short-summary: Update the properties of a storage account.\n\"\"\"\n\nhelps['storage blob service-properties'] = \"\"\"\n type: group\n short-summary: Manage storage blob service properties.\n\"\"\"\n\nhelps['storage blob service-properties update'] = \"\"\"\n type: command\n short-summary: Update storage blob service properties.\n\"\"\"\n\nhelps['storage account management-policy'] = \"\"\"\n type: group\n short-summary: Manage storage account management policies.\n\"\"\"\n\nhelps['storage account management-policy create'] = \"\"\"\n type: command\n short-summary: Creates the data policy rules associated with the specified storage account.\n\"\"\"\n\nhelps['storage account management-policy update'] = \"\"\"\n type: command\n short-summary: Updates the data policy rules associated with the specified storage account.\n\"\"\"\n\nhelps['storage azcopy'] = \"\"\"\n type: group\n short-summary: |\n [EXPERIMENTAL] Manage storage operations utilizing AzCopy.\n long-summary: |\n Open issues here: https://github.com/Azure/azure-storage-azcopy\n\"\"\"\n\nhelps['storage azcopy blob'] = \"\"\"\n type: group\n short-summary: Manage object storage for unstructured data (blobs) using AzCopy.\n\"\"\"\n\nhelps['storage azcopy blob upload'] = \"\"\"\n type: command\n short-summary: Upload blobs to a storage blob container using AzCopy.\n examples:\n - name: Upload a single blob to a container.\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/file\" -d NewBlob\n - name: Upload a directory to a container.\n text: storage 
azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/directory\" --recursive\n - name: Upload the contents of a directory to a container.\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/directory/*\" --recursive\n\"\"\"\n\nhelps['storage azcopy blob download'] = \"\"\"\n type: command\n short-summary: Download blobs from a storage blob container using AzCopy.\n examples:\n - name: Download a single blob from a container.\n text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s \"path/to/blob\" -d \"path/to/file\"\n - name: Download a virtual directory from a container.\n text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s \"path/to/virtual_directory\" -d \"download/path\" --recursive\n - name: Download the contents of a container onto a local file system.\n text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s * -d \"download/path\" --recursive\n\"\"\"\n\nhelps['storage azcopy blob delete'] = \"\"\"\n type: command\n short-summary: Delete blobs from a storage blob container using AzCopy.\n examples:\n - name: Delete a single blob from a container.\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t TargetBlob\n - name: Delete all blobs from a container.\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount --recursive\n - name: Delete all blobs in a virtual directory.\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t \"path/to/virtual_directory\" --recursive\n\"\"\"\n\nhelps['storage azcopy run-command'] = \"\"\"\n type: command\n short-summary: Run a command directly using the AzCopy CLI. Please use SAS tokens for authentication.\n\"\"\"\n", "path": "src/storage-preview/azext_storage_preview/_help.py"}]} | 3,135 | 226 |
gh_patches_debug_11962 | rasdani/github-patches | git_diff | svthalia__concrexit-2591 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Updating promo requests
**Describe the bug**
It is not possible to update the promo request within a week of the publish date
**How to reproduce**
Create a promo request
Try to update the designer within a week of the publish date
**Expected behavior**
The request can only be created more than a week before the publishing date, but assigned to, status and drive folder can always be edited.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/promotion/forms.py`
Content:
```
1 from django import forms
2 from django.utils import timezone
3
4 from promotion.models import PromotionRequest
5 from thaliawebsite.settings import PROMO_PUBLISH_DATE_TIMEDELTA
6
7
8 class PromotionRequestForm(forms.ModelForm):
9 class Meta:
10 model = PromotionRequest
11 fields = [
12 "event",
13 "publish_date",
14 "channel",
15 "assigned_to",
16 "status",
17 "drive_folder",
18 "remarks",
19 ]
20
21 def clean_publish_date(self):
22 publish_date = self.cleaned_data.get("publish_date")
23 create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA
24 if timezone.localdate() > create_time_minimum:
25 raise forms.ValidationError(
26 "Publish date cannot be within a week from now."
27 )
28 if "publish_date" in self.changed_data:
29 create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA
30 if timezone.localdate() > create_time_minimum:
31 raise forms.ValidationError(
32 "Publish date cannot be within a week from now."
33 )
34 return publish_date
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/promotion/forms.py b/website/promotion/forms.py
--- a/website/promotion/forms.py
+++ b/website/promotion/forms.py
@@ -20,11 +20,6 @@
def clean_publish_date(self):
publish_date = self.cleaned_data.get("publish_date")
- create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA
- if timezone.localdate() > create_time_minimum:
- raise forms.ValidationError(
- "Publish date cannot be within a week from now."
- )
if "publish_date" in self.changed_data:
create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA
if timezone.localdate() > create_time_minimum:
| {"golden_diff": "diff --git a/website/promotion/forms.py b/website/promotion/forms.py\n--- a/website/promotion/forms.py\n+++ b/website/promotion/forms.py\n@@ -20,11 +20,6 @@\n \n def clean_publish_date(self):\n publish_date = self.cleaned_data.get(\"publish_date\")\n- create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n- if timezone.localdate() > create_time_minimum:\n- raise forms.ValidationError(\n- \"Publish date cannot be within a week from now.\"\n- )\n if \"publish_date\" in self.changed_data:\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n", "issue": "Updating promo requests \n**Describe the bug**\r\nIt is not possible to update the promo request within a week of the publish date \r\n\r\n**How to reproduce** \r\nCreate a promo request \r\nTry to update the designer within a week to publish date \r\n\r\n**Expected behavior** \r\nThe request can only be created more than a week before the publishing date, but assigned to, status and drive folder can always be edited. \n", "before_files": [{"content": "from django import forms\nfrom django.utils import timezone\n\nfrom promotion.models import PromotionRequest\nfrom thaliawebsite.settings import PROMO_PUBLISH_DATE_TIMEDELTA\n\n\nclass PromotionRequestForm(forms.ModelForm):\n class Meta:\n model = PromotionRequest\n fields = [\n \"event\",\n \"publish_date\",\n \"channel\",\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n \"remarks\",\n ]\n\n def clean_publish_date(self):\n publish_date = self.cleaned_data.get(\"publish_date\")\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n raise forms.ValidationError(\n \"Publish date cannot be within a week from now.\"\n )\n if \"publish_date\" in self.changed_data:\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n raise forms.ValidationError(\n \"Publish date cannot be within a week from now.\"\n )\n return publish_date\n", "path": "website/promotion/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.utils import timezone\n\nfrom promotion.models import PromotionRequest\nfrom thaliawebsite.settings import PROMO_PUBLISH_DATE_TIMEDELTA\n\n\nclass PromotionRequestForm(forms.ModelForm):\n class Meta:\n model = PromotionRequest\n fields = [\n \"event\",\n \"publish_date\",\n \"channel\",\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n \"remarks\",\n ]\n\n def clean_publish_date(self):\n publish_date = self.cleaned_data.get(\"publish_date\")\n if \"publish_date\" in self.changed_data:\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n raise forms.ValidationError(\n \"Publish date cannot be within a week from now.\"\n )\n return publish_date\n", "path": "website/promotion/forms.py"}]} | 624 | 160 |
gh_patches_debug_33629 | rasdani/github-patches | git_diff | pre-commit__pre-commit-372 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not-found executable crashes framework
This was introduced with the new exe logic in 0.8.0
Here's a simple reproduction:
``` yaml
- repo: local
hooks:
- id: test
name: test
language: system
entry: i-dont-exist-lol
files: '\.py$'
```
```
$ pre-commit run --all-files
test.....................................................................An unexpected error has occurred: OSError: Executable i-dont-exist-lol not found
Check the log at ~/.pre-commit/pre-commit.log
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/util.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import errno
5 import functools
6 import os
7 import os.path
8 import shutil
9 import stat
10 import subprocess
11 import tarfile
12 import tempfile
13
14 import pkg_resources
15
16 from pre_commit import five
17 from pre_commit import parse_shebang
18
19
20 @contextlib.contextmanager
21 def cwd(path):
22 original_cwd = os.getcwd()
23 os.chdir(path)
24 try:
25 yield
26 finally:
27 os.chdir(original_cwd)
28
29
30 def mkdirp(path):
31 try:
32 os.makedirs(path)
33 except OSError:
34 if not os.path.exists(path):
35 raise
36
37
38 def memoize_by_cwd(func):
39 """Memoize a function call based on os.getcwd()."""
40 @functools.wraps(func)
41 def wrapper(*args):
42 cwd = os.getcwd()
43 key = (cwd,) + args
44 try:
45 return wrapper._cache[key]
46 except KeyError:
47 ret = wrapper._cache[key] = func(*args)
48 return ret
49
50 wrapper._cache = {}
51
52 return wrapper
53
54
55 @contextlib.contextmanager
56 def clean_path_on_failure(path):
57 """Cleans up the directory on an exceptional failure."""
58 try:
59 yield
60 except BaseException:
61 if os.path.exists(path):
62 rmtree(path)
63 raise
64
65
66 @contextlib.contextmanager
67 def noop_context():
68 yield
69
70
71 def no_git_env():
72 # Too many bugs dealing with environment variables and GIT:
73 # https://github.com/pre-commit/pre-commit/issues/300
74 # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
75 # pre-commit hooks
76 # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE
77 # while running pre-commit hooks in submodules.
78 # GIT_DIR: Causes git clone to clone wrong thing
79 # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit
80 return dict(
81 (k, v) for k, v in os.environ.items() if not k.startswith('GIT_')
82 )
83
84
85 @contextlib.contextmanager
86 def tarfile_open(*args, **kwargs):
87 """Compatibility layer because python2.6"""
88 tf = tarfile.open(*args, **kwargs)
89 try:
90 yield tf
91 finally:
92 tf.close()
93
94
95 @contextlib.contextmanager
96 def tmpdir():
97 """Contextmanager to create a temporary directory. It will be cleaned up
98 afterwards.
99 """
100 tempdir = tempfile.mkdtemp()
101 try:
102 yield tempdir
103 finally:
104 rmtree(tempdir)
105
106
107 def resource_filename(filename):
108 return pkg_resources.resource_filename(
109 'pre_commit',
110 os.path.join('resources', filename),
111 )
112
113
114 def make_executable(filename):
115 original_mode = os.stat(filename).st_mode
116 os.chmod(
117 filename,
118 original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,
119 )
120
121
122 class CalledProcessError(RuntimeError):
123 def __init__(self, returncode, cmd, expected_returncode, output=None):
124 super(CalledProcessError, self).__init__(
125 returncode, cmd, expected_returncode, output,
126 )
127 self.returncode = returncode
128 self.cmd = cmd
129 self.expected_returncode = expected_returncode
130 self.output = output
131
132 def to_bytes(self):
133 output = []
134 for maybe_text in self.output:
135 if maybe_text:
136 output.append(
137 b'\n ' +
138 five.to_bytes(maybe_text).replace(b'\n', b'\n ')
139 )
140 else:
141 output.append(b'(none)')
142
143 return b''.join((
144 five.to_bytes(
145 'Command: {0!r}\n'
146 'Return code: {1}\n'
147 'Expected return code: {2}\n'.format(
148 self.cmd, self.returncode, self.expected_returncode
149 )
150 ),
151 b'Output: ', output[0], b'\n',
152 b'Errors: ', output[1], b'\n',
153 ))
154
155 def to_text(self):
156 return self.to_bytes().decode('UTF-8')
157
158 if five.PY3: # pragma: no cover (py3)
159 __bytes__ = to_bytes
160 __str__ = to_text
161 else: # pragma: no cover (py2)
162 __str__ = to_bytes
163 __unicode__ = to_text
164
165
166 def cmd_output(*cmd, **kwargs):
167 retcode = kwargs.pop('retcode', 0)
168 encoding = kwargs.pop('encoding', 'UTF-8')
169 __popen = kwargs.pop('__popen', subprocess.Popen)
170
171 popen_kwargs = {
172 'stdin': subprocess.PIPE,
173 'stdout': subprocess.PIPE,
174 'stderr': subprocess.PIPE,
175 }
176
177 # py2/py3 on windows are more strict about the types here
178 cmd = tuple(five.n(arg) for arg in cmd)
179 kwargs['env'] = dict(
180 (five.n(key), five.n(value))
181 for key, value in kwargs.pop('env', {}).items()
182 ) or None
183
184 cmd = parse_shebang.normalize_cmd(cmd)
185
186 popen_kwargs.update(kwargs)
187 proc = __popen(cmd, **popen_kwargs)
188 stdout, stderr = proc.communicate()
189 if encoding is not None and stdout is not None:
190 stdout = stdout.decode(encoding)
191 if encoding is not None and stderr is not None:
192 stderr = stderr.decode(encoding)
193 returncode = proc.returncode
194
195 if retcode is not None and retcode != returncode:
196 raise CalledProcessError(
197 returncode, cmd, retcode, output=(stdout, stderr),
198 )
199
200 return proc.returncode, stdout, stderr
201
202
203 def rmtree(path):
204 """On windows, rmtree fails for readonly dirs."""
205 def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)
206 excvalue = exc[1]
207 if (
208 func in (os.rmdir, os.remove, os.unlink) and
209 excvalue.errno == errno.EACCES
210 ):
211 os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
212 func(path)
213 else:
214 raise
215 shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
216
```
Path: `pre_commit/parse_shebang.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import unicode_literals
3
4 import io
5 import os.path
6 import shlex
7 import string
8
9 from pre_commit import five
10
11
12 printable = frozenset(string.printable)
13
14
15 def parse_bytesio(bytesio):
16 """Parse the shebang from a file opened for reading binary."""
17 if bytesio.read(2) != b'#!':
18 return ()
19 first_line = bytesio.readline()
20 try:
21 first_line = first_line.decode('US-ASCII')
22 except UnicodeDecodeError:
23 return ()
24
25 # Require only printable ascii
26 for c in first_line:
27 if c not in printable:
28 return ()
29
30 # shlex.split is horribly broken in py26 on text strings
31 cmd = tuple(shlex.split(five.n(first_line)))
32 if cmd[0] == '/usr/bin/env':
33 cmd = cmd[1:]
34 return cmd
35
36
37 def parse_filename(filename):
38 """Parse the shebang given a filename."""
39 if not os.path.exists(filename) or not os.access(filename, os.X_OK):
40 return ()
41
42 with io.open(filename, 'rb') as f:
43 return parse_bytesio(f)
44
45
46 def find_executable(exe, _environ=None):
47 exe = os.path.normpath(exe)
48 if os.sep in exe:
49 return exe
50
51 environ = _environ if _environ is not None else os.environ
52
53 if 'PATHEXT' in environ:
54 possible_exe_names = (exe,) + tuple(
55 exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep)
56 )
57 else:
58 possible_exe_names = (exe,)
59
60 for path in environ.get('PATH', '').split(os.pathsep):
61 for possible_exe_name in possible_exe_names:
62 joined = os.path.join(path, possible_exe_name)
63 if os.path.isfile(joined) and os.access(joined, os.X_OK):
64 return joined
65 else:
66 return None
67
68
69 def normexe(orig_exe):
70 if os.sep not in orig_exe:
71 exe = find_executable(orig_exe)
72 if exe is None:
73 raise OSError('Executable {0} not found'.format(orig_exe))
74 return exe
75 else:
76 return orig_exe
77
78
79 def normalize_cmd(cmd):
80 """Fixes for the following issues on windows
81 - http://bugs.python.org/issue8557
82 - windows does not parse shebangs
83
84 This function also makes deep-path shebangs work just fine
85 """
86 # Use PATH to determine the executable
87 exe = normexe(cmd[0])
88
89 # Figure out the shebang from the resulting command
90 cmd = parse_filename(exe) + (exe,) + cmd[1:]
91
92 # This could have given us back another bare executable
93 exe = normexe(cmd[0])
94
95 return (exe,) + cmd[1:]
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/parse_shebang.py b/pre_commit/parse_shebang.py
--- a/pre_commit/parse_shebang.py
+++ b/pre_commit/parse_shebang.py
@@ -12,6 +12,10 @@
printable = frozenset(string.printable)
+class ExecutableNotFoundError(OSError):
+ pass
+
+
def parse_bytesio(bytesio):
"""Parse the shebang from a file opened for reading binary."""
if bytesio.read(2) != b'#!':
@@ -70,7 +74,9 @@
if os.sep not in orig_exe:
exe = find_executable(orig_exe)
if exe is None:
- raise OSError('Executable {0} not found'.format(orig_exe))
+ raise ExecutableNotFoundError(
+ 'Executable `{0}` not found'.format(orig_exe),
+ )
return exe
else:
return orig_exe
diff --git a/pre_commit/util.py b/pre_commit/util.py
--- a/pre_commit/util.py
+++ b/pre_commit/util.py
@@ -181,23 +181,26 @@
for key, value in kwargs.pop('env', {}).items()
) or None
- cmd = parse_shebang.normalize_cmd(cmd)
-
- popen_kwargs.update(kwargs)
- proc = __popen(cmd, **popen_kwargs)
- stdout, stderr = proc.communicate()
- if encoding is not None and stdout is not None:
- stdout = stdout.decode(encoding)
- if encoding is not None and stderr is not None:
- stderr = stderr.decode(encoding)
- returncode = proc.returncode
+ try:
+ cmd = parse_shebang.normalize_cmd(cmd)
+ except parse_shebang.ExecutableNotFoundError as e:
+ returncode, stdout, stderr = (-1, e.args[0].encode('UTF-8'), b'')
+ else:
+ popen_kwargs.update(kwargs)
+ proc = __popen(cmd, **popen_kwargs)
+ stdout, stderr = proc.communicate()
+ if encoding is not None and stdout is not None:
+ stdout = stdout.decode(encoding)
+ if encoding is not None and stderr is not None:
+ stderr = stderr.decode(encoding)
+ returncode = proc.returncode
if retcode is not None and retcode != returncode:
raise CalledProcessError(
returncode, cmd, retcode, output=(stdout, stderr),
)
- return proc.returncode, stdout, stderr
+ return returncode, stdout, stderr
def rmtree(path):
| {"golden_diff": "diff --git a/pre_commit/parse_shebang.py b/pre_commit/parse_shebang.py\n--- a/pre_commit/parse_shebang.py\n+++ b/pre_commit/parse_shebang.py\n@@ -12,6 +12,10 @@\n printable = frozenset(string.printable)\n \n \n+class ExecutableNotFoundError(OSError):\n+ pass\n+\n+\n def parse_bytesio(bytesio):\n \"\"\"Parse the shebang from a file opened for reading binary.\"\"\"\n if bytesio.read(2) != b'#!':\n@@ -70,7 +74,9 @@\n if os.sep not in orig_exe:\n exe = find_executable(orig_exe)\n if exe is None:\n- raise OSError('Executable {0} not found'.format(orig_exe))\n+ raise ExecutableNotFoundError(\n+ 'Executable `{0}` not found'.format(orig_exe),\n+ )\n return exe\n else:\n return orig_exe\ndiff --git a/pre_commit/util.py b/pre_commit/util.py\n--- a/pre_commit/util.py\n+++ b/pre_commit/util.py\n@@ -181,23 +181,26 @@\n for key, value in kwargs.pop('env', {}).items()\n ) or None\n \n- cmd = parse_shebang.normalize_cmd(cmd)\n-\n- popen_kwargs.update(kwargs)\n- proc = __popen(cmd, **popen_kwargs)\n- stdout, stderr = proc.communicate()\n- if encoding is not None and stdout is not None:\n- stdout = stdout.decode(encoding)\n- if encoding is not None and stderr is not None:\n- stderr = stderr.decode(encoding)\n- returncode = proc.returncode\n+ try:\n+ cmd = parse_shebang.normalize_cmd(cmd)\n+ except parse_shebang.ExecutableNotFoundError as e:\n+ returncode, stdout, stderr = (-1, e.args[0].encode('UTF-8'), b'')\n+ else:\n+ popen_kwargs.update(kwargs)\n+ proc = __popen(cmd, **popen_kwargs)\n+ stdout, stderr = proc.communicate()\n+ if encoding is not None and stdout is not None:\n+ stdout = stdout.decode(encoding)\n+ if encoding is not None and stderr is not None:\n+ stderr = stderr.decode(encoding)\n+ returncode = proc.returncode\n \n if retcode is not None and retcode != returncode:\n raise CalledProcessError(\n returncode, cmd, retcode, output=(stdout, stderr),\n )\n \n- return proc.returncode, stdout, stderr\n+ return returncode, stdout, stderr\n \n \n def rmtree(path):\n", "issue": "Not-found executable crashes framework\nThis was introduced with the new exe logic in 0.8.0\n\nHere's a simple reproduction:\n\n``` yaml\n- repo: local\n hooks:\n - id: test\n name: test\n language: system\n entry: i-dont-exist-lol\n files: '\\.py$'\n```\n\n```\n$ pre-commit run --all-files\ntest.....................................................................An unexpected error has occurred: OSError: Executable i-dont-exist-lol not found\nCheck the log at ~/.pre-commit/pre-commit.log\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport errno\nimport functools\nimport os\nimport os.path\nimport shutil\nimport stat\nimport subprocess\nimport tarfile\nimport tempfile\n\nimport pkg_resources\n\nfrom pre_commit import five\nfrom pre_commit import parse_shebang\n\n\[email protected]\ndef cwd(path):\n original_cwd = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(original_cwd)\n\n\ndef mkdirp(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.exists(path):\n raise\n\n\ndef memoize_by_cwd(func):\n \"\"\"Memoize a function call based on os.getcwd().\"\"\"\n @functools.wraps(func)\n def wrapper(*args):\n cwd = os.getcwd()\n key = (cwd,) + args\n try:\n return wrapper._cache[key]\n except KeyError:\n ret = wrapper._cache[key] = func(*args)\n return ret\n\n wrapper._cache = {}\n\n return wrapper\n\n\[email protected]\ndef clean_path_on_failure(path):\n \"\"\"Cleans up the directory on an exceptional failure.\"\"\"\n try:\n 
yield\n except BaseException:\n if os.path.exists(path):\n rmtree(path)\n raise\n\n\[email protected]\ndef noop_context():\n yield\n\n\ndef no_git_env():\n # Too many bugs dealing with environment variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n return dict(\n (k, v) for k, v in os.environ.items() if not k.startswith('GIT_')\n )\n\n\[email protected]\ndef tarfile_open(*args, **kwargs):\n \"\"\"Compatibility layer because python2.6\"\"\"\n tf = tarfile.open(*args, **kwargs)\n try:\n yield tf\n finally:\n tf.close()\n\n\[email protected]\ndef tmpdir():\n \"\"\"Contextmanager to create a temporary directory. It will be cleaned up\n afterwards.\n \"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n yield tempdir\n finally:\n rmtree(tempdir)\n\n\ndef resource_filename(filename):\n return pkg_resources.resource_filename(\n 'pre_commit',\n os.path.join('resources', filename),\n )\n\n\ndef make_executable(filename):\n original_mode = os.stat(filename).st_mode\n os.chmod(\n filename,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n\nclass CalledProcessError(RuntimeError):\n def __init__(self, returncode, cmd, expected_returncode, output=None):\n super(CalledProcessError, self).__init__(\n returncode, cmd, expected_returncode, output,\n )\n self.returncode = returncode\n self.cmd = cmd\n self.expected_returncode = expected_returncode\n self.output = output\n\n def to_bytes(self):\n output = []\n for maybe_text in self.output:\n if maybe_text:\n output.append(\n b'\\n ' +\n five.to_bytes(maybe_text).replace(b'\\n', b'\\n ')\n )\n else:\n output.append(b'(none)')\n\n return b''.join((\n five.to_bytes(\n 'Command: {0!r}\\n'\n 'Return code: {1}\\n'\n 'Expected return code: {2}\\n'.format(\n self.cmd, self.returncode, self.expected_returncode\n )\n ),\n b'Output: ', output[0], b'\\n',\n b'Errors: ', output[1], b'\\n',\n ))\n\n def to_text(self):\n return self.to_bytes().decode('UTF-8')\n\n if five.PY3: # pragma: no cover (py3)\n __bytes__ = to_bytes\n __str__ = to_text\n else: # pragma: no cover (py2)\n __str__ = to_bytes\n __unicode__ = to_text\n\n\ndef cmd_output(*cmd, **kwargs):\n retcode = kwargs.pop('retcode', 0)\n encoding = kwargs.pop('encoding', 'UTF-8')\n __popen = kwargs.pop('__popen', subprocess.Popen)\n\n popen_kwargs = {\n 'stdin': subprocess.PIPE,\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE,\n }\n\n # py2/py3 on windows are more strict about the types here\n cmd = tuple(five.n(arg) for arg in cmd)\n kwargs['env'] = dict(\n (five.n(key), five.n(value))\n for key, value in kwargs.pop('env', {}).items()\n ) or None\n\n cmd = parse_shebang.normalize_cmd(cmd)\n\n popen_kwargs.update(kwargs)\n proc = __popen(cmd, **popen_kwargs)\n stdout, stderr = proc.communicate()\n if encoding is not None and stdout is not None:\n stdout = stdout.decode(encoding)\n if encoding is not None and stderr is not None:\n stderr = stderr.decode(encoding)\n returncode = proc.returncode\n\n if retcode is not None and retcode != returncode:\n raise CalledProcessError(\n returncode, cmd, retcode, output=(stdout, stderr),\n )\n\n return proc.returncode, stdout, stderr\n\n\ndef rmtree(path):\n \"\"\"On windows, rmtree fails for readonly 
dirs.\"\"\"\n def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)\n excvalue = exc[1]\n if (\n func in (os.rmdir, os.remove, os.unlink) and\n excvalue.errno == errno.EACCES\n ):\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)\n", "path": "pre_commit/util.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport io\nimport os.path\nimport shlex\nimport string\n\nfrom pre_commit import five\n\n\nprintable = frozenset(string.printable)\n\n\ndef parse_bytesio(bytesio):\n \"\"\"Parse the shebang from a file opened for reading binary.\"\"\"\n if bytesio.read(2) != b'#!':\n return ()\n first_line = bytesio.readline()\n try:\n first_line = first_line.decode('US-ASCII')\n except UnicodeDecodeError:\n return ()\n\n # Require only printable ascii\n for c in first_line:\n if c not in printable:\n return ()\n\n # shlex.split is horribly broken in py26 on text strings\n cmd = tuple(shlex.split(five.n(first_line)))\n if cmd[0] == '/usr/bin/env':\n cmd = cmd[1:]\n return cmd\n\n\ndef parse_filename(filename):\n \"\"\"Parse the shebang given a filename.\"\"\"\n if not os.path.exists(filename) or not os.access(filename, os.X_OK):\n return ()\n\n with io.open(filename, 'rb') as f:\n return parse_bytesio(f)\n\n\ndef find_executable(exe, _environ=None):\n exe = os.path.normpath(exe)\n if os.sep in exe:\n return exe\n\n environ = _environ if _environ is not None else os.environ\n\n if 'PATHEXT' in environ:\n possible_exe_names = (exe,) + tuple(\n exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep)\n )\n else:\n possible_exe_names = (exe,)\n\n for path in environ.get('PATH', '').split(os.pathsep):\n for possible_exe_name in possible_exe_names:\n joined = os.path.join(path, possible_exe_name)\n if os.path.isfile(joined) and os.access(joined, os.X_OK):\n return joined\n else:\n return None\n\n\ndef normexe(orig_exe):\n if os.sep not in orig_exe:\n exe = find_executable(orig_exe)\n if exe is None:\n raise OSError('Executable {0} not found'.format(orig_exe))\n return exe\n else:\n return orig_exe\n\n\ndef normalize_cmd(cmd):\n \"\"\"Fixes for the following issues on windows\n - http://bugs.python.org/issue8557\n - windows does not parse shebangs\n\n This function also makes deep-path shebangs work just fine\n \"\"\"\n # Use PATH to determine the executable\n exe = normexe(cmd[0])\n\n # Figure out the shebang from the resulting command\n cmd = parse_filename(exe) + (exe,) + cmd[1:]\n\n # This could have given us back another bare executable\n exe = normexe(cmd[0])\n\n return (exe,) + cmd[1:]\n", "path": "pre_commit/parse_shebang.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport errno\nimport functools\nimport os\nimport os.path\nimport shutil\nimport stat\nimport subprocess\nimport tarfile\nimport tempfile\n\nimport pkg_resources\n\nfrom pre_commit import five\nfrom pre_commit import parse_shebang\n\n\[email protected]\ndef cwd(path):\n original_cwd = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(original_cwd)\n\n\ndef mkdirp(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.exists(path):\n raise\n\n\ndef memoize_by_cwd(func):\n \"\"\"Memoize a function call based on os.getcwd().\"\"\"\n @functools.wraps(func)\n def wrapper(*args):\n cwd = os.getcwd()\n key = (cwd,) + args\n try:\n return wrapper._cache[key]\n except 
KeyError:\n ret = wrapper._cache[key] = func(*args)\n return ret\n\n wrapper._cache = {}\n\n return wrapper\n\n\[email protected]\ndef clean_path_on_failure(path):\n \"\"\"Cleans up the directory on an exceptional failure.\"\"\"\n try:\n yield\n except BaseException:\n if os.path.exists(path):\n rmtree(path)\n raise\n\n\[email protected]\ndef noop_context():\n yield\n\n\ndef no_git_env():\n # Too many bugs dealing with environment variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n return dict(\n (k, v) for k, v in os.environ.items() if not k.startswith('GIT_')\n )\n\n\[email protected]\ndef tarfile_open(*args, **kwargs):\n \"\"\"Compatibility layer because python2.6\"\"\"\n tf = tarfile.open(*args, **kwargs)\n try:\n yield tf\n finally:\n tf.close()\n\n\[email protected]\ndef tmpdir():\n \"\"\"Contextmanager to create a temporary directory. It will be cleaned up\n afterwards.\n \"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n yield tempdir\n finally:\n rmtree(tempdir)\n\n\ndef resource_filename(filename):\n return pkg_resources.resource_filename(\n 'pre_commit',\n os.path.join('resources', filename),\n )\n\n\ndef make_executable(filename):\n original_mode = os.stat(filename).st_mode\n os.chmod(\n filename,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n\nclass CalledProcessError(RuntimeError):\n def __init__(self, returncode, cmd, expected_returncode, output=None):\n super(CalledProcessError, self).__init__(\n returncode, cmd, expected_returncode, output,\n )\n self.returncode = returncode\n self.cmd = cmd\n self.expected_returncode = expected_returncode\n self.output = output\n\n def to_bytes(self):\n output = []\n for maybe_text in self.output:\n if maybe_text:\n output.append(\n b'\\n ' +\n five.to_bytes(maybe_text).replace(b'\\n', b'\\n ')\n )\n else:\n output.append(b'(none)')\n\n return b''.join((\n five.to_bytes(\n 'Command: {0!r}\\n'\n 'Return code: {1}\\n'\n 'Expected return code: {2}\\n'.format(\n self.cmd, self.returncode, self.expected_returncode\n )\n ),\n b'Output: ', output[0], b'\\n',\n b'Errors: ', output[1], b'\\n',\n ))\n\n def to_text(self):\n return self.to_bytes().decode('UTF-8')\n\n if five.PY3: # pragma: no cover (py3)\n __bytes__ = to_bytes\n __str__ = to_text\n else: # pragma: no cover (py2)\n __str__ = to_bytes\n __unicode__ = to_text\n\n\ndef cmd_output(*cmd, **kwargs):\n retcode = kwargs.pop('retcode', 0)\n encoding = kwargs.pop('encoding', 'UTF-8')\n __popen = kwargs.pop('__popen', subprocess.Popen)\n\n popen_kwargs = {\n 'stdin': subprocess.PIPE,\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE,\n }\n\n # py2/py3 on windows are more strict about the types here\n cmd = tuple(five.n(arg) for arg in cmd)\n kwargs['env'] = dict(\n (five.n(key), five.n(value))\n for key, value in kwargs.pop('env', {}).items()\n ) or None\n\n try:\n cmd = parse_shebang.normalize_cmd(cmd)\n except parse_shebang.ExecutableNotFoundError as e:\n returncode, stdout, stderr = (-1, e.args[0].encode('UTF-8'), b'')\n else:\n popen_kwargs.update(kwargs)\n proc = __popen(cmd, **popen_kwargs)\n stdout, stderr = proc.communicate()\n if encoding is not None and stdout is not None:\n stdout = 
stdout.decode(encoding)\n if encoding is not None and stderr is not None:\n stderr = stderr.decode(encoding)\n returncode = proc.returncode\n\n if retcode is not None and retcode != returncode:\n raise CalledProcessError(\n returncode, cmd, retcode, output=(stdout, stderr),\n )\n\n return returncode, stdout, stderr\n\n\ndef rmtree(path):\n \"\"\"On windows, rmtree fails for readonly dirs.\"\"\"\n def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)\n excvalue = exc[1]\n if (\n func in (os.rmdir, os.remove, os.unlink) and\n excvalue.errno == errno.EACCES\n ):\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)\n", "path": "pre_commit/util.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport io\nimport os.path\nimport shlex\nimport string\n\nfrom pre_commit import five\n\n\nprintable = frozenset(string.printable)\n\n\nclass ExecutableNotFoundError(OSError):\n pass\n\n\ndef parse_bytesio(bytesio):\n \"\"\"Parse the shebang from a file opened for reading binary.\"\"\"\n if bytesio.read(2) != b'#!':\n return ()\n first_line = bytesio.readline()\n try:\n first_line = first_line.decode('US-ASCII')\n except UnicodeDecodeError:\n return ()\n\n # Require only printable ascii\n for c in first_line:\n if c not in printable:\n return ()\n\n # shlex.split is horribly broken in py26 on text strings\n cmd = tuple(shlex.split(five.n(first_line)))\n if cmd[0] == '/usr/bin/env':\n cmd = cmd[1:]\n return cmd\n\n\ndef parse_filename(filename):\n \"\"\"Parse the shebang given a filename.\"\"\"\n if not os.path.exists(filename) or not os.access(filename, os.X_OK):\n return ()\n\n with io.open(filename, 'rb') as f:\n return parse_bytesio(f)\n\n\ndef find_executable(exe, _environ=None):\n exe = os.path.normpath(exe)\n if os.sep in exe:\n return exe\n\n environ = _environ if _environ is not None else os.environ\n\n if 'PATHEXT' in environ:\n possible_exe_names = (exe,) + tuple(\n exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep)\n )\n else:\n possible_exe_names = (exe,)\n\n for path in environ.get('PATH', '').split(os.pathsep):\n for possible_exe_name in possible_exe_names:\n joined = os.path.join(path, possible_exe_name)\n if os.path.isfile(joined) and os.access(joined, os.X_OK):\n return joined\n else:\n return None\n\n\ndef normexe(orig_exe):\n if os.sep not in orig_exe:\n exe = find_executable(orig_exe)\n if exe is None:\n raise ExecutableNotFoundError(\n 'Executable `{0}` not found'.format(orig_exe),\n )\n return exe\n else:\n return orig_exe\n\n\ndef normalize_cmd(cmd):\n \"\"\"Fixes for the following issues on windows\n - http://bugs.python.org/issue8557\n - windows does not parse shebangs\n\n This function also makes deep-path shebangs work just fine\n \"\"\"\n # Use PATH to determine the executable\n exe = normexe(cmd[0])\n\n # Figure out the shebang from the resulting command\n cmd = parse_filename(exe) + (exe,) + cmd[1:]\n\n # This could have given us back another bare executable\n exe = normexe(cmd[0])\n\n return (exe,) + cmd[1:]\n", "path": "pre_commit/parse_shebang.py"}]} | 3,140 | 569 |
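
A small sketch of the control flow the pre-commit patch above introduces, with `shutil.which` standing in for the repository's own `find_executable` helper (the names below are illustrative, not the real pre-commit API): a missing executable now surfaces as a non-zero return code plus the error message as output, instead of an uncaught `OSError` crashing the run:

```python
import shutil
import subprocess


class ExecutableNotFoundError(OSError):
    pass


def normexe(exe):
    # Stand-in for pre_commit.parse_shebang.normexe: resolve via PATH or raise.
    found = shutil.which(exe)
    if found is None:
        raise ExecutableNotFoundError('Executable `{0}` not found'.format(exe))
    return found


def cmd_output(*cmd):
    # Mirrors the patched flow: a missing executable becomes (-1, message, b'')
    # rather than an exception that aborts the whole hook run.
    try:
        cmd = (normexe(cmd[0]),) + cmd[1:]
    except ExecutableNotFoundError as e:
        return -1, e.args[0].encode('UTF-8'), b''
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return proc.returncode, stdout, stderr


print(cmd_output('i-dont-exist-lol'))
# -> (-1, b'Executable `i-dont-exist-lol` not found', b'')
```
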
gh_patches_debug_29010 | rasdani/github-patches | git_diff | Flexget__Flexget-2284 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
descargas2020 plugin broken
### Expected behaviour:
it downloads the torrent properly.
### Actual behaviour:
### Steps to reproduce:
add this rss http://feeds.feedburner.com/descargas2020new
flexget execute
you will get:
2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/
2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None)
2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved.
#### Config:
```
tasks:
series_sigo:
rss:
url: http://feeds.feedburner.com/descargas2020new
link: link
all_series: yes
seen: local
regexp:
reject:
- PCDVD
accept_all: yes
thetvdb_lookup: yes
no_entries_ok: yes
set:
filename: "{{title|pathscrub}}.torrent"
download: /home/flexget/torrent/
```
#### Log:
2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/
2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None)
2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved.
```
paste log output here
```
</details>
### Additional information:
- FlexGet version: 2.17.20
- Python version: 2.7.13
- Installation method: i don't remember
- Using daemon (yes/no): no
- OS and version: debian 9 x64
- Link to crash log:
<!---
In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.
Please verify that the following data is present before submitting your issue:
- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.
- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).
- FlexGet version (use `flexget -V` to get it).
- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.
- Installation method (pip, git install, etc).
- Whether or not you're running FlexGet as a daemon.
- OS and version.
- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.
--->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/sites/descargas2020.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5 import re
6
7 from flexget import plugin
8 from flexget.event import event
9 from flexget.plugins.internal.urlrewriting import UrlRewritingError
10 from flexget.utils.requests import Session, TimedLimiter
11 from flexget.utils.soup import get_soup
12 from flexget.utils import requests
13
14 from flexget.entry import Entry
15 from flexget.utils.search import normalize_unicode
16
17 import unicodedata
18
19 log = logging.getLogger('descargas2020')
20
21 requests = Session()
22 requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})
23 requests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))
24
25 DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent'
26
27 class UrlRewriteDescargas2020(object):
28 """Descargas2020 urlrewriter and search."""
29
30 schema = {
31 'type': 'boolean',
32 'default': False
33 }
34
35 # urlrewriter API
36 def url_rewritable(self, task, entry):
37 url = entry['url']
38 rewritable_regex = '^http:\/\/(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com\/.*'
39 return re.match(rewritable_regex, url) and not url.endswith('.torrent')
40
41 # urlrewriter API
42 def url_rewrite(self, task, entry):
43 entry['url'] = self.parse_download_page(entry['url'], task)
44
45 @plugin.internet(log)
46 def parse_download_page(self, url, task):
47 log.verbose('Descargas2020 URL: %s', url)
48
49 try:
50 page = requests.get(url)
51 except requests.exceptions.RequestException as e:
52 raise UrlRewritingError(e)
53 try:
54 soup = get_soup(page.text)
55 except Exception as e:
56 raise UrlRewritingError(e)
57
58 torrent_id = None
59 url_format = DESCARGAS2020_TORRENT_FORMAT
60
61 torrent_id_prog = re.compile("(?:parametros\s*=\s*\n?)\s*{\s*\n(?:\s*'\w+'\s*:.*\n)+\s*'(?:torrentID|id)"
62 "'\s*:\s*'(\d+)'")
63 torrent_ids = soup.findAll(text=torrent_id_prog)
64 if torrent_ids:
65 match = torrent_id_prog.search(torrent_ids[0])
66 if match:
67 torrent_id = match.group(1)
68 if not torrent_id:
69 log.debug('torrent ID not found, searching openTorrent script')
70 torrent_id_prog = re.compile('function openTorrent.*\n.*\{.*(\n.*)+window\.location\.href =\s*\"(.*\/\d+_-.*[^\/])\/?\";')
71 torrent_ids = soup.findAll(text=torrent_id_prog)
72 if torrent_ids:
73 match = torrent_id_prog.search(torrent_ids[0])
74 if match:
75 torrent_id = match.group(2)
76 return torrent_id.replace('descargar-torrent', 'download') + '.torrent'
77
78 if not torrent_id:
79 raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)
80
81 return url_format.format(torrent_id)
82
83 def search(self, task, entry, config=None):
84 if not config:
85 log.debug('Descargas2020 disabled')
86 return set()
87 log.debug('Search Descargas2020')
88 url_search = 'http://descargas2020.com/buscar'
89 results = set()
90 for search_string in entry.get('search_strings', [entry['title']]):
91 query = normalize_unicode(search_string)
92 query = re.sub(' \(\d\d\d\d\)$', '', query)
93 log.debug('Searching Descargas2020 %s', query)
94 query = unicodedata.normalize('NFD', query).encode('ascii', 'ignore')
95 data = {'q': query}
96 try:
97 response = task.requests.post(url_search, data=data)
98 except requests.RequestException as e:
99 log.error('Error searching Descargas2020: %s', e)
100 return results
101 content = response.content
102 soup = get_soup(content)
103 soup2 = soup.find('ul', attrs={'class': 'buscar-list'})
104 children = soup2.findAll('a', href=True)
105 for child in children:
106 entry = Entry()
107 entry['url'] = child['href']
108 entry_title = child.find('h2')
109 if entry_title is None:
110 log.debug('Ignore empty entry')
111 continue
112 entry_title = entry_title.text
113 if not entry_title:
114 continue
115 try:
116 entry_quality_lan = re.search('.+ \[([^\]]+)\](\[[^\]]+\])+$', entry_title).group(1)
117 except AttributeError:
118 log.debug('Quality not found')
119 continue
120 entry_title = re.sub(' \[.+]$', '', entry_title)
121 entry['title'] = entry_title + ' ' + entry_quality_lan
122 results.add(entry)
123 log.debug('Finish search Descargas2020 with %d entries', len(results))
124 return results
125
126 @event('plugin.register')
127 def register_plugin():
128 plugin.register(UrlRewriteDescargas2020, 'descargas2020', interfaces=['urlrewriter', 'search'], api_ver=2)
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/sites/descargas2020.py b/flexget/plugins/sites/descargas2020.py
--- a/flexget/plugins/sites/descargas2020.py
+++ b/flexget/plugins/sites/descargas2020.py
@@ -22,7 +22,7 @@
requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})
requests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))
-DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent'
+DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/download/{:0>6}.torrent'
class UrlRewriteDescargas2020(object):
"""Descargas2020 urlrewriter and search."""
@@ -67,13 +67,10 @@
torrent_id = match.group(1)
if not torrent_id:
log.debug('torrent ID not found, searching openTorrent script')
- torrent_id_prog = re.compile('function openTorrent.*\n.*\{.*(\n.*)+window\.location\.href =\s*\"(.*\/\d+_-.*[^\/])\/?\";')
- torrent_ids = soup.findAll(text=torrent_id_prog)
- if torrent_ids:
- match = torrent_id_prog.search(torrent_ids[0])
- if match:
- torrent_id = match.group(2)
- return torrent_id.replace('descargar-torrent', 'download') + '.torrent'
+ match = re.search('function openTorrent.*\n.*\{.*(\n.*)+window\.location\.href =\s*\".*\/(\d+.*)\";',
+ page.text, re.MULTILINE)
+ if match:
+ torrent_id = match.group(2).rstrip('/')
if not torrent_id:
raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)
| {"golden_diff": "diff --git a/flexget/plugins/sites/descargas2020.py b/flexget/plugins/sites/descargas2020.py\n--- a/flexget/plugins/sites/descargas2020.py\n+++ b/flexget/plugins/sites/descargas2020.py\n@@ -22,7 +22,7 @@\n requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})\n requests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))\n \n-DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent'\n+DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/download/{:0>6}.torrent'\n \n class UrlRewriteDescargas2020(object):\n \"\"\"Descargas2020 urlrewriter and search.\"\"\"\n@@ -67,13 +67,10 @@\n torrent_id = match.group(1)\n if not torrent_id:\n log.debug('torrent ID not found, searching openTorrent script')\n- torrent_id_prog = re.compile('function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\"(.*\\/\\d+_-.*[^\\/])\\/?\\\";')\n- torrent_ids = soup.findAll(text=torrent_id_prog)\n- if torrent_ids:\n- match = torrent_id_prog.search(torrent_ids[0])\n- if match:\n- torrent_id = match.group(2)\n- return torrent_id.replace('descargar-torrent', 'download') + '.torrent'\n+ match = re.search('function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\".*\\/(\\d+.*)\\\";',\n+ page.text, re.MULTILINE)\n+ if match:\n+ torrent_id = match.group(2).rstrip('/')\n \n if not torrent_id:\n raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)\n", "issue": "descargas2020 plugin broken\n### Expected behaviour:\r\nit downloads torrent properly.\r\n\r\n### Actual behaviour:\r\n\r\n### Steps to reproduce:\r\nadd this rss http://feeds.feedburner.com/descargas2020new\r\nflexget execute\r\nyou will get:\r\n2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/\r\n2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None)\r\n2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved.\r\n\r\n#### Config:\r\n\r\n```\r\ntasks:\r\n series_sigo:\r\n rss:\r\n url: http://feeds.feedburner.com/descargas2020new\r\n link: link\r\n all_series: yes\r\n seen: local\r\n regexp:\r\n reject:\r\n - PCDVD\r\n accept_all: yes\r\n thetvdb_lookup: yes\r\n no_entries_ok: yes\r\n set:\r\n filename: \"{{title|pathscrub}}.torrent\"\r\n download: /home/flexget/torrent/\r\n```\r\n \r\n#### Log:\r\n\r\n2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/\r\n2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None)\r\n2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved.\r\n\r\n```\r\npaste log output here\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 2.17.20\r\n- Python version: 2.7.13\r\n- Installation method: i don't remember\r\n- Using daemon (yes/no): no\r\n- OS and version: debian 9 x64\r\n- Link to crash log:\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive 
information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.\r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport re\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils.requests import Session, TimedLimiter\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils import requests\n\nfrom flexget.entry import Entry\nfrom flexget.utils.search import normalize_unicode\n\nimport unicodedata\n\nlog = logging.getLogger('descargas2020')\n\nrequests = Session()\nrequests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})\nrequests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))\n\nDESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent'\n\nclass UrlRewriteDescargas2020(object):\n \"\"\"Descargas2020 urlrewriter and search.\"\"\"\n\n schema = {\n 'type': 'boolean',\n 'default': False\n }\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n rewritable_regex = '^http:\\/\\/(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com\\/.*'\n return re.match(rewritable_regex, url) and not url.endswith('.torrent')\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n entry['url'] = self.parse_download_page(entry['url'], task)\n\n @plugin.internet(log)\n def parse_download_page(self, url, task):\n log.verbose('Descargas2020 URL: %s', url)\n\n try:\n page = requests.get(url)\n except requests.exceptions.RequestException as e:\n raise UrlRewritingError(e)\n try:\n soup = get_soup(page.text)\n except Exception as e:\n raise UrlRewritingError(e)\n\n torrent_id = None\n url_format = DESCARGAS2020_TORRENT_FORMAT\n\n torrent_id_prog = re.compile(\"(?:parametros\\s*=\\s*\\n?)\\s*{\\s*\\n(?:\\s*'\\w+'\\s*:.*\\n)+\\s*'(?:torrentID|id)\"\n \"'\\s*:\\s*'(\\d+)'\")\n torrent_ids = soup.findAll(text=torrent_id_prog)\n if torrent_ids:\n match = torrent_id_prog.search(torrent_ids[0])\n if match:\n torrent_id = match.group(1)\n if not torrent_id:\n log.debug('torrent ID not found, searching openTorrent script')\n torrent_id_prog = re.compile('function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\"(.*\\/\\d+_-.*[^\\/])\\/?\\\";')\n torrent_ids = soup.findAll(text=torrent_id_prog)\n if torrent_ids:\n match = torrent_id_prog.search(torrent_ids[0])\n if match:\n torrent_id = match.group(2)\n return torrent_id.replace('descargar-torrent', 'download') 
+ '.torrent'\n\n if not torrent_id:\n raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)\n\n return url_format.format(torrent_id)\n\n def search(self, task, entry, config=None):\n if not config:\n log.debug('Descargas2020 disabled')\n return set()\n log.debug('Search Descargas2020')\n url_search = 'http://descargas2020.com/buscar'\n results = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n query = normalize_unicode(search_string)\n query = re.sub(' \\(\\d\\d\\d\\d\\)$', '', query)\n log.debug('Searching Descargas2020 %s', query)\n query = unicodedata.normalize('NFD', query).encode('ascii', 'ignore')\n data = {'q': query}\n try:\n response = task.requests.post(url_search, data=data)\n except requests.RequestException as e:\n log.error('Error searching Descargas2020: %s', e)\n return results\n content = response.content\n soup = get_soup(content)\n soup2 = soup.find('ul', attrs={'class': 'buscar-list'})\n children = soup2.findAll('a', href=True)\n for child in children:\n entry = Entry()\n entry['url'] = child['href']\n entry_title = child.find('h2')\n if entry_title is None:\n log.debug('Ignore empty entry')\n continue\n entry_title = entry_title.text\n if not entry_title:\n continue\n try:\n entry_quality_lan = re.search('.+ \\[([^\\]]+)\\](\\[[^\\]]+\\])+$', entry_title).group(1)\n except AttributeError:\n log.debug('Quality not found')\n continue\n entry_title = re.sub(' \\[.+]$', '', entry_title)\n entry['title'] = entry_title + ' ' + entry_quality_lan\n results.add(entry)\n log.debug('Finish search Descargas2020 with %d entries', len(results))\n return results\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteDescargas2020, 'descargas2020', interfaces=['urlrewriter', 'search'], api_ver=2)\n", "path": "flexget/plugins/sites/descargas2020.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport re\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils.requests import Session, TimedLimiter\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils import requests\n\nfrom flexget.entry import Entry\nfrom flexget.utils.search import normalize_unicode\n\nimport unicodedata\n\nlog = logging.getLogger('descargas2020')\n\nrequests = Session()\nrequests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})\nrequests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))\n\nDESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/download/{:0>6}.torrent'\n\nclass UrlRewriteDescargas2020(object):\n \"\"\"Descargas2020 urlrewriter and search.\"\"\"\n\n schema = {\n 'type': 'boolean',\n 'default': False\n }\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n rewritable_regex = '^http:\\/\\/(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com\\/.*'\n return re.match(rewritable_regex, url) and not url.endswith('.torrent')\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n entry['url'] = self.parse_download_page(entry['url'], task)\n\n @plugin.internet(log)\n def parse_download_page(self, url, task):\n log.verbose('Descargas2020 URL: %s', url)\n\n try:\n page = requests.get(url)\n except requests.exceptions.RequestException as e:\n raise UrlRewritingError(e)\n 
try:\n soup = get_soup(page.text)\n except Exception as e:\n raise UrlRewritingError(e)\n\n torrent_id = None\n url_format = DESCARGAS2020_TORRENT_FORMAT\n\n torrent_id_prog = re.compile(\"(?:parametros\\s*=\\s*\\n?)\\s*{\\s*\\n(?:\\s*'\\w+'\\s*:.*\\n)+\\s*'(?:torrentID|id)\"\n \"'\\s*:\\s*'(\\d+)'\")\n torrent_ids = soup.findAll(text=torrent_id_prog)\n if torrent_ids:\n match = torrent_id_prog.search(torrent_ids[0])\n if match:\n torrent_id = match.group(1)\n if not torrent_id:\n log.debug('torrent ID not found, searching openTorrent script')\n match = re.search('function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\".*\\/(\\d+.*)\\\";',\n page.text, re.MULTILINE)\n if match:\n torrent_id = match.group(2).rstrip('/')\n\n if not torrent_id:\n raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)\n\n return url_format.format(torrent_id)\n\n def search(self, task, entry, config=None):\n if not config:\n log.debug('Descargas2020 disabled')\n return set()\n log.debug('Search Descargas2020')\n url_search = 'http://descargas2020.com/buscar'\n results = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n query = normalize_unicode(search_string)\n query = re.sub(' \\(\\d\\d\\d\\d\\)$', '', query)\n log.debug('Searching Descargas2020 %s', query)\n query = unicodedata.normalize('NFD', query).encode('ascii', 'ignore')\n data = {'q': query}\n try:\n response = task.requests.post(url_search, data=data)\n except requests.RequestException as e:\n log.error('Error searching Descargas2020: %s', e)\n return results\n content = response.content\n soup = get_soup(content)\n soup2 = soup.find('ul', attrs={'class': 'buscar-list'})\n children = soup2.findAll('a', href=True)\n for child in children:\n entry = Entry()\n entry['url'] = child['href']\n entry_title = child.find('h2')\n if entry_title is None:\n log.debug('Ignore empty entry')\n continue\n entry_title = entry_title.text\n if not entry_title:\n continue\n try:\n entry_quality_lan = re.search('.+ \\[([^\\]]+)\\](\\[[^\\]]+\\])+$', entry_title).group(1)\n except AttributeError:\n log.debug('Quality not found')\n continue\n entry_title = re.sub(' \\[.+]$', '', entry_title)\n entry['title'] = entry_title + ' ' + entry_quality_lan\n results.add(entry)\n log.debug('Finish search Descargas2020 with %d entries', len(results))\n return results\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteDescargas2020, 'descargas2020', interfaces=['urlrewriter', 'search'], api_ver=2)\n", "path": "flexget/plugins/sites/descargas2020.py"}]} | 2,743 | 472 |
gh_patches_debug_2192 | rasdani/github-patches | git_diff | LMFDB__lmfdb-5179 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PIP dependencies
We have several deprecated dependencies that we should fix ASAP
```
flask<=1.1.4
markupsafe<=2.0.1
itsdangerous<=2.0.1
```
in particular, this prevents using lmfdb in an environment with jupyterlab installed, which is something we would like to have working on a short time basis.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lmfdb/local_fields/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from lmfdb.app import app
3 from lmfdb.logger import make_logger
4 from flask import Blueprint, request, redirect
5
6 local_fields_page = Blueprint("local_fields", __name__, template_folder='templates', static_folder="static")
7 logger = make_logger(local_fields_page)
8
9
10 @local_fields_page.context_processor
11 def body_class():
12 return {'body_class': 'local_fields'}
13
14 from . import main
15 assert main
16
17 from urllib.parse import urlparse, urlunparse
18
19
20 @local_fields_page.before_request
21 def redirect_local():
22 urlparts = urlparse(request.url)
23 if 'LocalNumberField' in urlparts.path:
24 urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))
25 return redirect(urlunparse(urlparts), 301)
26 return
27
28
29 app.register_blueprint(local_fields_page, url_prefix="/padicField")
30 app.register_blueprint(local_fields_page, url_prefix="/LocalNumberField")
31
32 # API2 has been disabled for now
33 #from lmfdb.api2.searchers import register_search_function
34 #register_search_function(
35 # "$p$-adic_fields",
36 # "$p$-adic fields",
37 # "Search over $p$-adic fields",
38 # auto_search = 'lf_fields'
39 #)
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lmfdb/local_fields/__init__.py b/lmfdb/local_fields/__init__.py
--- a/lmfdb/local_fields/__init__.py
+++ b/lmfdb/local_fields/__init__.py
@@ -27,7 +27,6 @@
app.register_blueprint(local_fields_page, url_prefix="/padicField")
-app.register_blueprint(local_fields_page, url_prefix="/LocalNumberField")
# API2 has been disabled for now
#from lmfdb.api2.searchers import register_search_function
| {"golden_diff": "diff --git a/lmfdb/local_fields/__init__.py b/lmfdb/local_fields/__init__.py\n--- a/lmfdb/local_fields/__init__.py\n+++ b/lmfdb/local_fields/__init__.py\n@@ -27,7 +27,6 @@\n \n \n app.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\n-app.register_blueprint(local_fields_page, url_prefix=\"/LocalNumberField\")\n \n # API2 has been disabled for now\n #from lmfdb.api2.searchers import register_search_function\n", "issue": "PIP dependencies\nWe have several deprecated dependencies that we should fix ASAP\r\n```\r\nflask<=1.1.4\r\nmarkupsafe<=2.0.1\r\nitsdangerous<=2.0.1\r\n```\r\n\r\nin particular, this prevents using lmfdb in an environment with jupyterlab installed, which is something we would like to have working on a short time basis. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lmfdb.app import app\nfrom lmfdb.logger import make_logger\nfrom flask import Blueprint, request, redirect\n\nlocal_fields_page = Blueprint(\"local_fields\", __name__, template_folder='templates', static_folder=\"static\")\nlogger = make_logger(local_fields_page)\n\n\n@local_fields_page.context_processor\ndef body_class():\n return {'body_class': 'local_fields'}\n\nfrom . import main\nassert main\n\nfrom urllib.parse import urlparse, urlunparse\n\n\n@local_fields_page.before_request\ndef redirect_local():\n urlparts = urlparse(request.url)\n if 'LocalNumberField' in urlparts.path:\n urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))\n return redirect(urlunparse(urlparts), 301)\n return\n\n\napp.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\napp.register_blueprint(local_fields_page, url_prefix=\"/LocalNumberField\")\n\n# API2 has been disabled for now\n#from lmfdb.api2.searchers import register_search_function\n#register_search_function(\n# \"$p$-adic_fields\",\n# \"$p$-adic fields\",\n# \"Search over $p$-adic fields\",\n# auto_search = 'lf_fields'\n#)\n", "path": "lmfdb/local_fields/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lmfdb.app import app\nfrom lmfdb.logger import make_logger\nfrom flask import Blueprint, request, redirect\n\nlocal_fields_page = Blueprint(\"local_fields\", __name__, template_folder='templates', static_folder=\"static\")\nlogger = make_logger(local_fields_page)\n\n\n@local_fields_page.context_processor\ndef body_class():\n return {'body_class': 'local_fields'}\n\nfrom . import main\nassert main\n\nfrom urllib.parse import urlparse, urlunparse\n\n\n@local_fields_page.before_request\ndef redirect_local():\n urlparts = urlparse(request.url)\n if 'LocalNumberField' in urlparts.path:\n urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))\n return redirect(urlunparse(urlparts), 301)\n return\n\n\napp.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\n\n# API2 has been disabled for now\n#from lmfdb.api2.searchers import register_search_function\n#register_search_function(\n# \"$p$-adic_fields\",\n# \"$p$-adic fields\",\n# \"Search over $p$-adic fields\",\n# auto_search = 'lf_fields'\n#)\n", "path": "lmfdb/local_fields/__init__.py"}]} | 695 | 113 |
gh_patches_debug_34613 | rasdani/github-patches | git_diff | svthalia__concrexit-1757 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Event registrations cannot be deleted from admin API
### Describe the bug
Event registrations cannot be deleted from admin API. Users are able to delete event registrations from the admin, so they should also be allowed to do it via the API
### How to reproduce
1. Try to delete an event registration with the API (as admin)
2. You can't
### Expected behaviour
You should be able to do it
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/api/v2/admin/views.py`
Content:
```
1 import json
2
3 from django.http import HttpResponse, Http404
4 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
5 from rest_framework import status
6 from rest_framework.generics import get_object_or_404
7 from rest_framework.views import APIView
8 from rest_framework import filters as framework_filters
9
10 from events import services
11 from events.api.v2.admin import filters
12 from events.api.v2.admin.permissions import IsOrganiser
13 from events.api.v2.admin.serializers.event import (
14 EventListAdminSerializer,
15 EventAdminSerializer,
16 )
17 from events.api.v2.admin.serializers.event_registration import (
18 EventRegistrationAdminSerializer,
19 )
20 from events.models import Event, EventRegistration
21 from thaliawebsite.api.v2.admin.views import (
22 AdminListAPIView,
23 AdminRetrieveAPIView,
24 AdminCreateAPIView,
25 AdminUpdateAPIView,
26 AdminDestroyAPIView,
27 AdminPermissionsMixin,
28 )
29 import events.api.v2.filters as normal_filters
30
31
32 class EventAdminListCreateAPIView(AdminListAPIView, AdminCreateAPIView):
33 queryset = Event.objects.prefetch_related("organiser")
34 permission_classes = [IsAuthenticatedOrTokenHasScope]
35 required_scopes = ["events:admin"]
36 filter_backends = [
37 framework_filters.OrderingFilter,
38 normal_filters.CategoryFilter,
39 normal_filters.OrganiserFilter,
40 normal_filters.EventDateFilter,
41 filters.PublishedFilter,
42 ]
43 ordering_fields = (
44 "start",
45 "end",
46 "published",
47 "registration_start",
48 "registration_end",
49 )
50
51 def get_serializer_class(self):
52 if self.request.method.lower() == "post":
53 return EventAdminSerializer
54 return EventListAdminSerializer
55
56
57 class EventAdminDetailAPIView(
58 AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView
59 ):
60 queryset = Event.objects.all()
61 serializer_class = EventAdminSerializer
62 permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]
63 required_scopes = ["events:admin"]
64
65
66 class EventRegistrationAdminListView(AdminListAPIView, AdminCreateAPIView):
67 """Returns a list of registrations."""
68
69 serializer_class = EventRegistrationAdminSerializer
70 permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]
71 required_scopes = ["events:admin"]
72 filter_backends = (
73 framework_filters.OrderingFilter,
74 filters.EventRegistrationCancelledFilter,
75 )
76 ordering_fields = ("queue_position", "date", "date_cancelled")
77
78 def get_queryset(self):
79 event = get_object_or_404(Event, pk=self.kwargs.get("pk"))
80 if event:
81 return EventRegistration.objects.filter(event_id=event).prefetch_related(
82 "member", "member__profile"
83 )
84 return EventRegistration.objects.none()
85
86
87 class EventRegistrationAdminDetailView(AdminRetrieveAPIView, AdminUpdateAPIView):
88 """Returns details of an event registration."""
89
90 serializer_class = EventRegistrationAdminSerializer
91 queryset = EventRegistration.objects.all()
92 permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]
93 required_scopes = ["events:admin"]
94 event_lookup_field = "event_id"
95
96 def get_queryset(self):
97 return super().get_queryset().filter(event=self.kwargs["event_id"])
98
99
100 class EventRegistrationAdminFieldsView(AdminPermissionsMixin, APIView):
101 """Returns details of an event registration."""
102
103 permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]
104 required_scopes = ["events:admin"]
105
106 def get_object(self):
107 event_registration = get_object_or_404(
108 EventRegistration,
109 event=self.kwargs["event_id"],
110 pk=self.kwargs["registration_id"],
111 )
112
113 if not event_registration.event.has_fields:
114 raise Http404
115
116 return event_registration
117
118 def get(self, request, *args, **kwargs):
119 return HttpResponse(
120 content=json.dumps(
121 services.registration_fields(request, registration=self.get_object())
122 ),
123 status=status.HTTP_200_OK,
124 )
125
126 def put(self, request, *args, **kwargs):
127 original = services.registration_fields(request, registration=self.get_object())
128 required_keys = set(original.keys()) - set(request.data.keys())
129 if len(required_keys) > 0:
130 return HttpResponse(
131 content=f"Missing keys '{', '.join(required_keys)}' in request",
132 status=status.HTTP_400_BAD_REQUEST,
133 )
134 services.update_registration(
135 registration=self.get_object(), field_values=request.data.items()
136 )
137 return HttpResponse(
138 content=json.dumps(
139 services.registration_fields(request, registration=self.get_object())
140 ),
141 status=status.HTTP_200_OK,
142 )
143
144 def patch(self, request, *args, **kwargs):
145 services.update_registration(
146 registration=self.get_object(), field_values=request.data.items()
147 )
148 return HttpResponse(
149 content=json.dumps(
150 services.registration_fields(request, registration=self.get_object())
151 ),
152 status=status.HTTP_200_OK,
153 )
154
```
Path: `website/events/api/v2/views.py`
Content:
```
1 from django.http import HttpResponse
2 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
3 from rest_framework import filters as framework_filters
4 from rest_framework import status
5 from rest_framework.exceptions import PermissionDenied
6 from rest_framework.generics import ListAPIView, RetrieveAPIView, get_object_or_404
7 from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly
8 from rest_framework.response import Response
9 from rest_framework.utils import json
10 from rest_framework.views import APIView
11
12 from events import services
13 from events.api.v2 import filters
14 from events.api.v2.serializers.event import EventSerializer
15 from events.api.v2.serializers.event_registration import EventRegistrationSerializer
16 from events.exceptions import RegistrationError
17 from events.models import Event, EventRegistration
18 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod
19 from thaliawebsite.api.v2.serializers import EmptySerializer
20
21
22 class EventListView(ListAPIView):
23 """Returns an overview of all upcoming events."""
24
25 serializer_class = EventSerializer
26 queryset = Event.objects.filter(published=True)
27 filter_backends = (
28 framework_filters.OrderingFilter,
29 framework_filters.SearchFilter,
30 filters.EventDateFilter,
31 filters.CategoryFilter,
32 filters.OrganiserFilter,
33 )
34 ordering_fields = ("start", "end")
35 search_fields = ("title",)
36 permission_classes = [IsAuthenticatedOrTokenHasScope]
37 required_scopes = ["events:read"]
38
39
40 class EventDetailView(RetrieveAPIView):
41 """Returns details of an event."""
42
43 serializer_class = EventSerializer
44 queryset = Event.objects.filter(published=True)
45 permission_classes = [IsAuthenticatedOrTokenHasScope]
46 required_scopes = ["events:read"]
47
48
49 class EventRegistrationsView(ListAPIView):
50 """Returns a list of registrations."""
51
52 serializer_class = EventRegistrationSerializer
53 permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]
54 required_scopes_per_method = {
55 "GET": ["events:read"],
56 "POST": ["events:register"],
57 "DELETE": ["events:register"],
58 }
59 filter_backends = (framework_filters.OrderingFilter,)
60 ordering_fields = (
61 "date",
62 "member",
63 )
64
65 def __init__(self):
66 super(EventRegistrationsView, self).__init__()
67 self.event = None
68
69 def get_serializer_class(self):
70 if self.request.method.lower() == "post":
71 return EmptySerializer
72 return super().get_serializer_class()
73
74 def get_queryset(self):
75 if self.event:
76 return EventRegistration.objects.filter(
77 event=self.event, date_cancelled=None
78 )[: self.event.max_participants]
79 return EventRegistration.objects.none()
80
81 def initial(self, request, *args, **kwargs):
82 """Run anything that needs to occur prior to calling the method handler."""
83 self.format_kwarg = self.get_format_suffix(**kwargs)
84
85 # Perform content negotiation and store the accepted info on the request
86 neg = self.perform_content_negotiation(request)
87 request.accepted_renderer, request.accepted_media_type = neg
88
89 # Determine the API version, if versioning is in use.
90 version, scheme = self.determine_version(request, *args, **kwargs)
91 request.version, request.versioning_scheme = version, scheme
92
93 # Ensure that the incoming request is permitted
94 self.perform_authentication(request)
95
96 self.event = get_object_or_404(Event, pk=self.kwargs.get("pk"), published=True)
97
98 self.check_permissions(request)
99 self.check_throttles(request)
100
101 def post(self, request, *args, **kwargs):
102 try:
103 registration = services.create_registration(request.member, self.event)
104 serializer = EventRegistrationSerializer(
105 instance=registration, context=self.get_serializer_context()
106 )
107 return Response(serializer.data, status=status.HTTP_201_CREATED)
108 except RegistrationError as e:
109 raise PermissionDenied(detail=e) from e
110
111
112 class EventRegistrationDetailView(RetrieveAPIView):
113 """Returns details of an event registration."""
114
115 serializer_class = EventRegistrationSerializer
116 queryset = EventRegistration.objects.all()
117 permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]
118 required_scopes_per_method = {
119 "GET": ["events:read"],
120 "DELETE": ["events:register"],
121 }
122
123 def get_queryset(self):
124 return (
125 super()
126 .get_queryset()
127 .filter(
128 event=self.kwargs["event_id"],
129 event__published=True,
130 date_cancelled=None,
131 )
132 )
133
134 def get_serializer(self, *args, **kwargs):
135 if (
136 len(args) > 0
137 and isinstance(args[0], EventRegistration)
138 and args[0].member == self.request.member
139 ):
140 kwargs.update(
141 fields=(
142 "pk",
143 "member",
144 "name",
145 "present",
146 "queue_position",
147 "date",
148 "payment",
149 )
150 )
151 return super().get_serializer(*args, **kwargs)
152
153 def delete(self, request, *args, **kwargs):
154 if self.get_object().member != request.member:
155 raise PermissionDenied()
156
157 try:
158 services.cancel_registration(request.member, self.get_object().event)
159 return Response(status=status.HTTP_204_NO_CONTENT)
160 except RegistrationError as e:
161 raise PermissionDenied(detail=e) from e
162
163
164 class EventRegistrationFieldsView(APIView):
165 """Returns details of an event registration."""
166
167 permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]
168 required_scopes_per_method = {
169 "GET": ["events:read"],
170 "PUT": ["events:register"],
171 "PATCH": ["events:register"],
172 }
173
174 def get_object(self):
175 return get_object_or_404(
176 EventRegistration,
177 event=self.kwargs["event_id"],
178 event__published=True,
179 pk=self.kwargs["registration_id"],
180 member=self.request.member,
181 )
182
183 def get(self, request, *args, **kwargs):
184 return HttpResponse(
185 content=json.dumps(
186 services.registration_fields(request, registration=self.get_object())
187 ),
188 status=status.HTTP_200_OK,
189 )
190
191 def put(self, request, *args, **kwargs):
192 original = services.registration_fields(request, registration=self.get_object())
193 required_keys = set(original.keys()) - set(request.data.keys())
194 if len(required_keys) > 0:
195 return HttpResponse(
196 content=f"Missing keys '{', '.join(required_keys)}' in request",
197 status=status.HTTP_400_BAD_REQUEST,
198 )
199 services.update_registration(
200 registration=self.get_object(), field_values=request.data.items()
201 )
202 return HttpResponse(
203 content=json.dumps(
204 services.registration_fields(request, registration=self.get_object())
205 ),
206 status=status.HTTP_200_OK,
207 )
208
209 def patch(self, request, *args, **kwargs):
210 services.update_registration(
211 registration=self.get_object(), field_values=request.data.items()
212 )
213 return HttpResponse(
214 content=json.dumps(
215 services.registration_fields(request, registration=self.get_object())
216 ),
217 status=status.HTTP_200_OK,
218 )
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/events/api/v2/admin/views.py b/website/events/api/v2/admin/views.py
--- a/website/events/api/v2/admin/views.py
+++ b/website/events/api/v2/admin/views.py
@@ -84,7 +84,9 @@
return EventRegistration.objects.none()
-class EventRegistrationAdminDetailView(AdminRetrieveAPIView, AdminUpdateAPIView):
+class EventRegistrationAdminDetailView(
+ AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView
+):
"""Returns details of an event registration."""
serializer_class = EventRegistrationAdminSerializer
diff --git a/website/events/api/v2/views.py b/website/events/api/v2/views.py
--- a/website/events/api/v2/views.py
+++ b/website/events/api/v2/views.py
@@ -3,7 +3,12 @@
from rest_framework import filters as framework_filters
from rest_framework import status
from rest_framework.exceptions import PermissionDenied
-from rest_framework.generics import ListAPIView, RetrieveAPIView, get_object_or_404
+from rest_framework.generics import (
+ ListAPIView,
+ RetrieveAPIView,
+ get_object_or_404,
+ DestroyAPIView,
+)
from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly
from rest_framework.response import Response
from rest_framework.utils import json
@@ -46,7 +51,7 @@
required_scopes = ["events:read"]
-class EventRegistrationsView(ListAPIView):
+class EventRegistrationsView(ListAPIView, DestroyAPIView):
"""Returns a list of registrations."""
serializer_class = EventRegistrationSerializer
@@ -108,8 +113,15 @@
except RegistrationError as e:
raise PermissionDenied(detail=e) from e
+ def delete(self, request, *args, **kwargs):
+ try:
+ services.cancel_registration(request.member, self.event)
+ return Response(status=status.HTTP_204_NO_CONTENT)
+ except RegistrationError as e:
+ raise PermissionDenied(detail=e) from e
+
-class EventRegistrationDetailView(RetrieveAPIView):
+class EventRegistrationDetailView(RetrieveAPIView, DestroyAPIView):
"""Returns details of an event registration."""
serializer_class = EventRegistrationSerializer
| {"golden_diff": "diff --git a/website/events/api/v2/admin/views.py b/website/events/api/v2/admin/views.py\n--- a/website/events/api/v2/admin/views.py\n+++ b/website/events/api/v2/admin/views.py\n@@ -84,7 +84,9 @@\n return EventRegistration.objects.none()\n \n \n-class EventRegistrationAdminDetailView(AdminRetrieveAPIView, AdminUpdateAPIView):\n+class EventRegistrationAdminDetailView(\n+ AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView\n+):\n \"\"\"Returns details of an event registration.\"\"\"\n \n serializer_class = EventRegistrationAdminSerializer\ndiff --git a/website/events/api/v2/views.py b/website/events/api/v2/views.py\n--- a/website/events/api/v2/views.py\n+++ b/website/events/api/v2/views.py\n@@ -3,7 +3,12 @@\n from rest_framework import filters as framework_filters\n from rest_framework import status\n from rest_framework.exceptions import PermissionDenied\n-from rest_framework.generics import ListAPIView, RetrieveAPIView, get_object_or_404\n+from rest_framework.generics import (\n+ ListAPIView,\n+ RetrieveAPIView,\n+ get_object_or_404,\n+ DestroyAPIView,\n+)\n from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\n from rest_framework.response import Response\n from rest_framework.utils import json\n@@ -46,7 +51,7 @@\n required_scopes = [\"events:read\"]\n \n \n-class EventRegistrationsView(ListAPIView):\n+class EventRegistrationsView(ListAPIView, DestroyAPIView):\n \"\"\"Returns a list of registrations.\"\"\"\n \n serializer_class = EventRegistrationSerializer\n@@ -108,8 +113,15 @@\n except RegistrationError as e:\n raise PermissionDenied(detail=e) from e\n \n+ def delete(self, request, *args, **kwargs):\n+ try:\n+ services.cancel_registration(request.member, self.event)\n+ return Response(status=status.HTTP_204_NO_CONTENT)\n+ except RegistrationError as e:\n+ raise PermissionDenied(detail=e) from e\n+\n \n-class EventRegistrationDetailView(RetrieveAPIView):\n+class EventRegistrationDetailView(RetrieveAPIView, DestroyAPIView):\n \"\"\"Returns details of an event registration.\"\"\"\n \n serializer_class = EventRegistrationSerializer\n", "issue": "Event registrations cannot be deleted from admin API\n### Describe the bug\r\nEvent registrations cannot be deleted from admin API. Users are able to delete event registrations from the admin, so they should also be allowed to do it via the API\r\n\r\n### How to reproduce\r\n1. Try to delete an event registration with the API (as admin)\r\n2. 
You can't\r\n\r\n### Expected behaviour\r\nYou should be able to do it\r\n\r\n\n", "before_files": [{"content": "import json\n\nfrom django.http import HttpResponse, Http404\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import status\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework import filters as framework_filters\n\nfrom events import services\nfrom events.api.v2.admin import filters\nfrom events.api.v2.admin.permissions import IsOrganiser\nfrom events.api.v2.admin.serializers.event import (\n EventListAdminSerializer,\n EventAdminSerializer,\n)\nfrom events.api.v2.admin.serializers.event_registration import (\n EventRegistrationAdminSerializer,\n)\nfrom events.models import Event, EventRegistration\nfrom thaliawebsite.api.v2.admin.views import (\n AdminListAPIView,\n AdminRetrieveAPIView,\n AdminCreateAPIView,\n AdminUpdateAPIView,\n AdminDestroyAPIView,\n AdminPermissionsMixin,\n)\nimport events.api.v2.filters as normal_filters\n\n\nclass EventAdminListCreateAPIView(AdminListAPIView, AdminCreateAPIView):\n queryset = Event.objects.prefetch_related(\"organiser\")\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n filter_backends = [\n framework_filters.OrderingFilter,\n normal_filters.CategoryFilter,\n normal_filters.OrganiserFilter,\n normal_filters.EventDateFilter,\n filters.PublishedFilter,\n ]\n ordering_fields = (\n \"start\",\n \"end\",\n \"published\",\n \"registration_start\",\n \"registration_end\",\n )\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"post\":\n return EventAdminSerializer\n return EventListAdminSerializer\n\n\nclass EventAdminDetailAPIView(\n AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView\n):\n queryset = Event.objects.all()\n serializer_class = EventAdminSerializer\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n\n\nclass EventRegistrationAdminListView(AdminListAPIView, AdminCreateAPIView):\n \"\"\"Returns a list of registrations.\"\"\"\n\n serializer_class = EventRegistrationAdminSerializer\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.EventRegistrationCancelledFilter,\n )\n ordering_fields = (\"queue_position\", \"date\", \"date_cancelled\")\n\n def get_queryset(self):\n event = get_object_or_404(Event, pk=self.kwargs.get(\"pk\"))\n if event:\n return EventRegistration.objects.filter(event_id=event).prefetch_related(\n \"member\", \"member__profile\"\n )\n return EventRegistration.objects.none()\n\n\nclass EventRegistrationAdminDetailView(AdminRetrieveAPIView, AdminUpdateAPIView):\n \"\"\"Returns details of an event registration.\"\"\"\n\n serializer_class = EventRegistrationAdminSerializer\n queryset = EventRegistration.objects.all()\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n event_lookup_field = \"event_id\"\n\n def get_queryset(self):\n return super().get_queryset().filter(event=self.kwargs[\"event_id\"])\n\n\nclass EventRegistrationAdminFieldsView(AdminPermissionsMixin, APIView):\n \"\"\"Returns details of an event registration.\"\"\"\n\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n\n def get_object(self):\n event_registration = 
get_object_or_404(\n EventRegistration,\n event=self.kwargs[\"event_id\"],\n pk=self.kwargs[\"registration_id\"],\n )\n\n if not event_registration.event.has_fields:\n raise Http404\n\n return event_registration\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n\n def put(self, request, *args, **kwargs):\n original = services.registration_fields(request, registration=self.get_object())\n required_keys = set(original.keys()) - set(request.data.keys())\n if len(required_keys) > 0:\n return HttpResponse(\n content=f\"Missing keys '{', '.join(required_keys)}' in request\",\n status=status.HTTP_400_BAD_REQUEST,\n )\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n\n def patch(self, request, *args, **kwargs):\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n", "path": "website/events/api/v2/admin/views.py"}, {"content": "from django.http import HttpResponse\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters as framework_filters\nfrom rest_framework import status\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView, get_object_or_404\nfrom rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\nfrom rest_framework.response import Response\nfrom rest_framework.utils import json\nfrom rest_framework.views import APIView\n\nfrom events import services\nfrom events.api.v2 import filters\nfrom events.api.v2.serializers.event import EventSerializer\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.exceptions import RegistrationError\nfrom events.models import Event, EventRegistration\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\nfrom thaliawebsite.api.v2.serializers import EmptySerializer\n\n\nclass EventListView(ListAPIView):\n \"\"\"Returns an overview of all upcoming events.\"\"\"\n\n serializer_class = EventSerializer\n queryset = Event.objects.filter(published=True)\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.EventDateFilter,\n filters.CategoryFilter,\n filters.OrganiserFilter,\n )\n ordering_fields = (\"start\", \"end\")\n search_fields = (\"title\",)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:read\"]\n\n\nclass EventDetailView(RetrieveAPIView):\n \"\"\"Returns details of an event.\"\"\"\n\n serializer_class = EventSerializer\n queryset = Event.objects.filter(published=True)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:read\"]\n\n\nclass EventRegistrationsView(ListAPIView):\n \"\"\"Returns a list of registrations.\"\"\"\n\n serializer_class = EventRegistrationSerializer\n permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]\n required_scopes_per_method = {\n \"GET\": [\"events:read\"],\n \"POST\": [\"events:register\"],\n \"DELETE\": 
[\"events:register\"],\n }\n filter_backends = (framework_filters.OrderingFilter,)\n ordering_fields = (\n \"date\",\n \"member\",\n )\n\n def __init__(self):\n super(EventRegistrationsView, self).__init__()\n self.event = None\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"post\":\n return EmptySerializer\n return super().get_serializer_class()\n\n def get_queryset(self):\n if self.event:\n return EventRegistration.objects.filter(\n event=self.event, date_cancelled=None\n )[: self.event.max_participants]\n return EventRegistration.objects.none()\n\n def initial(self, request, *args, **kwargs):\n \"\"\"Run anything that needs to occur prior to calling the method handler.\"\"\"\n self.format_kwarg = self.get_format_suffix(**kwargs)\n\n # Perform content negotiation and store the accepted info on the request\n neg = self.perform_content_negotiation(request)\n request.accepted_renderer, request.accepted_media_type = neg\n\n # Determine the API version, if versioning is in use.\n version, scheme = self.determine_version(request, *args, **kwargs)\n request.version, request.versioning_scheme = version, scheme\n\n # Ensure that the incoming request is permitted\n self.perform_authentication(request)\n\n self.event = get_object_or_404(Event, pk=self.kwargs.get(\"pk\"), published=True)\n\n self.check_permissions(request)\n self.check_throttles(request)\n\n def post(self, request, *args, **kwargs):\n try:\n registration = services.create_registration(request.member, self.event)\n serializer = EventRegistrationSerializer(\n instance=registration, context=self.get_serializer_context()\n )\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except RegistrationError as e:\n raise PermissionDenied(detail=e) from e\n\n\nclass EventRegistrationDetailView(RetrieveAPIView):\n \"\"\"Returns details of an event registration.\"\"\"\n\n serializer_class = EventRegistrationSerializer\n queryset = EventRegistration.objects.all()\n permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]\n required_scopes_per_method = {\n \"GET\": [\"events:read\"],\n \"DELETE\": [\"events:register\"],\n }\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .filter(\n event=self.kwargs[\"event_id\"],\n event__published=True,\n date_cancelled=None,\n )\n )\n\n def get_serializer(self, *args, **kwargs):\n if (\n len(args) > 0\n and isinstance(args[0], EventRegistration)\n and args[0].member == self.request.member\n ):\n kwargs.update(\n fields=(\n \"pk\",\n \"member\",\n \"name\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"payment\",\n )\n )\n return super().get_serializer(*args, **kwargs)\n\n def delete(self, request, *args, **kwargs):\n if self.get_object().member != request.member:\n raise PermissionDenied()\n\n try:\n services.cancel_registration(request.member, self.get_object().event)\n return Response(status=status.HTTP_204_NO_CONTENT)\n except RegistrationError as e:\n raise PermissionDenied(detail=e) from e\n\n\nclass EventRegistrationFieldsView(APIView):\n \"\"\"Returns details of an event registration.\"\"\"\n\n permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]\n required_scopes_per_method = {\n \"GET\": [\"events:read\"],\n \"PUT\": [\"events:register\"],\n \"PATCH\": [\"events:register\"],\n }\n\n def get_object(self):\n return get_object_or_404(\n EventRegistration,\n event=self.kwargs[\"event_id\"],\n event__published=True,\n pk=self.kwargs[\"registration_id\"],\n member=self.request.member,\n )\n\n def get(self, request, *args, 
**kwargs):\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n\n def put(self, request, *args, **kwargs):\n original = services.registration_fields(request, registration=self.get_object())\n required_keys = set(original.keys()) - set(request.data.keys())\n if len(required_keys) > 0:\n return HttpResponse(\n content=f\"Missing keys '{', '.join(required_keys)}' in request\",\n status=status.HTTP_400_BAD_REQUEST,\n )\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n\n def patch(self, request, *args, **kwargs):\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n", "path": "website/events/api/v2/views.py"}], "after_files": [{"content": "import json\n\nfrom django.http import HttpResponse, Http404\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import status\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework import filters as framework_filters\n\nfrom events import services\nfrom events.api.v2.admin import filters\nfrom events.api.v2.admin.permissions import IsOrganiser\nfrom events.api.v2.admin.serializers.event import (\n EventListAdminSerializer,\n EventAdminSerializer,\n)\nfrom events.api.v2.admin.serializers.event_registration import (\n EventRegistrationAdminSerializer,\n)\nfrom events.models import Event, EventRegistration\nfrom thaliawebsite.api.v2.admin.views import (\n AdminListAPIView,\n AdminRetrieveAPIView,\n AdminCreateAPIView,\n AdminUpdateAPIView,\n AdminDestroyAPIView,\n AdminPermissionsMixin,\n)\nimport events.api.v2.filters as normal_filters\n\n\nclass EventAdminListCreateAPIView(AdminListAPIView, AdminCreateAPIView):\n queryset = Event.objects.prefetch_related(\"organiser\")\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n filter_backends = [\n framework_filters.OrderingFilter,\n normal_filters.CategoryFilter,\n normal_filters.OrganiserFilter,\n normal_filters.EventDateFilter,\n filters.PublishedFilter,\n ]\n ordering_fields = (\n \"start\",\n \"end\",\n \"published\",\n \"registration_start\",\n \"registration_end\",\n )\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"post\":\n return EventAdminSerializer\n return EventListAdminSerializer\n\n\nclass EventAdminDetailAPIView(\n AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView\n):\n queryset = Event.objects.all()\n serializer_class = EventAdminSerializer\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n\n\nclass EventRegistrationAdminListView(AdminListAPIView, AdminCreateAPIView):\n \"\"\"Returns a list of registrations.\"\"\"\n\n serializer_class = EventRegistrationAdminSerializer\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.EventRegistrationCancelledFilter,\n )\n ordering_fields = (\"queue_position\", \"date\", 
\"date_cancelled\")\n\n def get_queryset(self):\n event = get_object_or_404(Event, pk=self.kwargs.get(\"pk\"))\n if event:\n return EventRegistration.objects.filter(event_id=event).prefetch_related(\n \"member\", \"member__profile\"\n )\n return EventRegistration.objects.none()\n\n\nclass EventRegistrationAdminDetailView(\n AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView\n):\n \"\"\"Returns details of an event registration.\"\"\"\n\n serializer_class = EventRegistrationAdminSerializer\n queryset = EventRegistration.objects.all()\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n event_lookup_field = \"event_id\"\n\n def get_queryset(self):\n return super().get_queryset().filter(event=self.kwargs[\"event_id\"])\n\n\nclass EventRegistrationAdminFieldsView(AdminPermissionsMixin, APIView):\n \"\"\"Returns details of an event registration.\"\"\"\n\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n\n def get_object(self):\n event_registration = get_object_or_404(\n EventRegistration,\n event=self.kwargs[\"event_id\"],\n pk=self.kwargs[\"registration_id\"],\n )\n\n if not event_registration.event.has_fields:\n raise Http404\n\n return event_registration\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n\n def put(self, request, *args, **kwargs):\n original = services.registration_fields(request, registration=self.get_object())\n required_keys = set(original.keys()) - set(request.data.keys())\n if len(required_keys) > 0:\n return HttpResponse(\n content=f\"Missing keys '{', '.join(required_keys)}' in request\",\n status=status.HTTP_400_BAD_REQUEST,\n )\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n\n def patch(self, request, *args, **kwargs):\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n", "path": "website/events/api/v2/admin/views.py"}, {"content": "from django.http import HttpResponse\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters as framework_filters\nfrom rest_framework import status\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n get_object_or_404,\n DestroyAPIView,\n)\nfrom rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\nfrom rest_framework.response import Response\nfrom rest_framework.utils import json\nfrom rest_framework.views import APIView\n\nfrom events import services\nfrom events.api.v2 import filters\nfrom events.api.v2.serializers.event import EventSerializer\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.exceptions import RegistrationError\nfrom events.models import Event, EventRegistration\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\nfrom thaliawebsite.api.v2.serializers import EmptySerializer\n\n\nclass 
EventListView(ListAPIView):\n \"\"\"Returns an overview of all upcoming events.\"\"\"\n\n serializer_class = EventSerializer\n queryset = Event.objects.filter(published=True)\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.EventDateFilter,\n filters.CategoryFilter,\n filters.OrganiserFilter,\n )\n ordering_fields = (\"start\", \"end\")\n search_fields = (\"title\",)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:read\"]\n\n\nclass EventDetailView(RetrieveAPIView):\n \"\"\"Returns details of an event.\"\"\"\n\n serializer_class = EventSerializer\n queryset = Event.objects.filter(published=True)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:read\"]\n\n\nclass EventRegistrationsView(ListAPIView, DestroyAPIView):\n \"\"\"Returns a list of registrations.\"\"\"\n\n serializer_class = EventRegistrationSerializer\n permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]\n required_scopes_per_method = {\n \"GET\": [\"events:read\"],\n \"POST\": [\"events:register\"],\n \"DELETE\": [\"events:register\"],\n }\n filter_backends = (framework_filters.OrderingFilter,)\n ordering_fields = (\n \"date\",\n \"member\",\n )\n\n def __init__(self):\n super(EventRegistrationsView, self).__init__()\n self.event = None\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"post\":\n return EmptySerializer\n return super().get_serializer_class()\n\n def get_queryset(self):\n if self.event:\n return EventRegistration.objects.filter(\n event=self.event, date_cancelled=None\n )[: self.event.max_participants]\n return EventRegistration.objects.none()\n\n def initial(self, request, *args, **kwargs):\n \"\"\"Run anything that needs to occur prior to calling the method handler.\"\"\"\n self.format_kwarg = self.get_format_suffix(**kwargs)\n\n # Perform content negotiation and store the accepted info on the request\n neg = self.perform_content_negotiation(request)\n request.accepted_renderer, request.accepted_media_type = neg\n\n # Determine the API version, if versioning is in use.\n version, scheme = self.determine_version(request, *args, **kwargs)\n request.version, request.versioning_scheme = version, scheme\n\n # Ensure that the incoming request is permitted\n self.perform_authentication(request)\n\n self.event = get_object_or_404(Event, pk=self.kwargs.get(\"pk\"), published=True)\n\n self.check_permissions(request)\n self.check_throttles(request)\n\n def post(self, request, *args, **kwargs):\n try:\n registration = services.create_registration(request.member, self.event)\n serializer = EventRegistrationSerializer(\n instance=registration, context=self.get_serializer_context()\n )\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except RegistrationError as e:\n raise PermissionDenied(detail=e) from e\n\n def delete(self, request, *args, **kwargs):\n try:\n services.cancel_registration(request.member, self.event)\n return Response(status=status.HTTP_204_NO_CONTENT)\n except RegistrationError as e:\n raise PermissionDenied(detail=e) from e\n\n\nclass EventRegistrationDetailView(RetrieveAPIView, DestroyAPIView):\n \"\"\"Returns details of an event registration.\"\"\"\n\n serializer_class = EventRegistrationSerializer\n queryset = EventRegistration.objects.all()\n permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]\n required_scopes_per_method = {\n \"GET\": [\"events:read\"],\n \"DELETE\": [\"events:register\"],\n }\n\n def 
get_queryset(self):\n return (\n super()\n .get_queryset()\n .filter(\n event=self.kwargs[\"event_id\"],\n event__published=True,\n date_cancelled=None,\n )\n )\n\n def get_serializer(self, *args, **kwargs):\n if (\n len(args) > 0\n and isinstance(args[0], EventRegistration)\n and args[0].member == self.request.member\n ):\n kwargs.update(\n fields=(\n \"pk\",\n \"member\",\n \"name\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"payment\",\n )\n )\n return super().get_serializer(*args, **kwargs)\n\n def delete(self, request, *args, **kwargs):\n if self.get_object().member != request.member:\n raise PermissionDenied()\n\n try:\n services.cancel_registration(request.member, self.get_object().event)\n return Response(status=status.HTTP_204_NO_CONTENT)\n except RegistrationError as e:\n raise PermissionDenied(detail=e) from e\n\n\nclass EventRegistrationFieldsView(APIView):\n \"\"\"Returns details of an event registration.\"\"\"\n\n permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]\n required_scopes_per_method = {\n \"GET\": [\"events:read\"],\n \"PUT\": [\"events:register\"],\n \"PATCH\": [\"events:register\"],\n }\n\n def get_object(self):\n return get_object_or_404(\n EventRegistration,\n event=self.kwargs[\"event_id\"],\n event__published=True,\n pk=self.kwargs[\"registration_id\"],\n member=self.request.member,\n )\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n\n def put(self, request, *args, **kwargs):\n original = services.registration_fields(request, registration=self.get_object())\n required_keys = set(original.keys()) - set(request.data.keys())\n if len(required_keys) > 0:\n return HttpResponse(\n content=f\"Missing keys '{', '.join(required_keys)}' in request\",\n status=status.HTTP_400_BAD_REQUEST,\n )\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n\n def patch(self, request, *args, **kwargs):\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n return HttpResponse(\n content=json.dumps(\n services.registration_fields(request, registration=self.get_object())\n ),\n status=status.HTTP_200_OK,\n )\n", "path": "website/events/api/v2/views.py"}]} | 3,823 | 480 |
gh_patches_debug_2958 | rasdani/github-patches | git_diff | learningequality__kolibri-4689 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Shows sorry! something went wrong.
### Observed behavior
When a coach goes to the Recent tab to see exercise and video progress, an error is shown.
### Expected behavior
It must show the progress instead of an error.
### Steps to reproduce
1. Log in as a coach.
2. Go to the Recent tab.
3. Go to the exercise/video report and observe the error.
### Context
* Kolibri version : kolibri 0.11.0
* Operating system : Ubuntu 14.04
* Browser : chrome
### Screenshot



--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/plugins/coach/api.py`
Content:
```
1 import datetime
2
3 from dateutil.parser import parse
4 from django.db import connection
5 from django.db.models import Min
6 from django.db.models import Q
7 from django.utils import timezone
8 from rest_framework import mixins
9 from rest_framework import pagination
10 from rest_framework import permissions
11 from rest_framework import viewsets
12
13 from .serializers import ContentReportSerializer
14 from .serializers import ContentSummarySerializer
15 from .serializers import LessonReportSerializer
16 from .serializers import UserReportSerializer
17 from .utils.return_users import get_members_or_user
18 from kolibri.core.auth.constants import collection_kinds
19 from kolibri.core.auth.constants import role_kinds
20 from kolibri.core.auth.models import Collection
21 from kolibri.core.auth.models import FacilityUser
22 from kolibri.core.content.models import ContentNode
23 from kolibri.core.decorators import query_params_required
24 from kolibri.core.lessons.models import Lesson
25 from kolibri.core.logger.models import ContentSummaryLog
26 from kolibri.core.logger.models import MasteryLog
27
28
29 collection_kind_choices = tuple([choice[0] for choice in collection_kinds.choices] + ['user'])
30
31
32 class OptionalPageNumberPagination(pagination.PageNumberPagination):
33 """
34 Pagination class that allows for page number-style pagination, when requested.
35 To activate, the `page_size` argument must be set. For example, to request the first 20 records:
36 `?page_size=20&page=1`
37 """
38 page_size = None
39 page_size_query_param = "page_size"
40
41
42 class KolibriReportPermissions(permissions.BasePermission):
43
44 # check if requesting user has permission for collection or user
45 def has_permission(self, request, view):
46 if isinstance(view, LessonReportViewset):
47 report_pk = view.kwargs.get('pk', None)
48 if report_pk is None:
49 # If requesting list view, check if requester has coach/admin permissions on whole facility
50 collection_kind = 'facility'
51 collection_or_user_pk = request.user.facility_id
52 else:
53 # If requesting detail view, only check if requester has permissions on the Classroom
54 collection_kind = 'classroom'
55 collection_or_user_pk = Lesson.objects.get(pk=report_pk).collection.id
56
57 else:
58 collection_kind = view.kwargs.get('collection_kind', 'user')
59 collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))
60
61 allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]
62 try:
63 if 'user' == collection_kind:
64 return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))
65 else:
66 return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))
67 except (FacilityUser.DoesNotExist, Collection.DoesNotExist):
68 return False
69
70
71 @query_params_required(channel_id=str, content_node_id=str, collection_kind=collection_kind_choices, collection_id=str)
72 class ReportBaseViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
73
74 permission_classes = (KolibriReportPermissions,)
75
76
77 class UserReportViewSet(ReportBaseViewSet):
78
79 pagination_class = OptionalPageNumberPagination
80 serializer_class = UserReportSerializer
81
82 def get_queryset(self):
83 assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint'
84 return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])
85
86
87 class ContentReportViewSet(ReportBaseViewSet):
88
89 pagination_class = OptionalPageNumberPagination
90 serializer_class = ContentReportSerializer
91
92 def get_queryset(self):
93 content_node_id = self.kwargs['content_node_id']
94 return ContentNode.objects.filter(Q(parent=content_node_id) & Q(available=True)).order_by('lft')
95
96
97 @query_params_required(channel_id=str, collection_kind=collection_kind_choices, collection_id=str)
98 class ContentSummaryViewSet(viewsets.ReadOnlyModelViewSet):
99
100 permission_classes = (KolibriReportPermissions,)
101 serializer_class = ContentSummarySerializer
102
103 def get_queryset(self):
104 channel_id = self.kwargs['channel_id']
105 return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')
106
107
108 class RecentReportViewSet(ReportBaseViewSet):
109
110 pagination_class = OptionalPageNumberPagination
111 serializer_class = ContentReportSerializer
112
113 def get_queryset(self):
114 channel_id = self.kwargs['channel_id']
115 attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)
116 query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])
117 if self.request.query_params.get('last_active_time'):
118 # Last active time specified
119 datetime_cutoff = parse(self.request.query_params.get('last_active_time'))
120 else:
121 datetime_cutoff = timezone.now() - datetime.timedelta(7)
122 # Set on the kwargs to pass into the serializer
123 self.kwargs['last_active_time'] = datetime_cutoff.isoformat()
124 recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(
125 Q(progress__gt=0) | Q(masterylogs__in=attempted_mastery_logs),
126 user__in=list(get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])),
127 end_timestamp__gte=datetime_cutoff).values_list('content_id', flat=True)
128 if connection.vendor == 'postgresql':
129 pks_with_unique_content_ids = ContentNode.objects.order_by('content_id').distinct('content_id').filter(
130 channel_id=channel_id, content_id__in=recent_content_items).values_list('pk', flat=True)
131 else:
132 # note from rtibbles:
133 # As good as either I or jamalex could come up with to ensure that we only return
134 # unique content_id'ed ContentNodes from the coach recent report endpoint.
135 # Would have loved to use distinct('content_id'), but unfortunately DISTINCT ON is Postgresql only
136 pks_with_unique_content_ids = ContentNode.objects.filter(
137 channel_id=channel_id, content_id__in=recent_content_items).values('content_id').order_by('lft').annotate(
138 pk=Min('pk')).values_list('pk', flat=True)
139 return ContentNode.objects.filter(pk__in=pks_with_unique_content_ids).order_by('lft')
140
141
142 class LessonReportViewset(viewsets.ReadOnlyModelViewSet):
143 permission_classes = (permissions.IsAuthenticated, KolibriReportPermissions,)
144 serializer_class = LessonReportSerializer
145 queryset = Lesson.objects.all()
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/plugins/coach/api.py b/kolibri/plugins/coach/api.py
--- a/kolibri/plugins/coach/api.py
+++ b/kolibri/plugins/coach/api.py
@@ -102,7 +102,7 @@
def get_queryset(self):
channel_id = self.kwargs['channel_id']
- return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')
+ return ContentNode.objects.filter(Q(channel_id=channel_id)).order_by('lft')
class RecentReportViewSet(ReportBaseViewSet):
| {"golden_diff": "diff --git a/kolibri/plugins/coach/api.py b/kolibri/plugins/coach/api.py\n--- a/kolibri/plugins/coach/api.py\n+++ b/kolibri/plugins/coach/api.py\n@@ -102,7 +102,7 @@\n \n def get_queryset(self):\n channel_id = self.kwargs['channel_id']\n- return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')\n+ return ContentNode.objects.filter(Q(channel_id=channel_id)).order_by('lft')\n \n \n class RecentReportViewSet(ReportBaseViewSet):\n", "issue": "Shows sorry! something went wrong.\n### Observed behavior\r\nWhen coach is going to the recent tab to see exercise and video progress then it shows error.\r\n\r\n\r\n### Expected behavior\r\nIt must show progress instead of error.\r\n\r\n### Steps to reproduce\r\n1. Login with coach.\r\n2. go to the recent tab.\r\n3. Go to the exercise/video and see.\r\n\r\n### Context\r\n\r\n * Kolibri version : kolibri 0.11.0\r\n * Operating system : Ubuntu 14.04\r\n * Browser : chrome\r\n\r\n### Screenshot\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import datetime\n\nfrom dateutil.parser import parse\nfrom django.db import connection\nfrom django.db.models import Min\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom rest_framework import mixins\nfrom rest_framework import pagination\nfrom rest_framework import permissions\nfrom rest_framework import viewsets\n\nfrom .serializers import ContentReportSerializer\nfrom .serializers import ContentSummarySerializer\nfrom .serializers import LessonReportSerializer\nfrom .serializers import UserReportSerializer\nfrom .utils.return_users import get_members_or_user\nfrom kolibri.core.auth.constants import collection_kinds\nfrom kolibri.core.auth.constants import role_kinds\nfrom kolibri.core.auth.models import Collection\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.decorators import query_params_required\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.models import ContentSummaryLog\nfrom kolibri.core.logger.models import MasteryLog\n\n\ncollection_kind_choices = tuple([choice[0] for choice in collection_kinds.choices] + ['user'])\n\n\nclass OptionalPageNumberPagination(pagination.PageNumberPagination):\n \"\"\"\n Pagination class that allows for page number-style pagination, when requested.\n To activate, the `page_size` argument must be set. 
For example, to request the first 20 records:\n `?page_size=20&page=1`\n \"\"\"\n page_size = None\n page_size_query_param = \"page_size\"\n\n\nclass KolibriReportPermissions(permissions.BasePermission):\n\n # check if requesting user has permission for collection or user\n def has_permission(self, request, view):\n if isinstance(view, LessonReportViewset):\n report_pk = view.kwargs.get('pk', None)\n if report_pk is None:\n # If requesting list view, check if requester has coach/admin permissions on whole facility\n collection_kind = 'facility'\n collection_or_user_pk = request.user.facility_id\n else:\n # If requesting detail view, only check if requester has permissions on the Classroom\n collection_kind = 'classroom'\n collection_or_user_pk = Lesson.objects.get(pk=report_pk).collection.id\n\n else:\n collection_kind = view.kwargs.get('collection_kind', 'user')\n collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))\n\n allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]\n try:\n if 'user' == collection_kind:\n return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))\n else:\n return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))\n except (FacilityUser.DoesNotExist, Collection.DoesNotExist):\n return False\n\n\n@query_params_required(channel_id=str, content_node_id=str, collection_kind=collection_kind_choices, collection_id=str)\nclass ReportBaseViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n\n\nclass UserReportViewSet(ReportBaseViewSet):\n\n pagination_class = OptionalPageNumberPagination\n serializer_class = UserReportSerializer\n\n def get_queryset(self):\n assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint'\n return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])\n\n\nclass ContentReportViewSet(ReportBaseViewSet):\n\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n content_node_id = self.kwargs['content_node_id']\n return ContentNode.objects.filter(Q(parent=content_node_id) & Q(available=True)).order_by('lft')\n\n\n@query_params_required(channel_id=str, collection_kind=collection_kind_choices, collection_id=str)\nclass ContentSummaryViewSet(viewsets.ReadOnlyModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n serializer_class = ContentSummarySerializer\n\n def get_queryset(self):\n channel_id = self.kwargs['channel_id']\n return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')\n\n\nclass RecentReportViewSet(ReportBaseViewSet):\n\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n channel_id = self.kwargs['channel_id']\n attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)\n query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])\n if self.request.query_params.get('last_active_time'):\n # Last active time specified\n datetime_cutoff = parse(self.request.query_params.get('last_active_time'))\n else:\n datetime_cutoff = timezone.now() - datetime.timedelta(7)\n # Set on the kwargs to pass into the serializer\n self.kwargs['last_active_time'] = datetime_cutoff.isoformat()\n recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(\n Q(progress__gt=0) | 
Q(masterylogs__in=attempted_mastery_logs),\n user__in=list(get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])),\n end_timestamp__gte=datetime_cutoff).values_list('content_id', flat=True)\n if connection.vendor == 'postgresql':\n pks_with_unique_content_ids = ContentNode.objects.order_by('content_id').distinct('content_id').filter(\n channel_id=channel_id, content_id__in=recent_content_items).values_list('pk', flat=True)\n else:\n # note from rtibbles:\n # As good as either I or jamalex could come up with to ensure that we only return\n # unique content_id'ed ContentNodes from the coach recent report endpoint.\n # Would have loved to use distinct('content_id'), but unfortunately DISTINCT ON is Postgresql only\n pks_with_unique_content_ids = ContentNode.objects.filter(\n channel_id=channel_id, content_id__in=recent_content_items).values('content_id').order_by('lft').annotate(\n pk=Min('pk')).values_list('pk', flat=True)\n return ContentNode.objects.filter(pk__in=pks_with_unique_content_ids).order_by('lft')\n\n\nclass LessonReportViewset(viewsets.ReadOnlyModelViewSet):\n permission_classes = (permissions.IsAuthenticated, KolibriReportPermissions,)\n serializer_class = LessonReportSerializer\n queryset = Lesson.objects.all()\n", "path": "kolibri/plugins/coach/api.py"}], "after_files": [{"content": "import datetime\n\nfrom dateutil.parser import parse\nfrom django.db import connection\nfrom django.db.models import Min\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom rest_framework import mixins\nfrom rest_framework import pagination\nfrom rest_framework import permissions\nfrom rest_framework import viewsets\n\nfrom .serializers import ContentReportSerializer\nfrom .serializers import ContentSummarySerializer\nfrom .serializers import LessonReportSerializer\nfrom .serializers import UserReportSerializer\nfrom .utils.return_users import get_members_or_user\nfrom kolibri.core.auth.constants import collection_kinds\nfrom kolibri.core.auth.constants import role_kinds\nfrom kolibri.core.auth.models import Collection\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.decorators import query_params_required\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.models import ContentSummaryLog\nfrom kolibri.core.logger.models import MasteryLog\n\n\ncollection_kind_choices = tuple([choice[0] for choice in collection_kinds.choices] + ['user'])\n\n\nclass OptionalPageNumberPagination(pagination.PageNumberPagination):\n \"\"\"\n Pagination class that allows for page number-style pagination, when requested.\n To activate, the `page_size` argument must be set. 
For example, to request the first 20 records:\n `?page_size=20&page=1`\n \"\"\"\n page_size = None\n page_size_query_param = \"page_size\"\n\n\nclass KolibriReportPermissions(permissions.BasePermission):\n\n # check if requesting user has permission for collection or user\n def has_permission(self, request, view):\n if isinstance(view, LessonReportViewset):\n report_pk = view.kwargs.get('pk', None)\n if report_pk is None:\n # If requesting list view, check if requester has coach/admin permissions on whole facility\n collection_kind = 'facility'\n collection_or_user_pk = request.user.facility_id\n else:\n # If requesting detail view, only check if requester has permissions on the Classroom\n collection_kind = 'classroom'\n collection_or_user_pk = Lesson.objects.get(pk=report_pk).collection.id\n\n else:\n collection_kind = view.kwargs.get('collection_kind', 'user')\n collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))\n\n allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]\n try:\n if 'user' == collection_kind:\n return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))\n else:\n return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))\n except (FacilityUser.DoesNotExist, Collection.DoesNotExist):\n return False\n\n\n@query_params_required(channel_id=str, content_node_id=str, collection_kind=collection_kind_choices, collection_id=str)\nclass ReportBaseViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n\n\nclass UserReportViewSet(ReportBaseViewSet):\n\n pagination_class = OptionalPageNumberPagination\n serializer_class = UserReportSerializer\n\n def get_queryset(self):\n assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint'\n return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])\n\n\nclass ContentReportViewSet(ReportBaseViewSet):\n\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n content_node_id = self.kwargs['content_node_id']\n return ContentNode.objects.filter(Q(parent=content_node_id) & Q(available=True)).order_by('lft')\n\n\n@query_params_required(channel_id=str, collection_kind=collection_kind_choices, collection_id=str)\nclass ContentSummaryViewSet(viewsets.ReadOnlyModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n serializer_class = ContentSummarySerializer\n\n def get_queryset(self):\n channel_id = self.kwargs['channel_id']\n return ContentNode.objects.filter(Q(channel_id=channel_id)).order_by('lft')\n\n\nclass RecentReportViewSet(ReportBaseViewSet):\n\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n channel_id = self.kwargs['channel_id']\n attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)\n query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])\n if self.request.query_params.get('last_active_time'):\n # Last active time specified\n datetime_cutoff = parse(self.request.query_params.get('last_active_time'))\n else:\n datetime_cutoff = timezone.now() - datetime.timedelta(7)\n # Set on the kwargs to pass into the serializer\n self.kwargs['last_active_time'] = datetime_cutoff.isoformat()\n recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(\n Q(progress__gt=0) | 
Q(masterylogs__in=attempted_mastery_logs),\n user__in=list(get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])),\n end_timestamp__gte=datetime_cutoff).values_list('content_id', flat=True)\n if connection.vendor == 'postgresql':\n pks_with_unique_content_ids = ContentNode.objects.order_by('content_id').distinct('content_id').filter(\n channel_id=channel_id, content_id__in=recent_content_items).values_list('pk', flat=True)\n else:\n # note from rtibbles:\n # As good as either I or jamalex could come up with to ensure that we only return\n # unique content_id'ed ContentNodes from the coach recent report endpoint.\n # Would have loved to use distinct('content_id'), but unfortunately DISTINCT ON is Postgresql only\n pks_with_unique_content_ids = ContentNode.objects.filter(\n channel_id=channel_id, content_id__in=recent_content_items).values('content_id').order_by('lft').annotate(\n pk=Min('pk')).values_list('pk', flat=True)\n return ContentNode.objects.filter(pk__in=pks_with_unique_content_ids).order_by('lft')\n\n\nclass LessonReportViewset(viewsets.ReadOnlyModelViewSet):\n permission_classes = (permissions.IsAuthenticated, KolibriReportPermissions,)\n serializer_class = LessonReportSerializer\n queryset = Lesson.objects.all()\n", "path": "kolibri/plugins/coach/api.py"}]} | 2,259 | 131 |
gh_patches_debug_28573 | rasdani/github-patches | git_diff | WeblateOrg__weblate-2466 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Better display of changes
After request #1569:
Still on the history page, new translation strings are quite hard to check/read (a change introduced maybe 6 months ago): a combination of colors (red for removed characters ...) and font styles (strikethrough for removed characters ...).
I suggest adding an option to switch between two display styles for the new translations, depending on user preferences.
Proposed name for this option: "Display mode for the translated strings" (in the history page and/or email notifications).
Values could be "Highlight changes" or "Raw display (no change highlighted)".
Maybe the same should apply to the email notifications.
This option can be set globally in the user profile.
Thanks,
<bountysource-plugin>
---
Want to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/47410434-better-display-of-changes?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github).
</bountysource-plugin>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/trans/simplediff.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © 2012 - 2018 Michal Čihař <[email protected]>
4 #
5 # This file is part of Weblate <https://weblate.org/>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <https://www.gnu.org/licenses/>.
19 #
20 from __future__ import unicode_literals
21
22 from difflib import SequenceMatcher
23
24
25 def html_diff(old, new):
26 """Generate HTML formatted diff of two strings."""
27 diff = SequenceMatcher(None, old, new)
28 result = []
29 for tag, oldpos1, oldpos2, newpos1, newpos2 in diff.get_opcodes():
30 if tag == 'replace':
31 result.append(
32 '<del>{0}</del><ins>{1}</ins>'.format(
33 old[oldpos1:oldpos2], new[newpos1:newpos2]
34 )
35 )
36 elif tag == 'delete':
37 result.append(
38 '<del>{0}</del>'.format(
39 old[oldpos1:oldpos2]
40 )
41 )
42 elif tag == 'insert':
43 result.append(
44 '<ins>{0}</ins>'.format(
45 new[newpos1:newpos2]
46 )
47 )
48 elif tag == 'equal':
49 result.append(new[newpos1:newpos2])
50 return ''.join(result)
51
```
Path: `weblate/utils/requirements.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © 2012 - 2018 Michal Čihař <[email protected]>
4 #
5 # This file is part of Weblate <https://weblate.org/>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <https://www.gnu.org/licenses/>.
19 #
20
21 from __future__ import print_function, unicode_literals
22
23 import importlib
24 import sys
25 from distutils.version import LooseVersion
26
27
28 from django.core.checks import Error
29 from django.core.exceptions import ImproperlyConfigured
30
31 import pkg_resources
32
33 from weblate.utils.docs import get_doc_url
34 from weblate.vcs.git import (
35 GitRepository, SubversionRepository, GitWithGerritRepository,
36 GithubRepository,
37 )
38 from weblate.vcs.mercurial import HgRepository
39 import weblate
40
41
42 def get_version_module(module, name, url, optional=False):
43 """Return module object.
44
45 On error raises verbose exception with name and URL.
46 """
47 try:
48 mod = importlib.import_module(module)
49 except ImportError:
50 if optional:
51 return None
52 raise ImproperlyConfigured(
53 'Failed to import {0}, please install {1} from {2}'.format(
54 module, name, url
55 )
56 )
57 return mod
58
59
60 def get_optional_module(result, module, name, url):
61 """Get metadata for optional dependency"""
62 mod = get_version_module(module, name, url, True)
63 if mod is not None:
64 result.append((
65 name,
66 url,
67 pkg_resources.get_distribution(name).version,
68 None,
69 ))
70
71
72 def get_optional_versions():
73 """Return versions of optional modules."""
74 result = []
75
76 get_optional_module(
77 result, 'pytz', 'pytz', 'https://pypi.org/project/pytz/'
78 )
79
80 get_optional_module(
81 result, 'pyuca', 'pyuca', 'https://github.com/jtauber/pyuca'
82 )
83
84 get_optional_module(
85 result, 'bidi', 'python-bidi',
86 'https://github.com/MeirKriheli/python-bidi'
87 )
88
89 get_optional_module(
90 result, 'yaml', 'PyYAML', 'https://pyyaml.org/wiki/PyYAML'
91 )
92
93 get_optional_module(
94 result, 'tesserocr', 'tesserocr', 'https://github.com/sirfz/tesserocr'
95 )
96
97 if HgRepository.is_supported():
98 result.append((
99 'Mercurial',
100 'https://www.mercurial-scm.org/',
101 HgRepository.get_version(),
102 '2.8',
103 ))
104
105 if SubversionRepository.is_supported():
106 result.append((
107 'git-svn',
108 'https://git-scm.com/docs/git-svn',
109 SubversionRepository.get_version(),
110 '1.6',
111 ))
112
113 if GitWithGerritRepository.is_supported():
114 result.append((
115 'git-review',
116 'https://pypi.org/project/git-review/',
117 GitWithGerritRepository.get_version(),
118 '1.0',
119 ))
120
121 if GithubRepository.is_supported():
122 result.append((
123 'hub',
124 'https://hub.github.com/',
125 GithubRepository.get_version(),
126 '1.0',
127 ))
128
129 return result
130
131
132 def get_single(name, url, module, required):
133 """Return version information for single module"""
134 get_version_module(module, name, url)
135 return (
136 name,
137 url,
138 pkg_resources.get_distribution(name).version,
139 required,
140 )
141
142
143 def get_versions():
144 """Return list of used versions."""
145 result = []
146
147 result.append((
148 'Python',
149 'https://www.python.org/',
150 sys.version.split()[0],
151 '2.7',
152 ))
153
154 result.append(get_single(
155 'Django',
156 'https://www.djangoproject.com/',
157 'django',
158 '1.11',
159 ))
160
161 result.append(get_single(
162 'Celery',
163 'http://www.celeryproject.org/',
164 'celery',
165 '4.0',
166 ))
167
168 result.append(get_single(
169 'celery-batches',
170 'https://pypi.org/project/celery-batches/',
171 'celery_batches',
172 '0.2',
173 ))
174
175 result.append(get_single(
176 'six',
177 'https://pypi.org/project/six/',
178 'six',
179 '1.7.0',
180 ))
181
182 result.append(get_single(
183 'social-auth-core',
184 'https://python-social-auth.readthedocs.io/',
185 'social_core',
186 '2.0.0',
187 ))
188
189 result.append(get_single(
190 'social-auth-app-django',
191 'https://python-social-auth.readthedocs.io/',
192 'social_django',
193 '3.0.0',
194 ))
195
196 result.append(get_single(
197 'django-appconf',
198 'https://github.com/django-compressor/django-appconf',
199 'appconf',
200 '1.0'
201 ))
202
203 result.append(get_single(
204 'translate-toolkit',
205 'https://toolkit.translatehouse.org/',
206 'translate',
207 '2.3.1',
208 ))
209
210 result.append(get_single(
211 'Whoosh',
212 'https://bitbucket.org/mchaput/whoosh/',
213 'whoosh',
214 '2.7',
215 ))
216
217 result.append(get_single(
218 'defusedxml',
219 'https://bitbucket.org/tiran/defusedxml',
220 'defusedxml',
221 '0.4',
222 ))
223
224 try:
225 result.append((
226 'Git',
227 'https://git-scm.com/',
228 GitRepository.get_version(),
229 '1.6',
230 ))
231 except OSError:
232 raise ImproperlyConfigured('Failed to run git, please install it.')
233
234 result.append(get_single(
235 'Pillow',
236 'https://python-pillow.org/',
237 'PIL.Image',
238 '1.1.6',
239 ))
240
241 result.append(get_single(
242 'python-dateutil',
243 'https://labix.org/python-dateutil',
244 'dateutil',
245 '1.0'
246 ))
247
248 result.append(get_single(
249 'lxml',
250 'https://lxml.de/',
251 'lxml.etree',
252 '3.1.0',
253 ))
254
255 result.append(get_single(
256 'django-crispy-forms',
257 'https://django-crispy-forms.readthedocs.io/',
258 'crispy_forms',
259 '1.6.1',
260 ))
261
262 result.append(get_single(
263 'django_compressor',
264 'https://github.com/django-compressor/django-compressor',
265 'compressor',
266 '2.1',
267 ))
268
269 result.append(get_single(
270 'djangorestframework',
271 'https://www.django-rest-framework.org/',
272 'rest_framework',
273 '3.8',
274 ))
275
276 result.append(get_single(
277 'user-agents',
278 'https://github.com/selwin/python-user-agents',
279 'user_agents',
280 '1.1.0',
281 ))
282
283 result.append(get_single(
284 'jellyfish',
285 'https://github.com/jamesturk/jellyfish',
286 'jellyfish',
287 '0.6.1',
288 ))
289
290 return result
291
292
293 def check_version(version, expected):
294 """Check for single module version."""
295 if expected is None:
296 return False
297 return LooseVersion(version) < LooseVersion(expected)
298
299
300 def check_requirements(app_configs, **kwargs):
301 """Perform check on requirements and raises an exception on error."""
302 versions = get_versions() + get_optional_versions()
303 errors = []
304 message = '{0} <{1}> is too old. Installed version {2}, required {3}.'
305
306 for name, url, version, expected in versions:
307 if check_version(version, expected):
308 errors.append(
309 Error(
310 message.format(name, url, version, expected),
311 hint=get_doc_url('admin/install', 'requirements'),
312 id='weblate.E001',
313 )
314 )
315
316 return errors
317
318
319 def get_versions_list():
320 """Return list with version information summary."""
321 return (
322 [('Weblate', '', weblate.GIT_VERSION)] +
323 get_versions() +
324 get_optional_versions()
325 )
326
327
328 def get_versions_string():
329 """Return string with version information summary."""
330 result = []
331 for version in get_versions_list():
332 result.append(
333 ' * {0} {1}'.format(
334 version[0],
335 version[2]
336 )
337 )
338 return '\n'.join(result)
339
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/trans/simplediff.py b/weblate/trans/simplediff.py
--- a/weblate/trans/simplediff.py
+++ b/weblate/trans/simplediff.py
@@ -19,32 +19,21 @@
#
from __future__ import unicode_literals
-from difflib import SequenceMatcher
+from diff_match_patch import diff_match_patch
def html_diff(old, new):
"""Generate HTML formatted diff of two strings."""
- diff = SequenceMatcher(None, old, new)
+ dmp = diff_match_patch()
+ diff = dmp.diff_main(old, new)
+ dmp.diff_cleanupSemantic(diff)
+
result = []
- for tag, oldpos1, oldpos2, newpos1, newpos2 in diff.get_opcodes():
- if tag == 'replace':
- result.append(
- '<del>{0}</del><ins>{1}</ins>'.format(
- old[oldpos1:oldpos2], new[newpos1:newpos2]
- )
- )
- elif tag == 'delete':
- result.append(
- '<del>{0}</del>'.format(
- old[oldpos1:oldpos2]
- )
- )
- elif tag == 'insert':
- result.append(
- '<ins>{0}</ins>'.format(
- new[newpos1:newpos2]
- )
- )
- elif tag == 'equal':
- result.append(new[newpos1:newpos2])
+ for op, data in diff:
+ if op == dmp.DIFF_DELETE:
+ result.append('<del>{0}</del>'.format(data))
+ elif op == dmp.DIFF_INSERT:
+ result.append('<ins>{0}</ins>'.format(data))
+ elif op == dmp.DIFF_EQUAL:
+ result.append(data)
return ''.join(result)
diff --git a/weblate/utils/requirements.py b/weblate/utils/requirements.py
--- a/weblate/utils/requirements.py
+++ b/weblate/utils/requirements.py
@@ -287,6 +287,13 @@
'0.6.1',
))
+ result.append(get_single(
+ 'diff-match-patch',
+ 'https://github.com/diff-match-patch-python/diff-match-patch',
+ 'diff_match_patch',
+ '20121119',
+ ))
+
return result
| {"golden_diff": "diff --git a/weblate/trans/simplediff.py b/weblate/trans/simplediff.py\n--- a/weblate/trans/simplediff.py\n+++ b/weblate/trans/simplediff.py\n@@ -19,32 +19,21 @@\n #\n from __future__ import unicode_literals\n \n-from difflib import SequenceMatcher\n+from diff_match_patch import diff_match_patch\n \n \n def html_diff(old, new):\n \"\"\"Generate HTML formatted diff of two strings.\"\"\"\n- diff = SequenceMatcher(None, old, new)\n+ dmp = diff_match_patch()\n+ diff = dmp.diff_main(old, new)\n+ dmp.diff_cleanupSemantic(diff)\n+\n result = []\n- for tag, oldpos1, oldpos2, newpos1, newpos2 in diff.get_opcodes():\n- if tag == 'replace':\n- result.append(\n- '<del>{0}</del><ins>{1}</ins>'.format(\n- old[oldpos1:oldpos2], new[newpos1:newpos2]\n- )\n- )\n- elif tag == 'delete':\n- result.append(\n- '<del>{0}</del>'.format(\n- old[oldpos1:oldpos2]\n- )\n- )\n- elif tag == 'insert':\n- result.append(\n- '<ins>{0}</ins>'.format(\n- new[newpos1:newpos2]\n- )\n- )\n- elif tag == 'equal':\n- result.append(new[newpos1:newpos2])\n+ for op, data in diff:\n+ if op == dmp.DIFF_DELETE:\n+ result.append('<del>{0}</del>'.format(data))\n+ elif op == dmp.DIFF_INSERT:\n+ result.append('<ins>{0}</ins>'.format(data))\n+ elif op == dmp.DIFF_EQUAL:\n+ result.append(data)\n return ''.join(result)\ndiff --git a/weblate/utils/requirements.py b/weblate/utils/requirements.py\n--- a/weblate/utils/requirements.py\n+++ b/weblate/utils/requirements.py\n@@ -287,6 +287,13 @@\n '0.6.1',\n ))\n \n+ result.append(get_single(\n+ 'diff-match-patch',\n+ 'https://github.com/diff-match-patch-python/diff-match-patch',\n+ 'diff_match_patch',\n+ '20121119',\n+ ))\n+\n return result\n", "issue": "Better display of changes\nAfter request #1569:\r\n\r\nStill in the history page, new translation strings are quite hard to check/read (changed introduced maybe 6 months ago): merge of colors (red for removed characters ...) and font style (strikes for removed characters ...).\r\n\r\nI suggest to add an option to switch between 2 display styles for the new translations, depending on user preferences.\r\n\r\nProposed named for this option: \"Display mode for the translated strings\" (in history page and / or email notifications).\r\nValues could be \"Highlight changes\" or \"Raw display (no change highlighted)\".\r\nMaybe the same should apply in the mail notifications.\r\n\r\nThis option can be set globally in the user profil.\r\n\r\nThanks,\r\n\r\n<bountysource-plugin>\r\n\r\n---\r\nWant to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/47410434-better-display-of-changes?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github).\r\n</bountysource-plugin>\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2018 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\nfrom __future__ import unicode_literals\n\nfrom difflib import SequenceMatcher\n\n\ndef html_diff(old, new):\n \"\"\"Generate HTML formatted diff of two strings.\"\"\"\n diff = SequenceMatcher(None, old, new)\n result = []\n for tag, oldpos1, oldpos2, newpos1, newpos2 in diff.get_opcodes():\n if tag == 'replace':\n result.append(\n '<del>{0}</del><ins>{1}</ins>'.format(\n old[oldpos1:oldpos2], new[newpos1:newpos2]\n )\n )\n elif tag == 'delete':\n result.append(\n '<del>{0}</del>'.format(\n old[oldpos1:oldpos2]\n )\n )\n elif tag == 'insert':\n result.append(\n '<ins>{0}</ins>'.format(\n new[newpos1:newpos2]\n )\n )\n elif tag == 'equal':\n result.append(new[newpos1:newpos2])\n return ''.join(result)\n", "path": "weblate/trans/simplediff.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2018 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\nfrom __future__ import print_function, unicode_literals\n\nimport importlib\nimport sys\nfrom distutils.version import LooseVersion\n\n\nfrom django.core.checks import Error\nfrom django.core.exceptions import ImproperlyConfigured\n\nimport pkg_resources\n\nfrom weblate.utils.docs import get_doc_url\nfrom weblate.vcs.git import (\n GitRepository, SubversionRepository, GitWithGerritRepository,\n GithubRepository,\n)\nfrom weblate.vcs.mercurial import HgRepository\nimport weblate\n\n\ndef get_version_module(module, name, url, optional=False):\n \"\"\"Return module object.\n\n On error raises verbose exception with name and URL.\n \"\"\"\n try:\n mod = importlib.import_module(module)\n except ImportError:\n if optional:\n return None\n raise ImproperlyConfigured(\n 'Failed to import {0}, please install {1} from {2}'.format(\n module, name, url\n )\n )\n return mod\n\n\ndef get_optional_module(result, module, name, url):\n \"\"\"Get metadata for optional dependency\"\"\"\n mod = get_version_module(module, name, url, True)\n if mod is not None:\n result.append((\n name,\n url,\n pkg_resources.get_distribution(name).version,\n None,\n ))\n\n\ndef get_optional_versions():\n \"\"\"Return versions of optional modules.\"\"\"\n result = []\n\n get_optional_module(\n result, 'pytz', 'pytz', 'https://pypi.org/project/pytz/'\n )\n\n get_optional_module(\n result, 'pyuca', 'pyuca', 'https://github.com/jtauber/pyuca'\n )\n\n get_optional_module(\n result, 'bidi', 'python-bidi',\n 'https://github.com/MeirKriheli/python-bidi'\n )\n\n get_optional_module(\n result, 'yaml', 'PyYAML', 'https://pyyaml.org/wiki/PyYAML'\n )\n\n get_optional_module(\n result, 'tesserocr', 'tesserocr', 'https://github.com/sirfz/tesserocr'\n )\n\n if HgRepository.is_supported():\n 
result.append((\n 'Mercurial',\n 'https://www.mercurial-scm.org/',\n HgRepository.get_version(),\n '2.8',\n ))\n\n if SubversionRepository.is_supported():\n result.append((\n 'git-svn',\n 'https://git-scm.com/docs/git-svn',\n SubversionRepository.get_version(),\n '1.6',\n ))\n\n if GitWithGerritRepository.is_supported():\n result.append((\n 'git-review',\n 'https://pypi.org/project/git-review/',\n GitWithGerritRepository.get_version(),\n '1.0',\n ))\n\n if GithubRepository.is_supported():\n result.append((\n 'hub',\n 'https://hub.github.com/',\n GithubRepository.get_version(),\n '1.0',\n ))\n\n return result\n\n\ndef get_single(name, url, module, required):\n \"\"\"Return version information for single module\"\"\"\n get_version_module(module, name, url)\n return (\n name,\n url,\n pkg_resources.get_distribution(name).version,\n required,\n )\n\n\ndef get_versions():\n \"\"\"Return list of used versions.\"\"\"\n result = []\n\n result.append((\n 'Python',\n 'https://www.python.org/',\n sys.version.split()[0],\n '2.7',\n ))\n\n result.append(get_single(\n 'Django',\n 'https://www.djangoproject.com/',\n 'django',\n '1.11',\n ))\n\n result.append(get_single(\n 'Celery',\n 'http://www.celeryproject.org/',\n 'celery',\n '4.0',\n ))\n\n result.append(get_single(\n 'celery-batches',\n 'https://pypi.org/project/celery-batches/',\n 'celery_batches',\n '0.2',\n ))\n\n result.append(get_single(\n 'six',\n 'https://pypi.org/project/six/',\n 'six',\n '1.7.0',\n ))\n\n result.append(get_single(\n 'social-auth-core',\n 'https://python-social-auth.readthedocs.io/',\n 'social_core',\n '2.0.0',\n ))\n\n result.append(get_single(\n 'social-auth-app-django',\n 'https://python-social-auth.readthedocs.io/',\n 'social_django',\n '3.0.0',\n ))\n\n result.append(get_single(\n 'django-appconf',\n 'https://github.com/django-compressor/django-appconf',\n 'appconf',\n '1.0'\n ))\n\n result.append(get_single(\n 'translate-toolkit',\n 'https://toolkit.translatehouse.org/',\n 'translate',\n '2.3.1',\n ))\n\n result.append(get_single(\n 'Whoosh',\n 'https://bitbucket.org/mchaput/whoosh/',\n 'whoosh',\n '2.7',\n ))\n\n result.append(get_single(\n 'defusedxml',\n 'https://bitbucket.org/tiran/defusedxml',\n 'defusedxml',\n '0.4',\n ))\n\n try:\n result.append((\n 'Git',\n 'https://git-scm.com/',\n GitRepository.get_version(),\n '1.6',\n ))\n except OSError:\n raise ImproperlyConfigured('Failed to run git, please install it.')\n\n result.append(get_single(\n 'Pillow',\n 'https://python-pillow.org/',\n 'PIL.Image',\n '1.1.6',\n ))\n\n result.append(get_single(\n 'python-dateutil',\n 'https://labix.org/python-dateutil',\n 'dateutil',\n '1.0'\n ))\n\n result.append(get_single(\n 'lxml',\n 'https://lxml.de/',\n 'lxml.etree',\n '3.1.0',\n ))\n\n result.append(get_single(\n 'django-crispy-forms',\n 'https://django-crispy-forms.readthedocs.io/',\n 'crispy_forms',\n '1.6.1',\n ))\n\n result.append(get_single(\n 'django_compressor',\n 'https://github.com/django-compressor/django-compressor',\n 'compressor',\n '2.1',\n ))\n\n result.append(get_single(\n 'djangorestframework',\n 'https://www.django-rest-framework.org/',\n 'rest_framework',\n '3.8',\n ))\n\n result.append(get_single(\n 'user-agents',\n 'https://github.com/selwin/python-user-agents',\n 'user_agents',\n '1.1.0',\n ))\n\n result.append(get_single(\n 'jellyfish',\n 'https://github.com/jamesturk/jellyfish',\n 'jellyfish',\n '0.6.1',\n ))\n\n return result\n\n\ndef check_version(version, expected):\n \"\"\"Check for single module version.\"\"\"\n if expected is None:\n return 
False\n return LooseVersion(version) < LooseVersion(expected)\n\n\ndef check_requirements(app_configs, **kwargs):\n \"\"\"Perform check on requirements and raises an exception on error.\"\"\"\n versions = get_versions() + get_optional_versions()\n errors = []\n message = '{0} <{1}> is too old. Installed version {2}, required {3}.'\n\n for name, url, version, expected in versions:\n if check_version(version, expected):\n errors.append(\n Error(\n message.format(name, url, version, expected),\n hint=get_doc_url('admin/install', 'requirements'),\n id='weblate.E001',\n )\n )\n\n return errors\n\n\ndef get_versions_list():\n \"\"\"Return list with version information summary.\"\"\"\n return (\n [('Weblate', '', weblate.GIT_VERSION)] +\n get_versions() +\n get_optional_versions()\n )\n\n\ndef get_versions_string():\n \"\"\"Return string with version information summary.\"\"\"\n result = []\n for version in get_versions_list():\n result.append(\n ' * {0} {1}'.format(\n version[0],\n version[2]\n )\n )\n return '\\n'.join(result)\n", "path": "weblate/utils/requirements.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2018 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\nfrom __future__ import unicode_literals\n\nfrom diff_match_patch import diff_match_patch\n\n\ndef html_diff(old, new):\n \"\"\"Generate HTML formatted diff of two strings.\"\"\"\n dmp = diff_match_patch()\n diff = dmp.diff_main(old, new)\n dmp.diff_cleanupSemantic(diff)\n\n result = []\n for op, data in diff:\n if op == dmp.DIFF_DELETE:\n result.append('<del>{0}</del>'.format(data))\n elif op == dmp.DIFF_INSERT:\n result.append('<ins>{0}</ins>'.format(data))\n elif op == dmp.DIFF_EQUAL:\n result.append(data)\n return ''.join(result)\n", "path": "weblate/trans/simplediff.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2018 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\nfrom __future__ import print_function, unicode_literals\n\nimport importlib\nimport sys\nfrom distutils.version import LooseVersion\n\n\nfrom django.core.checks import Error\nfrom django.core.exceptions import ImproperlyConfigured\n\nimport pkg_resources\n\nfrom weblate.utils.docs import get_doc_url\nfrom weblate.vcs.git import (\n GitRepository, SubversionRepository, GitWithGerritRepository,\n GithubRepository,\n)\nfrom weblate.vcs.mercurial import HgRepository\nimport weblate\n\n\ndef get_version_module(module, name, url, optional=False):\n \"\"\"Return module object.\n\n On error raises verbose exception with name and URL.\n \"\"\"\n try:\n mod = importlib.import_module(module)\n except ImportError:\n if optional:\n return None\n raise ImproperlyConfigured(\n 'Failed to import {0}, please install {1} from {2}'.format(\n module, name, url\n )\n )\n return mod\n\n\ndef get_optional_module(result, module, name, url):\n \"\"\"Get metadata for optional dependency\"\"\"\n mod = get_version_module(module, name, url, True)\n if mod is not None:\n result.append((\n name,\n url,\n pkg_resources.get_distribution(name).version,\n None,\n ))\n\n\ndef get_optional_versions():\n \"\"\"Return versions of optional modules.\"\"\"\n result = []\n\n get_optional_module(\n result, 'pytz', 'pytz', 'https://pypi.org/project/pytz/'\n )\n\n get_optional_module(\n result, 'pyuca', 'pyuca', 'https://github.com/jtauber/pyuca'\n )\n\n get_optional_module(\n result, 'bidi', 'python-bidi',\n 'https://github.com/MeirKriheli/python-bidi'\n )\n\n get_optional_module(\n result, 'yaml', 'PyYAML', 'https://pyyaml.org/wiki/PyYAML'\n )\n\n get_optional_module(\n result, 'tesserocr', 'tesserocr', 'https://github.com/sirfz/tesserocr'\n )\n\n if HgRepository.is_supported():\n result.append((\n 'Mercurial',\n 'https://www.mercurial-scm.org/',\n HgRepository.get_version(),\n '2.8',\n ))\n\n if SubversionRepository.is_supported():\n result.append((\n 'git-svn',\n 'https://git-scm.com/docs/git-svn',\n SubversionRepository.get_version(),\n '1.6',\n ))\n\n if GitWithGerritRepository.is_supported():\n result.append((\n 'git-review',\n 'https://pypi.org/project/git-review/',\n GitWithGerritRepository.get_version(),\n '1.0',\n ))\n\n if GithubRepository.is_supported():\n result.append((\n 'hub',\n 'https://hub.github.com/',\n GithubRepository.get_version(),\n '1.0',\n ))\n\n return result\n\n\ndef get_single(name, url, module, required):\n \"\"\"Return version information for single module\"\"\"\n get_version_module(module, name, url)\n return (\n name,\n url,\n pkg_resources.get_distribution(name).version,\n required,\n )\n\n\ndef get_versions():\n \"\"\"Return list of used versions.\"\"\"\n result = []\n\n result.append((\n 'Python',\n 'https://www.python.org/',\n sys.version.split()[0],\n '2.7',\n ))\n\n result.append(get_single(\n 'Django',\n 'https://www.djangoproject.com/',\n 'django',\n '1.11',\n ))\n\n result.append(get_single(\n 'Celery',\n 'http://www.celeryproject.org/',\n 'celery',\n '4.0',\n ))\n\n result.append(get_single(\n 'celery-batches',\n 'https://pypi.org/project/celery-batches/',\n 'celery_batches',\n '0.2',\n ))\n\n result.append(get_single(\n 'six',\n 'https://pypi.org/project/six/',\n 'six',\n '1.7.0',\n ))\n\n result.append(get_single(\n 'social-auth-core',\n 'https://python-social-auth.readthedocs.io/',\n 'social_core',\n '2.0.0',\n ))\n\n result.append(get_single(\n 'social-auth-app-django',\n 'https://python-social-auth.readthedocs.io/',\n 'social_django',\n 
'3.0.0',\n ))\n\n result.append(get_single(\n 'django-appconf',\n 'https://github.com/django-compressor/django-appconf',\n 'appconf',\n '1.0'\n ))\n\n result.append(get_single(\n 'translate-toolkit',\n 'https://toolkit.translatehouse.org/',\n 'translate',\n '2.3.1',\n ))\n\n result.append(get_single(\n 'Whoosh',\n 'https://bitbucket.org/mchaput/whoosh/',\n 'whoosh',\n '2.7',\n ))\n\n result.append(get_single(\n 'defusedxml',\n 'https://bitbucket.org/tiran/defusedxml',\n 'defusedxml',\n '0.4',\n ))\n\n try:\n result.append((\n 'Git',\n 'https://git-scm.com/',\n GitRepository.get_version(),\n '1.6',\n ))\n except OSError:\n raise ImproperlyConfigured('Failed to run git, please install it.')\n\n result.append(get_single(\n 'Pillow',\n 'https://python-pillow.org/',\n 'PIL.Image',\n '1.1.6',\n ))\n\n result.append(get_single(\n 'python-dateutil',\n 'https://labix.org/python-dateutil',\n 'dateutil',\n '1.0'\n ))\n\n result.append(get_single(\n 'lxml',\n 'https://lxml.de/',\n 'lxml.etree',\n '3.1.0',\n ))\n\n result.append(get_single(\n 'django-crispy-forms',\n 'https://django-crispy-forms.readthedocs.io/',\n 'crispy_forms',\n '1.6.1',\n ))\n\n result.append(get_single(\n 'django_compressor',\n 'https://github.com/django-compressor/django-compressor',\n 'compressor',\n '2.1',\n ))\n\n result.append(get_single(\n 'djangorestframework',\n 'https://www.django-rest-framework.org/',\n 'rest_framework',\n '3.8',\n ))\n\n result.append(get_single(\n 'user-agents',\n 'https://github.com/selwin/python-user-agents',\n 'user_agents',\n '1.1.0',\n ))\n\n result.append(get_single(\n 'jellyfish',\n 'https://github.com/jamesturk/jellyfish',\n 'jellyfish',\n '0.6.1',\n ))\n\n result.append(get_single(\n 'diff-match-patch',\n 'https://github.com/diff-match-patch-python/diff-match-patch',\n 'diff_match_patch',\n '20121119',\n ))\n\n return result\n\n\ndef check_version(version, expected):\n \"\"\"Check for single module version.\"\"\"\n if expected is None:\n return False\n return LooseVersion(version) < LooseVersion(expected)\n\n\ndef check_requirements(app_configs, **kwargs):\n \"\"\"Perform check on requirements and raises an exception on error.\"\"\"\n versions = get_versions() + get_optional_versions()\n errors = []\n message = '{0} <{1}> is too old. Installed version {2}, required {3}.'\n\n for name, url, version, expected in versions:\n if check_version(version, expected):\n errors.append(\n Error(\n message.format(name, url, version, expected),\n hint=get_doc_url('admin/install', 'requirements'),\n id='weblate.E001',\n )\n )\n\n return errors\n\n\ndef get_versions_list():\n \"\"\"Return list with version information summary.\"\"\"\n return (\n [('Weblate', '', weblate.GIT_VERSION)] +\n get_versions() +\n get_optional_versions()\n )\n\n\ndef get_versions_string():\n \"\"\"Return string with version information summary.\"\"\"\n result = []\n for version in get_versions_list():\n result.append(\n ' * {0} {1}'.format(\n version[0],\n version[2]\n )\n )\n return '\\n'.join(result)\n", "path": "weblate/utils/requirements.py"}]} | 3,986 | 539 |
gh_patches_debug_30051 | rasdani/github-patches | git_diff | doccano__doccano-2228 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a function to filter labels
When I rechecked the labels of the annotated data, I had no way of filtering out the labels I wanted to see. For example, when I am doing a check of dichotomous annotations, I would like to filter the dataset to find out which labels are positive and which are negative, so that I can save time on the check. However, due to the lack of this function, I have to filter one by one from the dataset, which wastes a lot of time and manpower.
Thanks to every contributor!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/examples/filters.py`
Content:
```
1 from django.db.models import Count, Q
2 from django_filters.rest_framework import BooleanFilter, FilterSet
3
4 from .models import Example
5
6
7 class ExampleFilter(FilterSet):
8 confirmed = BooleanFilter(field_name="states", method="filter_by_state")
9
10 def filter_by_state(self, queryset, field_name, is_confirmed: bool):
11 queryset = queryset.annotate(
12 num_confirm=Count(
13 expression=field_name,
14 filter=Q(**{f"{field_name}__confirmed_by": self.request.user})
15 | Q(project__collaborative_annotation=True),
16 )
17 )
18 if is_confirmed:
19 queryset = queryset.filter(num_confirm__gte=1)
20 else:
21 queryset = queryset.filter(num_confirm__lte=0)
22 return queryset
23
24 class Meta:
25 model = Example
26 fields = ("project", "text", "created_at", "updated_at")
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/examples/filters.py b/backend/examples/filters.py
--- a/backend/examples/filters.py
+++ b/backend/examples/filters.py
@@ -1,11 +1,12 @@
-from django.db.models import Count, Q
-from django_filters.rest_framework import BooleanFilter, FilterSet
+from django.db.models import Count, Q, QuerySet
+from django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet
from .models import Example
class ExampleFilter(FilterSet):
confirmed = BooleanFilter(field_name="states", method="filter_by_state")
+ label = CharFilter(method="filter_by_label")
def filter_by_state(self, queryset, field_name, is_confirmed: bool):
queryset = queryset.annotate(
@@ -21,6 +22,35 @@
queryset = queryset.filter(num_confirm__lte=0)
return queryset
+ def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet:
+ """Filter examples by a given label name.
+
+ This performs filtering on all of the following labels at once:
+ - categories
+ - spans
+ - relations
+ - bboxes
+ - segmentations
+
+ Todo: Consider project type to make filtering more efficient.
+
+ Args:
+ queryset (QuerySet): QuerySet to filter.
+ field_name (str): This equals to `label`.
+ label (str): The label name to filter.
+
+ Returns:
+ QuerySet: Filtered examples.
+ """
+ queryset = queryset.filter(
+ Q(categories__label__text=label)
+ | Q(spans__label__text=label)
+ | Q(relations__type__text=label)
+ | Q(bboxes__label__text=label)
+ | Q(segmentations__label__text=label)
+ )
+ return queryset
+
class Meta:
model = Example
- fields = ("project", "text", "created_at", "updated_at")
+ fields = ("project", "text", "created_at", "updated_at", "label")
| {"golden_diff": "diff --git a/backend/examples/filters.py b/backend/examples/filters.py\n--- a/backend/examples/filters.py\n+++ b/backend/examples/filters.py\n@@ -1,11 +1,12 @@\n-from django.db.models import Count, Q\n-from django_filters.rest_framework import BooleanFilter, FilterSet\n+from django.db.models import Count, Q, QuerySet\n+from django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet\n \n from .models import Example\n \n \n class ExampleFilter(FilterSet):\n confirmed = BooleanFilter(field_name=\"states\", method=\"filter_by_state\")\n+ label = CharFilter(method=\"filter_by_label\")\n \n def filter_by_state(self, queryset, field_name, is_confirmed: bool):\n queryset = queryset.annotate(\n@@ -21,6 +22,35 @@\n queryset = queryset.filter(num_confirm__lte=0)\n return queryset\n \n+ def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet:\n+ \"\"\"Filter examples by a given label name.\n+\n+ This performs filtering on all of the following labels at once:\n+ - categories\n+ - spans\n+ - relations\n+ - bboxes\n+ - segmentations\n+\n+ Todo: Consider project type to make filtering more efficient.\n+\n+ Args:\n+ queryset (QuerySet): QuerySet to filter.\n+ field_name (str): This equals to `label`.\n+ label (str): The label name to filter.\n+\n+ Returns:\n+ QuerySet: Filtered examples.\n+ \"\"\"\n+ queryset = queryset.filter(\n+ Q(categories__label__text=label)\n+ | Q(spans__label__text=label)\n+ | Q(relations__type__text=label)\n+ | Q(bboxes__label__text=label)\n+ | Q(segmentations__label__text=label)\n+ )\n+ return queryset\n+\n class Meta:\n model = Example\n- fields = (\"project\", \"text\", \"created_at\", \"updated_at\")\n+ fields = (\"project\", \"text\", \"created_at\", \"updated_at\", \"label\")\n", "issue": "Add a function to filter labels\nWhen I rechecked the labels of the annotated data, I had no way of filtering out the labels I wanted to see. For example, when I am doing a check of dichotomous annotations, I would like to filter the data set to find out which labels are positive and which are negative, so that I can save time on the check. 
However, due to the lack of this function, I have to filter one by one from dataset, which wastes a lot of time and manpower.\r\n\r\nThanks for every contributor!\n", "before_files": [{"content": "from django.db.models import Count, Q\nfrom django_filters.rest_framework import BooleanFilter, FilterSet\n\nfrom .models import Example\n\n\nclass ExampleFilter(FilterSet):\n confirmed = BooleanFilter(field_name=\"states\", method=\"filter_by_state\")\n\n def filter_by_state(self, queryset, field_name, is_confirmed: bool):\n queryset = queryset.annotate(\n num_confirm=Count(\n expression=field_name,\n filter=Q(**{f\"{field_name}__confirmed_by\": self.request.user})\n | Q(project__collaborative_annotation=True),\n )\n )\n if is_confirmed:\n queryset = queryset.filter(num_confirm__gte=1)\n else:\n queryset = queryset.filter(num_confirm__lte=0)\n return queryset\n\n class Meta:\n model = Example\n fields = (\"project\", \"text\", \"created_at\", \"updated_at\")\n", "path": "backend/examples/filters.py"}], "after_files": [{"content": "from django.db.models import Count, Q, QuerySet\nfrom django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet\n\nfrom .models import Example\n\n\nclass ExampleFilter(FilterSet):\n confirmed = BooleanFilter(field_name=\"states\", method=\"filter_by_state\")\n label = CharFilter(method=\"filter_by_label\")\n\n def filter_by_state(self, queryset, field_name, is_confirmed: bool):\n queryset = queryset.annotate(\n num_confirm=Count(\n expression=field_name,\n filter=Q(**{f\"{field_name}__confirmed_by\": self.request.user})\n | Q(project__collaborative_annotation=True),\n )\n )\n if is_confirmed:\n queryset = queryset.filter(num_confirm__gte=1)\n else:\n queryset = queryset.filter(num_confirm__lte=0)\n return queryset\n\n def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet:\n \"\"\"Filter examples by a given label name.\n\n This performs filtering on all of the following labels at once:\n - categories\n - spans\n - relations\n - bboxes\n - segmentations\n\n Todo: Consider project type to make filtering more efficient.\n\n Args:\n queryset (QuerySet): QuerySet to filter.\n field_name (str): This equals to `label`.\n label (str): The label name to filter.\n\n Returns:\n QuerySet: Filtered examples.\n \"\"\"\n queryset = queryset.filter(\n Q(categories__label__text=label)\n | Q(spans__label__text=label)\n | Q(relations__type__text=label)\n | Q(bboxes__label__text=label)\n | Q(segmentations__label__text=label)\n )\n return queryset\n\n class Meta:\n model = Example\n fields = (\"project\", \"text\", \"created_at\", \"updated_at\", \"label\")\n", "path": "backend/examples/filters.py"}]} | 603 | 463 |
gh_patches_debug_13543 | rasdani/github-patches | git_diff | pre-commit__pre-commit-33 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pre-commit -i does not install the file with +x
No executable = no run :'(
--- END ISSUE ---
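The usual shape of a fix for this kind of problem is to set the executable bits immediately after the hook file is written. A small stand-alone sketch of that pattern, where the hook path is only an illustration:

```python
# Stand-alone illustration of adding +x to a freshly written hook script.
# The path below is hypothetical; the point is the stat/chmod pattern.
import os
import stat


def make_executable(path):
    # Preserve the existing mode and OR in execute permission
    # for user, group and others.
    mode = os.stat(path).st_mode
    os.chmod(path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)


if __name__ == "__main__":
    hook = ".git/hooks/pre-commit"  # example path
    if os.path.exists(hook):
        make_executable(hook)
```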
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/git.py`
Content:
```
1 import functools
2 import os
3 import os.path
4 import pkg_resources
5 import re
6 from plumbum import local
7
8 from pre_commit.util import memoize_by_cwd
9
10
11 def _get_root_new():
12 path = os.getcwd()
13 while len(path) > 1:
14 if os.path.exists(os.path.join(path, '.git')):
15 return path
16 else:
17 path = os.path.normpath(os.path.join(path, '../'))
18 raise AssertionError('called from outside of the gits')
19
20
21 @memoize_by_cwd
22 def get_root():
23 return _get_root_new()
24
25
26 @memoize_by_cwd
27 def get_pre_commit_path():
28 return os.path.join(get_root(), '.git/hooks/pre-commit')
29
30
31 def create_pre_commit():
32 path = get_pre_commit_path()
33 pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')
34 local.path(path).write(local.path(pre_commit_file).read())
35
36
37 def remove_pre_commit():
38 local.path(get_pre_commit_path()).delete()
39
40
41 def get_head_sha(git_repo_path):
42 with local.cwd(git_repo_path):
43 return local['git']['rev-parse', 'HEAD']().strip()
44
45
46 @memoize_by_cwd
47 def get_staged_files():
48 return local['git']['diff', '--staged', '--name-only']().splitlines()
49
50
51 @memoize_by_cwd
52 def get_all_files():
53 return local['git']['ls-files']().splitlines()
54
55
56 def get_files_matching(all_file_list_strategy):
57 @functools.wraps(all_file_list_strategy)
58 @memoize_by_cwd
59 def wrapper(expr):
60 regex = re.compile(expr)
61 return set(filter(os.path.exists, (
62 filename
63 for filename in all_file_list_strategy()
64 if regex.search(filename)
65 )))
66 return wrapper
67
68
69 get_staged_files_matching = get_files_matching(get_staged_files)
70 get_all_files_matching = get_files_matching(get_all_files)
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -3,6 +3,7 @@
import os.path
import pkg_resources
import re
+import stat
from plumbum import local
from pre_commit.util import memoize_by_cwd
@@ -32,6 +33,8 @@
path = get_pre_commit_path()
pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')
local.path(path).write(local.path(pre_commit_file).read())
+ original_mode = os.stat(path).st_mode
+ os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def remove_pre_commit():
| {"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -3,6 +3,7 @@\n import os.path\n import pkg_resources\n import re\n+import stat\n from plumbum import local\n \n from pre_commit.util import memoize_by_cwd\n@@ -32,6 +33,8 @@\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n+ original_mode = os.stat(path).st_mode\n+ os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n \n \n def remove_pre_commit():\n", "issue": "pre-commit -i does not install the file with +x\nNo executable = no run :'(\n\n", "before_files": [{"content": "import functools\nimport os\nimport os.path\nimport pkg_resources\nimport re\nfrom plumbum import local\n\nfrom pre_commit.util import memoize_by_cwd\n\n\ndef _get_root_new():\n path = os.getcwd()\n while len(path) > 1:\n if os.path.exists(os.path.join(path, '.git')):\n return path\n else:\n path = os.path.normpath(os.path.join(path, '../'))\n raise AssertionError('called from outside of the gits')\n\n\n@memoize_by_cwd\ndef get_root():\n return _get_root_new()\n\n\n@memoize_by_cwd\ndef get_pre_commit_path():\n return os.path.join(get_root(), '.git/hooks/pre-commit')\n\n\ndef create_pre_commit():\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n\n\ndef remove_pre_commit():\n local.path(get_pre_commit_path()).delete()\n\n\ndef get_head_sha(git_repo_path):\n with local.cwd(git_repo_path):\n return local['git']['rev-parse', 'HEAD']().strip()\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return local['git']['diff', '--staged', '--name-only']().splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return local['git']['ls-files']().splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(expr):\n regex = re.compile(expr)\n return set(filter(os.path.exists, (\n filename\n for filename in all_file_list_strategy()\n if regex.search(filename)\n )))\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\n", "path": "pre_commit/git.py"}], "after_files": [{"content": "import functools\nimport os\nimport os.path\nimport pkg_resources\nimport re\nimport stat\nfrom plumbum import local\n\nfrom pre_commit.util import memoize_by_cwd\n\n\ndef _get_root_new():\n path = os.getcwd()\n while len(path) > 1:\n if os.path.exists(os.path.join(path, '.git')):\n return path\n else:\n path = os.path.normpath(os.path.join(path, '../'))\n raise AssertionError('called from outside of the gits')\n\n\n@memoize_by_cwd\ndef get_root():\n return _get_root_new()\n\n\n@memoize_by_cwd\ndef get_pre_commit_path():\n return os.path.join(get_root(), '.git/hooks/pre-commit')\n\n\ndef create_pre_commit():\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n original_mode = os.stat(path).st_mode\n os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\ndef remove_pre_commit():\n local.path(get_pre_commit_path()).delete()\n\n\ndef get_head_sha(git_repo_path):\n with local.cwd(git_repo_path):\n return 
local['git']['rev-parse', 'HEAD']().strip()\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return local['git']['diff', '--staged', '--name-only']().splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return local['git']['ls-files']().splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(expr):\n regex = re.compile(expr)\n return set(filter(os.path.exists, (\n filename\n for filename in all_file_list_strategy()\n if regex.search(filename)\n )))\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\n", "path": "pre_commit/git.py"}]} | 833 | 170 |
gh_patches_debug_37725 | rasdani/github-patches | git_diff | pwndbg__pwndbg-291 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typesetting seems to be wrong

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/__init__.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6 from __future__ import unicode_literals
7
8 import gdb
9
10 import pwndbg.android
11 import pwndbg.arch
12 import pwndbg.arguments
13 import pwndbg.argv
14 import pwndbg.color
15 import pwndbg.commands
16 import pwndbg.commands.argv
17 import pwndbg.commands.aslr
18 import pwndbg.commands.auxv
19 import pwndbg.commands.checksec
20 import pwndbg.commands.config
21 import pwndbg.commands.context
22 import pwndbg.commands.cpsr
23 import pwndbg.commands.dt
24 import pwndbg.commands.dumpargs
25 import pwndbg.commands.elf
26 import pwndbg.commands.gdbinit
27 import pwndbg.commands.got
28 import pwndbg.commands.heap
29 import pwndbg.commands.hexdump
30 import pwndbg.commands.ida
31 import pwndbg.commands.misc
32 import pwndbg.commands.next
33 import pwndbg.commands.peda
34 import pwndbg.commands.procinfo
35 import pwndbg.commands.radare2
36 import pwndbg.commands.reload
37 import pwndbg.commands.rop
38 import pwndbg.commands.ropper
39 import pwndbg.commands.search
40 import pwndbg.commands.segments
41 import pwndbg.commands.shell
42 import pwndbg.commands.stack
43 import pwndbg.commands.start
44 import pwndbg.commands.telescope
45 import pwndbg.commands.theme
46 import pwndbg.commands.version
47 import pwndbg.commands.vmmap
48 import pwndbg.commands.windbg
49 import pwndbg.commands.xor
50 import pwndbg.constants
51 import pwndbg.disasm
52 import pwndbg.disasm.arm
53 import pwndbg.disasm.jump
54 import pwndbg.disasm.mips
55 import pwndbg.disasm.ppc
56 import pwndbg.disasm.sparc
57 import pwndbg.disasm.x86
58 import pwndbg.dt
59 import pwndbg.elf
60 import pwndbg.exception
61 import pwndbg.heap
62 import pwndbg.inthook
63 import pwndbg.memory
64 import pwndbg.net
65 import pwndbg.proc
66 import pwndbg.prompt
67 import pwndbg.regs
68 import pwndbg.stack
69 import pwndbg.typeinfo
70 import pwndbg.version
71 import pwndbg.vmmap
72 import pwndbg.wrappers
73
74 __version__ = pwndbg.version.__version__
75 version = __version__
76
77 try:
78 import unicorn
79 import pwndbg.emu
80 except:
81 pass
82
83 __all__ = [
84 'arch',
85 'auxv',
86 'chain',
87 'color',
88 'compat',
89 'disasm',
90 'dt',
91 'elf',
92 'enhance',
93 'events',
94 'file',
95 'function',
96 'heap',
97 'hexdump',
98 'ida',
99 'info',
100 'linkmap',
101 'malloc',
102 'memoize',
103 'memory',
104 'proc',
105 'regs',
106 'remote',
107 'search',
108 'stack',
109 'strings',
110 'symbol',
111 'typeinfo',
112 'ui',
113 'vmmap'
114 ]
115
116 prompt = "pwndbg> "
117 prompt = "\x02" + prompt + "\x01" # STX + prompt + SOH
118 prompt = pwndbg.color.red(prompt)
119 prompt = pwndbg.color.bold(prompt)
120 prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX
121
122 pre_commands = """
123 set confirm off
124 set verbose off
125 set prompt %s
126 set pagination off
127 set height 0
128 set history expansion on
129 set history save on
130 set follow-fork-mode child
131 set backtrace past-main on
132 set step-mode on
133 set print pretty on
134 set width 0
135 set print elements 15
136 handle SIGALRM nostop print nopass
137 handle SIGBUS stop print nopass
138 handle SIGPIPE nostop print nopass
139 handle SIGSEGV stop print nopass
140 """.strip() % prompt
141
142 for line in pre_commands.strip().splitlines():
143 gdb.execute(line)
144
145 # This may throw an exception, see pwndbg/pwndbg#27
146 try:
147 gdb.execute("set disassembly-flavor intel")
148 except gdb.error:
149 pass
150
```
Path: `pwndbg/ui.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 A few helpers for making things print pretty-like.
5 """
6 from __future__ import absolute_import
7 from __future__ import division
8 from __future__ import print_function
9 from __future__ import unicode_literals
10
11 import fcntl
12 import struct
13 import sys
14 import termios
15
16 import pwndbg.arch
17 import pwndbg.color.context as C
18 import pwndbg.color.theme as theme
19 import pwndbg.config as config
20
21 theme.Parameter('banner-separator', '─', 'repeated banner separator character')
22
23 def banner(title):
24 title = title.upper()
25 try:
26 _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))
27 except:
28 width = 80
29 width -= 2
30 return C.banner(("[{:%s^%ss}]" % (config.banner_separator, width)).format(title))
31
32 def addrsz(address):
33 address = int(address) & pwndbg.arch.ptrmask
34 return "%{}x".format(2*pwndbg.arch.ptrsize) % address
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/__init__.py b/pwndbg/__init__.py
--- a/pwndbg/__init__.py
+++ b/pwndbg/__init__.py
@@ -5,6 +5,8 @@
from __future__ import print_function
from __future__ import unicode_literals
+import signal
+
import gdb
import pwndbg.android
@@ -67,6 +69,7 @@
import pwndbg.regs
import pwndbg.stack
import pwndbg.typeinfo
+import pwndbg.ui
import pwndbg.version
import pwndbg.vmmap
import pwndbg.wrappers
@@ -131,13 +134,13 @@
set backtrace past-main on
set step-mode on
set print pretty on
-set width 0
+set width %i
set print elements 15
handle SIGALRM nostop print nopass
handle SIGBUS stop print nopass
handle SIGPIPE nostop print nopass
handle SIGSEGV stop print nopass
-""".strip() % prompt
+""".strip() % (prompt, pwndbg.ui.get_window_size()[1])
for line in pre_commands.strip().splitlines():
gdb.execute(line)
@@ -147,3 +150,7 @@
gdb.execute("set disassembly-flavor intel")
except gdb.error:
pass
+
+
+# handle resize event to align width and completion
+signal.signal(signal.SIGWINCH, lambda signum, frame: gdb.execute("set width %i" % pwndbg.ui.get_window_size()[1]))
diff --git a/pwndbg/ui.py b/pwndbg/ui.py
--- a/pwndbg/ui.py
+++ b/pwndbg/ui.py
@@ -9,6 +9,7 @@
from __future__ import unicode_literals
import fcntl
+import os
import struct
import sys
import termios
@@ -22,13 +23,21 @@
def banner(title):
title = title.upper()
- try:
- _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))
- except:
- width = 80
+ _height, width = get_window_size()
width -= 2
return C.banner(("[{:%s^%ss}]" % (config.banner_separator, width)).format(title))
def addrsz(address):
address = int(address) & pwndbg.arch.ptrmask
return "%{}x".format(2*pwndbg.arch.ptrsize) % address
+
+def get_window_size():
+ fallback = (int(os.environ.get('LINES', 20)), int(os.environ.get('COLUMNS', 80)))
+ if not sys.stdin.isatty:
+ return fallback
+ try:
+ # get terminal size and force ret buffer len of 4 bytes for safe unpacking by passing equally long arg
+ rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))
+ except:
+ rows, cols = fallback
+ return rows, cols
| {"golden_diff": "diff --git a/pwndbg/__init__.py b/pwndbg/__init__.py\n--- a/pwndbg/__init__.py\n+++ b/pwndbg/__init__.py\n@@ -5,6 +5,8 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+import signal\n+\n import gdb\n \n import pwndbg.android\n@@ -67,6 +69,7 @@\n import pwndbg.regs\n import pwndbg.stack\n import pwndbg.typeinfo\n+import pwndbg.ui\n import pwndbg.version\n import pwndbg.vmmap\n import pwndbg.wrappers\n@@ -131,13 +134,13 @@\n set backtrace past-main on\n set step-mode on\n set print pretty on\n-set width 0\n+set width %i\n set print elements 15\n handle SIGALRM nostop print nopass\n handle SIGBUS stop print nopass\n handle SIGPIPE nostop print nopass\n handle SIGSEGV stop print nopass\n-\"\"\".strip() % prompt\n+\"\"\".strip() % (prompt, pwndbg.ui.get_window_size()[1])\n \n for line in pre_commands.strip().splitlines():\n gdb.execute(line)\n@@ -147,3 +150,7 @@\n gdb.execute(\"set disassembly-flavor intel\")\n except gdb.error:\n pass\n+\n+\n+# handle resize event to align width and completion\n+signal.signal(signal.SIGWINCH, lambda signum, frame: gdb.execute(\"set width %i\" % pwndbg.ui.get_window_size()[1]))\ndiff --git a/pwndbg/ui.py b/pwndbg/ui.py\n--- a/pwndbg/ui.py\n+++ b/pwndbg/ui.py\n@@ -9,6 +9,7 @@\n from __future__ import unicode_literals\n \n import fcntl\n+import os\n import struct\n import sys\n import termios\n@@ -22,13 +23,21 @@\n \n def banner(title):\n title = title.upper()\n- try:\n- _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n- except:\n- width = 80\n+ _height, width = get_window_size()\n width -= 2\n return C.banner((\"[{:%s^%ss}]\" % (config.banner_separator, width)).format(title))\n \n def addrsz(address):\n address = int(address) & pwndbg.arch.ptrmask\n return \"%{}x\".format(2*pwndbg.arch.ptrsize) % address\n+\n+def get_window_size():\n+ fallback = (int(os.environ.get('LINES', 20)), int(os.environ.get('COLUMNS', 80)))\n+ if not sys.stdin.isatty:\n+ return fallback\n+ try:\n+ # get terminal size and force ret buffer len of 4 bytes for safe unpacking by passing equally long arg\n+ rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n+ except:\n+ rows, cols = fallback\n+ return rows, cols\n", "issue": "Typesetting seems to be wrong\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gdb\n\nimport pwndbg.android\nimport pwndbg.arch\nimport pwndbg.arguments\nimport pwndbg.argv\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.commands.argv\nimport pwndbg.commands.aslr\nimport pwndbg.commands.auxv\nimport pwndbg.commands.checksec\nimport pwndbg.commands.config\nimport pwndbg.commands.context\nimport pwndbg.commands.cpsr\nimport pwndbg.commands.dt\nimport pwndbg.commands.dumpargs\nimport pwndbg.commands.elf\nimport pwndbg.commands.gdbinit\nimport pwndbg.commands.got\nimport pwndbg.commands.heap\nimport pwndbg.commands.hexdump\nimport pwndbg.commands.ida\nimport pwndbg.commands.misc\nimport pwndbg.commands.next\nimport pwndbg.commands.peda\nimport pwndbg.commands.procinfo\nimport pwndbg.commands.radare2\nimport pwndbg.commands.reload\nimport pwndbg.commands.rop\nimport pwndbg.commands.ropper\nimport pwndbg.commands.search\nimport pwndbg.commands.segments\nimport pwndbg.commands.shell\nimport 
pwndbg.commands.stack\nimport pwndbg.commands.start\nimport pwndbg.commands.telescope\nimport pwndbg.commands.theme\nimport pwndbg.commands.version\nimport pwndbg.commands.vmmap\nimport pwndbg.commands.windbg\nimport pwndbg.commands.xor\nimport pwndbg.constants\nimport pwndbg.disasm\nimport pwndbg.disasm.arm\nimport pwndbg.disasm.jump\nimport pwndbg.disasm.mips\nimport pwndbg.disasm.ppc\nimport pwndbg.disasm.sparc\nimport pwndbg.disasm.x86\nimport pwndbg.dt\nimport pwndbg.elf\nimport pwndbg.exception\nimport pwndbg.heap\nimport pwndbg.inthook\nimport pwndbg.memory\nimport pwndbg.net\nimport pwndbg.proc\nimport pwndbg.prompt\nimport pwndbg.regs\nimport pwndbg.stack\nimport pwndbg.typeinfo\nimport pwndbg.version\nimport pwndbg.vmmap\nimport pwndbg.wrappers\n\n__version__ = pwndbg.version.__version__\nversion = __version__\n\ntry:\n import unicorn\n import pwndbg.emu\nexcept:\n pass\n\n__all__ = [\n'arch',\n'auxv',\n'chain',\n'color',\n'compat',\n'disasm',\n'dt',\n'elf',\n'enhance',\n'events',\n'file',\n'function',\n'heap',\n'hexdump',\n'ida',\n'info',\n'linkmap',\n'malloc',\n'memoize',\n'memory',\n'proc',\n'regs',\n'remote',\n'search',\n'stack',\n'strings',\n'symbol',\n'typeinfo',\n'ui',\n'vmmap'\n]\n\nprompt = \"pwndbg> \"\nprompt = \"\\x02\" + prompt + \"\\x01\" # STX + prompt + SOH\nprompt = pwndbg.color.red(prompt)\nprompt = pwndbg.color.bold(prompt)\nprompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n\npre_commands = \"\"\"\nset confirm off\nset verbose off\nset prompt %s\nset pagination off\nset height 0\nset history expansion on\nset history save on\nset follow-fork-mode child\nset backtrace past-main on\nset step-mode on\nset print pretty on\nset width 0\nset print elements 15\nhandle SIGALRM nostop print nopass\nhandle SIGBUS stop print nopass\nhandle SIGPIPE nostop print nopass\nhandle SIGSEGV stop print nopass\n\"\"\".strip() % prompt\n\nfor line in pre_commands.strip().splitlines():\n gdb.execute(line)\n\n# This may throw an exception, see pwndbg/pwndbg#27\ntry:\n gdb.execute(\"set disassembly-flavor intel\")\nexcept gdb.error:\n pass\n", "path": "pwndbg/__init__.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nA few helpers for making things print pretty-like.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport fcntl\nimport struct\nimport sys\nimport termios\n\nimport pwndbg.arch\nimport pwndbg.color.context as C\nimport pwndbg.color.theme as theme\nimport pwndbg.config as config\n\ntheme.Parameter('banner-separator', '\u2500', 'repeated banner separator character')\n\ndef banner(title):\n title = title.upper()\n try:\n _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n except:\n width = 80\n width -= 2\n return C.banner((\"[{:%s^%ss}]\" % (config.banner_separator, width)).format(title))\n\ndef addrsz(address):\n address = int(address) & pwndbg.arch.ptrmask\n return \"%{}x\".format(2*pwndbg.arch.ptrsize) % address\n", "path": "pwndbg/ui.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport signal\n\nimport gdb\n\nimport pwndbg.android\nimport pwndbg.arch\nimport pwndbg.arguments\nimport pwndbg.argv\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.commands.argv\nimport 
pwndbg.commands.aslr\nimport pwndbg.commands.auxv\nimport pwndbg.commands.checksec\nimport pwndbg.commands.config\nimport pwndbg.commands.context\nimport pwndbg.commands.cpsr\nimport pwndbg.commands.dt\nimport pwndbg.commands.dumpargs\nimport pwndbg.commands.elf\nimport pwndbg.commands.gdbinit\nimport pwndbg.commands.got\nimport pwndbg.commands.heap\nimport pwndbg.commands.hexdump\nimport pwndbg.commands.ida\nimport pwndbg.commands.misc\nimport pwndbg.commands.next\nimport pwndbg.commands.peda\nimport pwndbg.commands.procinfo\nimport pwndbg.commands.radare2\nimport pwndbg.commands.reload\nimport pwndbg.commands.rop\nimport pwndbg.commands.ropper\nimport pwndbg.commands.search\nimport pwndbg.commands.segments\nimport pwndbg.commands.shell\nimport pwndbg.commands.stack\nimport pwndbg.commands.start\nimport pwndbg.commands.telescope\nimport pwndbg.commands.theme\nimport pwndbg.commands.version\nimport pwndbg.commands.vmmap\nimport pwndbg.commands.windbg\nimport pwndbg.commands.xor\nimport pwndbg.constants\nimport pwndbg.disasm\nimport pwndbg.disasm.arm\nimport pwndbg.disasm.jump\nimport pwndbg.disasm.mips\nimport pwndbg.disasm.ppc\nimport pwndbg.disasm.sparc\nimport pwndbg.disasm.x86\nimport pwndbg.dt\nimport pwndbg.elf\nimport pwndbg.exception\nimport pwndbg.heap\nimport pwndbg.inthook\nimport pwndbg.memory\nimport pwndbg.net\nimport pwndbg.proc\nimport pwndbg.prompt\nimport pwndbg.regs\nimport pwndbg.stack\nimport pwndbg.typeinfo\nimport pwndbg.ui\nimport pwndbg.version\nimport pwndbg.vmmap\nimport pwndbg.wrappers\n\n__version__ = pwndbg.version.__version__\nversion = __version__\n\ntry:\n import unicorn\n import pwndbg.emu\nexcept:\n pass\n\n__all__ = [\n'arch',\n'auxv',\n'chain',\n'color',\n'compat',\n'disasm',\n'dt',\n'elf',\n'enhance',\n'events',\n'file',\n'function',\n'heap',\n'hexdump',\n'ida',\n'info',\n'linkmap',\n'malloc',\n'memoize',\n'memory',\n'proc',\n'regs',\n'remote',\n'search',\n'stack',\n'strings',\n'symbol',\n'typeinfo',\n'ui',\n'vmmap'\n]\n\nprompt = \"pwndbg> \"\nprompt = \"\\x02\" + prompt + \"\\x01\" # STX + prompt + SOH\nprompt = pwndbg.color.red(prompt)\nprompt = pwndbg.color.bold(prompt)\nprompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n\npre_commands = \"\"\"\nset confirm off\nset verbose off\nset prompt %s\nset pagination off\nset height 0\nset history expansion on\nset history save on\nset follow-fork-mode child\nset backtrace past-main on\nset step-mode on\nset print pretty on\nset width %i\nset print elements 15\nhandle SIGALRM nostop print nopass\nhandle SIGBUS stop print nopass\nhandle SIGPIPE nostop print nopass\nhandle SIGSEGV stop print nopass\n\"\"\".strip() % (prompt, pwndbg.ui.get_window_size()[1])\n\nfor line in pre_commands.strip().splitlines():\n gdb.execute(line)\n\n# This may throw an exception, see pwndbg/pwndbg#27\ntry:\n gdb.execute(\"set disassembly-flavor intel\")\nexcept gdb.error:\n pass\n\n\n# handle resize event to align width and completion\nsignal.signal(signal.SIGWINCH, lambda signum, frame: gdb.execute(\"set width %i\" % pwndbg.ui.get_window_size()[1]))\n", "path": "pwndbg/__init__.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nA few helpers for making things print pretty-like.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport fcntl\nimport os\nimport struct\nimport sys\nimport termios\n\nimport pwndbg.arch\nimport pwndbg.color.context as C\nimport pwndbg.color.theme as 
theme\nimport pwndbg.config as config\n\ntheme.Parameter('banner-separator', '\u2500', 'repeated banner separator character')\n\ndef banner(title):\n title = title.upper()\n _height, width = get_window_size()\n width -= 2\n return C.banner((\"[{:%s^%ss}]\" % (config.banner_separator, width)).format(title))\n\ndef addrsz(address):\n address = int(address) & pwndbg.arch.ptrmask\n return \"%{}x\".format(2*pwndbg.arch.ptrsize) % address\n\ndef get_window_size():\n fallback = (int(os.environ.get('LINES', 20)), int(os.environ.get('COLUMNS', 80)))\n if not sys.stdin.isatty:\n return fallback\n try:\n # get terminal size and force ret buffer len of 4 bytes for safe unpacking by passing equally long arg\n rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n except:\n rows, cols = fallback\n return rows, cols\n", "path": "pwndbg/ui.py"}]} | 1,904 | 707 |
gh_patches_debug_39662 | rasdani/github-patches | git_diff | jupyterhub__zero-to-jupyterhub-k8s-531 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
culler is failing and exiting when culling users and servers are slow to stop
Two issues:
1. culler script seems to exit when the cull request fails. It's unclear why this happens, but we should confirm and fix this
2. the 'real' issue is that the culler is hitting 400 errors in the first place. The cause is servers that are slow to stop (DELETE /users/:name gives 400 if the user's server is running and cannot stop promptly). The previous request to stop the server will have returned 202 ACCEPTED instead of 204 DELETED in this case. If we delay deleting users if we get 202 ACCEPTED from the server deletion, we should be safe here.
--- END ISSUE ---
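A sketch of the shape such a guard could take inside the cull coroutine, assuming the Tornado client already used by the script. Per the reasoning in the issue, `DELETE /users/:name/server` answers 202 when the stop was accepted but has not finished, so the user deletion should be skipped on that pass. The function and variable names below simply mirror those in `cull_idle_servers.py`; this is an illustration, not the project's actual patch.

```python
# Hedged sketch: skip user deletion while the server is still shutting down.
from tornado.gen import coroutine
from tornado.httpclient import HTTPRequest
from tornado.log import app_log


@coroutine
def stop_server_then_maybe_cull_user(client, url, auth_header, user, cull_users):
    req = HTTPRequest(url=url + '/users/%s/server' % user['name'],
                      method='DELETE', headers=auth_header)
    resp = yield client.fetch(req)
    if resp.code == 202:
        # 202 Accepted: the spawner is still stopping, so deleting the user
        # now would fail with a 400. Leave it for the next cull pass.
        app_log.warning("Server for %s is slow to stop; not culling user yet.",
                        user['name'])
        return
    if cull_users:
        req = HTTPRequest(url=url + '/users/%s' % user['name'],
                          method='DELETE', headers=auth_header)
        yield client.fetch(req)
```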
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `images/hub/cull_idle_servers.py`
Content:
```
1 #!/usr/bin/env python3
2 # Imported from https://github.com/jupyterhub/jupyterhub/blob/0.8.0rc1/examples/cull-idle/cull_idle_servers.py
3 """script to monitor and cull idle single-user servers
4
5 Caveats:
6
7 last_activity is not updated with high frequency,
8 so cull timeout should be greater than the sum of:
9
10 - single-user websocket ping interval (default: 30s)
11 - JupyterHub.last_activity_interval (default: 5 minutes)
12
13 You can run this as a service managed by JupyterHub with this in your config::
14
15
16 c.JupyterHub.services = [
17 {
18 'name': 'cull-idle',
19 'admin': True,
20 'command': 'python cull_idle_servers.py --timeout=3600'.split(),
21 }
22 ]
23
24 Or run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`:
25
26 export JUPYTERHUB_API_TOKEN=`jupyterhub token`
27 python cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]
28 """
29
30 import datetime
31 import json
32 import os
33
34 from dateutil.parser import parse as parse_date
35
36 from tornado.gen import coroutine
37 from tornado.log import app_log
38 from tornado.httpclient import AsyncHTTPClient, HTTPRequest
39 from tornado.ioloop import IOLoop, PeriodicCallback
40 from tornado.options import define, options, parse_command_line
41
42
43 @coroutine
44 def cull_idle(url, api_token, timeout, cull_users=False):
45 """Shutdown idle single-user servers
46
47 If cull_users, inactive *users* will be deleted as well.
48 """
49 auth_header = {
50 'Authorization': 'token %s' % api_token
51 }
52 req = HTTPRequest(url=url + '/users',
53 headers=auth_header,
54 )
55 now = datetime.datetime.utcnow()
56 cull_limit = now - datetime.timedelta(seconds=timeout)
57 client = AsyncHTTPClient()
58 resp = yield client.fetch(req)
59 users = json.loads(resp.body.decode('utf8', 'replace'))
60 futures = []
61
62 @coroutine
63 def cull_one(user, last_activity):
64 """cull one user"""
65
66 # shutdown server first. Hub doesn't allow deleting users with running servers.
67 if user['server']:
68 app_log.info("Culling server for %s (inactive since %s)", user['name'], last_activity)
69 req = HTTPRequest(url=url + '/users/%s/server' % user['name'],
70 method='DELETE',
71 headers=auth_header,
72 )
73 yield client.fetch(req)
74 if cull_users:
75 app_log.info("Culling user %s (inactive since %s)", user['name'], last_activity)
76 req = HTTPRequest(url=url + '/users/%s' % user['name'],
77 method='DELETE',
78 headers=auth_header,
79 )
80 yield client.fetch(req)
81
82 for user in users:
83 if not user['server'] and not cull_users:
84 # server not running and not culling users, nothing to do
85 continue
86 last_activity = parse_date(user['last_activity'])
87 if last_activity < cull_limit:
88 futures.append((user['name'], cull_one(user, last_activity)))
89 else:
90 app_log.debug("Not culling %s (active since %s)", user['name'], last_activity)
91
92 for (name, f) in futures:
93 yield f
94 app_log.debug("Finished culling %s", name)
95
96
97 if __name__ == '__main__':
98 define('url', default=os.environ.get('JUPYTERHUB_API_URL'), help="The JupyterHub API URL")
99 define('timeout', default=600, help="The idle timeout (in seconds)")
100 define('cull_every', default=0, help="The interval (in seconds) for checking for idle servers to cull")
101 define('cull_users', default=False,
102 help="""Cull users in addition to servers.
103 This is for use in temporary-user cases such as tmpnb.""",
104 )
105
106 parse_command_line()
107 if not options.cull_every:
108 options.cull_every = options.timeout // 2
109 api_token = os.environ['JUPYTERHUB_API_TOKEN']
110
111 loop = IOLoop.current()
112 cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)
113 # run once before scheduling periodic call
114 loop.run_sync(cull)
115 # schedule periodic cull
116 pc = PeriodicCallback(cull, 1e3 * options.cull_every)
117 pc.start()
118 try:
119 loop.start()
120 except KeyboardInterrupt:
121 pass
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/images/hub/cull_idle_servers.py b/images/hub/cull_idle_servers.py
--- a/images/hub/cull_idle_servers.py
+++ b/images/hub/cull_idle_servers.py
@@ -70,7 +70,15 @@
method='DELETE',
headers=auth_header,
)
- yield client.fetch(req)
+ resp = yield client.fetch(req)
+ if resp.code == 202:
+ msg = "Server for {} is slow to stop.".format(user['name'])
+ if cull_users:
+ app_log.warning(msg + " Not culling user yet.")
+ # return here so we don't continue to cull the user
+ # which will fail if the server is still trying to shutdown
+ return
+ app_log.warning(msg)
if cull_users:
app_log.info("Culling user %s (inactive since %s)", user['name'], last_activity)
req = HTTPRequest(url=url + '/users/%s' % user['name'],
@@ -83,12 +91,19 @@
if not user['server'] and not cull_users:
# server not running and not culling users, nothing to do
continue
+ if not user['last_activity']:
+ continue
last_activity = parse_date(user['last_activity'])
if last_activity < cull_limit:
+ # user might be in a transition (e.g. starting or stopping)
+ # don't try to cull if this is happening
+ if user['pending']:
+ app_log.warning("Not culling user %s with pending %s", user['name'], user['pending'])
+ continue
futures.append((user['name'], cull_one(user, last_activity)))
else:
app_log.debug("Not culling %s (active since %s)", user['name'], last_activity)
-
+
for (name, f) in futures:
yield f
app_log.debug("Finished culling %s", name)
@@ -102,16 +117,17 @@
help="""Cull users in addition to servers.
This is for use in temporary-user cases such as tmpnb.""",
)
-
+
parse_command_line()
if not options.cull_every:
options.cull_every = options.timeout // 2
api_token = os.environ['JUPYTERHUB_API_TOKEN']
-
+
loop = IOLoop.current()
cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)
- # run once before scheduling periodic call
- loop.run_sync(cull)
+ # schedule first cull immediately
+ # because PeriodicCallback doesn't start until the end of the first interval
+ loop.add_callback(cull)
# schedule periodic cull
pc = PeriodicCallback(cull, 1e3 * options.cull_every)
pc.start()
| {"golden_diff": "diff --git a/images/hub/cull_idle_servers.py b/images/hub/cull_idle_servers.py\n--- a/images/hub/cull_idle_servers.py\n+++ b/images/hub/cull_idle_servers.py\n@@ -70,7 +70,15 @@\n method='DELETE',\n headers=auth_header,\n )\n- yield client.fetch(req)\n+ resp = yield client.fetch(req)\n+ if resp.code == 202:\n+ msg = \"Server for {} is slow to stop.\".format(user['name'])\n+ if cull_users:\n+ app_log.warning(msg + \" Not culling user yet.\")\n+ # return here so we don't continue to cull the user\n+ # which will fail if the server is still trying to shutdown\n+ return\n+ app_log.warning(msg)\n if cull_users:\n app_log.info(\"Culling user %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s' % user['name'],\n@@ -83,12 +91,19 @@\n if not user['server'] and not cull_users:\n # server not running and not culling users, nothing to do\n continue\n+ if not user['last_activity']:\n+ continue\n last_activity = parse_date(user['last_activity'])\n if last_activity < cull_limit:\n+ # user might be in a transition (e.g. starting or stopping)\n+ # don't try to cull if this is happening\n+ if user['pending']:\n+ app_log.warning(\"Not culling user %s with pending %s\", user['name'], user['pending'])\n+ continue\n futures.append((user['name'], cull_one(user, last_activity)))\n else:\n app_log.debug(\"Not culling %s (active since %s)\", user['name'], last_activity)\n- \n+\n for (name, f) in futures:\n yield f\n app_log.debug(\"Finished culling %s\", name)\n@@ -102,16 +117,17 @@\n help=\"\"\"Cull users in addition to servers.\n This is for use in temporary-user cases such as tmpnb.\"\"\",\n )\n- \n+\n parse_command_line()\n if not options.cull_every:\n options.cull_every = options.timeout // 2\n api_token = os.environ['JUPYTERHUB_API_TOKEN']\n- \n+\n loop = IOLoop.current()\n cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\n- # run once before scheduling periodic call\n- loop.run_sync(cull)\n+ # schedule first cull immediately\n+ # because PeriodicCallback doesn't start until the end of the first interval\n+ loop.add_callback(cull)\n # schedule periodic cull\n pc = PeriodicCallback(cull, 1e3 * options.cull_every)\n pc.start()\n", "issue": "culler is failing and exiting when culling users and servers are slow to stop\nTwo issues:\r\n\r\n1. culler script seems to exit when the cull request fails. It's unclear why this happens, but we should confirm and fix this\r\n2. the 'real' issue is that the culler is hitting 400 errors in the first place. The cause is servers that are slow to stop (DELETE /users/:name gives 400 if the user's server is running and cannot stop promptly). The previous request to stop the server will have returned 202 ACCEPTED instead of 204 DELETED in this case. 
If we delay deleting users if we get 202 ACCEPTED from the server deletion, we should be safe here.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Imported from https://github.com/jupyterhub/jupyterhub/blob/0.8.0rc1/examples/cull-idle/cull_idle_servers.py\n\"\"\"script to monitor and cull idle single-user servers\n\nCaveats:\n\nlast_activity is not updated with high frequency,\nso cull timeout should be greater than the sum of:\n\n- single-user websocket ping interval (default: 30s)\n- JupyterHub.last_activity_interval (default: 5 minutes)\n\nYou can run this as a service managed by JupyterHub with this in your config::\n\n\n c.JupyterHub.services = [\n {\n 'name': 'cull-idle',\n 'admin': True,\n 'command': 'python cull_idle_servers.py --timeout=3600'.split(),\n }\n ]\n\nOr run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`:\n\n export JUPYTERHUB_API_TOKEN=`jupyterhub token`\n python cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]\n\"\"\"\n\nimport datetime\nimport json\nimport os\n\nfrom dateutil.parser import parse as parse_date\n\nfrom tornado.gen import coroutine\nfrom tornado.log import app_log\nfrom tornado.httpclient import AsyncHTTPClient, HTTPRequest\nfrom tornado.ioloop import IOLoop, PeriodicCallback\nfrom tornado.options import define, options, parse_command_line\n\n\n@coroutine\ndef cull_idle(url, api_token, timeout, cull_users=False):\n \"\"\"Shutdown idle single-user servers\n\n If cull_users, inactive *users* will be deleted as well.\n \"\"\"\n auth_header = {\n 'Authorization': 'token %s' % api_token\n }\n req = HTTPRequest(url=url + '/users',\n headers=auth_header,\n )\n now = datetime.datetime.utcnow()\n cull_limit = now - datetime.timedelta(seconds=timeout)\n client = AsyncHTTPClient()\n resp = yield client.fetch(req)\n users = json.loads(resp.body.decode('utf8', 'replace'))\n futures = []\n\n @coroutine\n def cull_one(user, last_activity):\n \"\"\"cull one user\"\"\"\n\n # shutdown server first. 
Hub doesn't allow deleting users with running servers.\n if user['server']:\n app_log.info(\"Culling server for %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s/server' % user['name'],\n method='DELETE',\n headers=auth_header,\n )\n yield client.fetch(req)\n if cull_users:\n app_log.info(\"Culling user %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s' % user['name'],\n method='DELETE',\n headers=auth_header,\n )\n yield client.fetch(req)\n\n for user in users:\n if not user['server'] and not cull_users:\n # server not running and not culling users, nothing to do\n continue\n last_activity = parse_date(user['last_activity'])\n if last_activity < cull_limit:\n futures.append((user['name'], cull_one(user, last_activity)))\n else:\n app_log.debug(\"Not culling %s (active since %s)\", user['name'], last_activity)\n \n for (name, f) in futures:\n yield f\n app_log.debug(\"Finished culling %s\", name)\n\n\nif __name__ == '__main__':\n define('url', default=os.environ.get('JUPYTERHUB_API_URL'), help=\"The JupyterHub API URL\")\n define('timeout', default=600, help=\"The idle timeout (in seconds)\")\n define('cull_every', default=0, help=\"The interval (in seconds) for checking for idle servers to cull\")\n define('cull_users', default=False,\n help=\"\"\"Cull users in addition to servers.\n This is for use in temporary-user cases such as tmpnb.\"\"\",\n )\n \n parse_command_line()\n if not options.cull_every:\n options.cull_every = options.timeout // 2\n api_token = os.environ['JUPYTERHUB_API_TOKEN']\n \n loop = IOLoop.current()\n cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\n # run once before scheduling periodic call\n loop.run_sync(cull)\n # schedule periodic cull\n pc = PeriodicCallback(cull, 1e3 * options.cull_every)\n pc.start()\n try:\n loop.start()\n except KeyboardInterrupt:\n pass\n", "path": "images/hub/cull_idle_servers.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Imported from https://github.com/jupyterhub/jupyterhub/blob/0.8.0rc1/examples/cull-idle/cull_idle_servers.py\n\"\"\"script to monitor and cull idle single-user servers\n\nCaveats:\n\nlast_activity is not updated with high frequency,\nso cull timeout should be greater than the sum of:\n\n- single-user websocket ping interval (default: 30s)\n- JupyterHub.last_activity_interval (default: 5 minutes)\n\nYou can run this as a service managed by JupyterHub with this in your config::\n\n\n c.JupyterHub.services = [\n {\n 'name': 'cull-idle',\n 'admin': True,\n 'command': 'python cull_idle_servers.py --timeout=3600'.split(),\n }\n ]\n\nOr run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`:\n\n export JUPYTERHUB_API_TOKEN=`jupyterhub token`\n python cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]\n\"\"\"\n\nimport datetime\nimport json\nimport os\n\nfrom dateutil.parser import parse as parse_date\n\nfrom tornado.gen import coroutine\nfrom tornado.log import app_log\nfrom tornado.httpclient import AsyncHTTPClient, HTTPRequest\nfrom tornado.ioloop import IOLoop, PeriodicCallback\nfrom tornado.options import define, options, parse_command_line\n\n\n@coroutine\ndef cull_idle(url, api_token, timeout, cull_users=False):\n \"\"\"Shutdown idle single-user servers\n\n If cull_users, inactive *users* will be deleted as well.\n \"\"\"\n auth_header = {\n 'Authorization': 'token %s' % api_token\n }\n req = HTTPRequest(url=url + 
'/users',\n headers=auth_header,\n )\n now = datetime.datetime.utcnow()\n cull_limit = now - datetime.timedelta(seconds=timeout)\n client = AsyncHTTPClient()\n resp = yield client.fetch(req)\n users = json.loads(resp.body.decode('utf8', 'replace'))\n futures = []\n\n @coroutine\n def cull_one(user, last_activity):\n \"\"\"cull one user\"\"\"\n\n # shutdown server first. Hub doesn't allow deleting users with running servers.\n if user['server']:\n app_log.info(\"Culling server for %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s/server' % user['name'],\n method='DELETE',\n headers=auth_header,\n )\n resp = yield client.fetch(req)\n if resp.code == 202:\n msg = \"Server for {} is slow to stop.\".format(user['name'])\n if cull_users:\n app_log.warning(msg + \" Not culling user yet.\")\n # return here so we don't continue to cull the user\n # which will fail if the server is still trying to shutdown\n return\n app_log.warning(msg)\n if cull_users:\n app_log.info(\"Culling user %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s' % user['name'],\n method='DELETE',\n headers=auth_header,\n )\n yield client.fetch(req)\n\n for user in users:\n if not user['server'] and not cull_users:\n # server not running and not culling users, nothing to do\n continue\n if not user['last_activity']:\n continue\n last_activity = parse_date(user['last_activity'])\n if last_activity < cull_limit:\n # user might be in a transition (e.g. starting or stopping)\n # don't try to cull if this is happening\n if user['pending']:\n app_log.warning(\"Not culling user %s with pending %s\", user['name'], user['pending'])\n continue\n futures.append((user['name'], cull_one(user, last_activity)))\n else:\n app_log.debug(\"Not culling %s (active since %s)\", user['name'], last_activity)\n\n for (name, f) in futures:\n yield f\n app_log.debug(\"Finished culling %s\", name)\n\n\nif __name__ == '__main__':\n define('url', default=os.environ.get('JUPYTERHUB_API_URL'), help=\"The JupyterHub API URL\")\n define('timeout', default=600, help=\"The idle timeout (in seconds)\")\n define('cull_every', default=0, help=\"The interval (in seconds) for checking for idle servers to cull\")\n define('cull_users', default=False,\n help=\"\"\"Cull users in addition to servers.\n This is for use in temporary-user cases such as tmpnb.\"\"\",\n )\n\n parse_command_line()\n if not options.cull_every:\n options.cull_every = options.timeout // 2\n api_token = os.environ['JUPYTERHUB_API_TOKEN']\n\n loop = IOLoop.current()\n cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\n # schedule first cull immediately\n # because PeriodicCallback doesn't start until the end of the first interval\n loop.add_callback(cull)\n # schedule periodic cull\n pc = PeriodicCallback(cull, 1e3 * options.cull_every)\n pc.start()\n try:\n loop.start()\n except KeyboardInterrupt:\n pass\n", "path": "images/hub/cull_idle_servers.py"}]} | 1,716 | 647 |
gh_patches_debug_13209 | rasdani/github-patches | git_diff | cal-itp__benefits-38 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ensure cookies are enabled
Need a basic client-side check that cookies are enabled (and UI if not) as soon as the app loads, since we require cookies to store the temporary transaction data.
--- END ISSUE ---
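The issue asks for a client-side (JavaScript) check shown as soon as the app loads. As a point of comparison only, Django also ships a server-side test-cookie mechanism that can back such a check up; a minimal sketch of that built-in flow follows, with hypothetical view names and redirect target — only the `set_test_cookie()` / `test_cookie_worked()` / `delete_test_cookie()` calls are Django API.

```python
# Server-side fallback using Django's built-in test-cookie helpers.
# View names and the redirect target are hypothetical illustrations.
from django.http import HttpResponse
from django.shortcuts import redirect


def index(request):
    # Ask the browser to store a throwaway cookie before entering the flow.
    request.session.set_test_cookie()
    return HttpResponse("Welcome - continue to the eligibility flow.")


def eligibility_start(request):
    if not request.session.test_cookie_worked():
        # Cookies are disabled: show guidance instead of starting a flow
        # that depends on signed-cookie sessions.
        return HttpResponse("Cookies must be enabled to use this site.", status=400)
    request.session.delete_test_cookie()
    return redirect("next-step")  # hypothetical URL name
```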
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/settings.py`
Content:
```
1 """
2 Django settings for benefits project.
3 """
4 import os
5
6 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
7 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
8
9 # SECURITY WARNING: keep the secret key used in production secret!
10 SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
11
12 # SECURITY WARNING: don't run with debug turned on in production!
13 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
14
15 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
16
17 ALLOWED_HOSTS = []
18
19 if DEBUG:
20 ALLOWED_HOSTS.extend(["*"])
21 else:
22 hosts = os.environ["DJANGO_ALLOWED_HOSTS"].split()
23 ALLOWED_HOSTS.extend(hosts)
24
25 # Application definition
26
27 INSTALLED_APPS = [
28 "django.contrib.sessions",
29 "django.contrib.staticfiles",
30 "benefits.core",
31 "benefits.enrollment",
32 "benefits.eligibility",
33 ]
34
35 if ADMIN:
36 INSTALLED_APPS.extend(
37 [
38 "django.contrib.admin",
39 "django.contrib.auth",
40 "django.contrib.contenttypes",
41 "django.contrib.messages",
42 ]
43 )
44
45 MIDDLEWARE = [
46 "django.middleware.security.SecurityMiddleware",
47 "django.contrib.sessions.middleware.SessionMiddleware",
48 "django.middleware.locale.LocaleMiddleware",
49 "django.middleware.common.CommonMiddleware",
50 "django.middleware.csrf.CsrfViewMiddleware",
51 "django.middleware.clickjacking.XFrameOptionsMiddleware",
52 "benefits.core.middleware.DebugSession",
53 "benefits.core.middleware.ChangedLanguageEvent",
54 ]
55
56 if ADMIN:
57 MIDDLEWARE.extend(
58 [
59 "django.contrib.auth.middleware.AuthenticationMiddleware",
60 "django.contrib.messages.middleware.MessageMiddleware",
61 ]
62 )
63
64 CSRF_COOKIE_HTTPONLY = True
65
66 SESSION_COOKIE_AGE = 3600
67 SESSION_COOKIE_SAMESITE = "Strict"
68 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
69
70 if not DEBUG:
71 CSRF_COOKIE_SECURE = True
72 SESSION_COOKIE_SECURE = True
73
74 ROOT_URLCONF = "benefits.urls"
75
76 template_ctx_processors = [
77 "django.template.context_processors.request",
78 "benefits.core.context_processors.analytics",
79 ]
80
81 if DEBUG:
82 template_ctx_processors.extend(
83 [
84 "django.template.context_processors.debug",
85 "benefits.core.context_processors.debug",
86 ]
87 )
88
89 if ADMIN:
90 template_ctx_processors.extend(
91 [
92 "django.contrib.auth.context_processors.auth",
93 "django.contrib.messages.context_processors.messages",
94 ]
95 )
96
97 TEMPLATES = [
98 {
99 "BACKEND": "django.template.backends.django.DjangoTemplates",
100 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
101 "APP_DIRS": True,
102 "OPTIONS": {
103 "context_processors": template_ctx_processors,
104 },
105 },
106 ]
107
108 WSGI_APPLICATION = "benefits.wsgi.application"
109
110 DATABASES = {
111 "default": {
112 "ENGINE": "django.db.backends.sqlite3",
113 "NAME": os.environ.get("DJANGO_DB", "django") + ".db",
114 }
115 }
116
117 # Password validation
118
119 AUTH_PASSWORD_VALIDATORS = []
120
121 if ADMIN:
122 AUTH_PASSWORD_VALIDATORS.extend(
123 [
124 {
125 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
126 },
127 {
128 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
129 },
130 {
131 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
132 },
133 {
134 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
135 },
136 ]
137 )
138
139 # Internationalization
140
141 LANGUAGE_CODE = "en"
142
143 LANGUAGES = [("en", "English"), ("es", "Español")]
144
145 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
146
147 USE_I18N = True
148 USE_L10N = True
149
150 TIME_ZONE = "UTC"
151 USE_TZ = True
152
153 # Static files (CSS, JavaScript, Images)
154
155 STATIC_URL = "/static/"
156 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
157 STATIC_ROOT = os.path.join(BASE_DIR, "static")
158
159 # Logging configuration
160
161 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
162 LOGGING = {
163 "version": 1,
164 "disable_existing_loggers": False,
165 "formatters": {
166 "default": {
167 "format": "[{asctime}] {levelname} {name}:{lineno} {message}",
168 "datefmt": "%d/%b/%Y %H:%M:%S",
169 "style": "{",
170 },
171 },
172 "handlers": {
173 "default": {"class": "logging.StreamHandler", "formatter": "default"},
174 },
175 "root": {
176 "handlers": ["default"],
177 "level": LOG_LEVEL,
178 },
179 "loggers": {"django": {"handlers": ["default"], "propagate": False}},
180 }
181
182 # Analytics configuration
183
184 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
185
```
Path: `benefits/urls.py`
Content:
```
1 """
2 benefits URL Configuration
3
4 The `urlpatterns` list routes URLs to views. For more information please see:
5 https://docs.djangoproject.com/en/3.1/topics/http/urls/
6 """
7 import logging
8
9 from django.urls import include, path
10
11 from benefits.settings import ADMIN
12
13
14 logger = logging.getLogger(__name__)
15
16 handler400 = "benefits.core.views.bad_request"
17 handler403 = "benefits.core.views.bad_request"
18 handler404 = "benefits.core.views.page_not_found"
19 handler500 = "benefits.core.views.server_error"
20
21 urlpatterns = [
22 path("", include("benefits.core.urls")),
23 path("enrollment/", include("benefits.enrollment.urls")),
24 path("eligibility/", include("benefits.eligibility.urls")),
25 path("i18n/", include("django.conf.urls.i18n")),
26 ]
27
28 if ADMIN:
29 from django.contrib import admin
30
31 logger.debug("Register admin/ urls")
32 urlpatterns.append(path("admin/", admin.site.urls))
33 else:
34 logger.debug("Skip url registrations for admin")
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -69,6 +69,7 @@
if not DEBUG:
CSRF_COOKIE_SECURE = True
+ CSRF_FAILURE_VIEW = "benefits.core.views.bad_request"
SESSION_COOKIE_SECURE = True
ROOT_URLCONF = "benefits.urls"
diff --git a/benefits/urls.py b/benefits/urls.py
--- a/benefits/urls.py
+++ b/benefits/urls.py
@@ -20,8 +20,8 @@
urlpatterns = [
path("", include("benefits.core.urls")),
- path("enrollment/", include("benefits.enrollment.urls")),
path("eligibility/", include("benefits.eligibility.urls")),
+ path("enrollment/", include("benefits.enrollment.urls")),
path("i18n/", include("django.conf.urls.i18n")),
]
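The new `CSRF_FAILURE_VIEW` line in the diff above points at an existing view; Django invokes whatever that dotted path resolves to with the request plus a `reason` keyword argument describing the CSRF failure. A minimal compatible callable looks roughly like the sketch below (the body is an assumption, not the project's real `bad_request`):

```python
# Sketch of a view matching Django's CSRF_FAILURE_VIEW calling convention.
from django.http import HttpResponseBadRequest


def bad_request(request, reason=""):
    # "reason" carries Django's description of why CSRF validation failed.
    return HttpResponseBadRequest(f"Bad request: {reason}")
```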
| {"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -69,6 +69,7 @@\n \n if not DEBUG:\n CSRF_COOKIE_SECURE = True\n+ CSRF_FAILURE_VIEW = \"benefits.core.views.bad_request\"\n SESSION_COOKIE_SECURE = True\n \n ROOT_URLCONF = \"benefits.urls\"\ndiff --git a/benefits/urls.py b/benefits/urls.py\n--- a/benefits/urls.py\n+++ b/benefits/urls.py\n@@ -20,8 +20,8 @@\n \n urlpatterns = [\n path(\"\", include(\"benefits.core.urls\")),\n- path(\"enrollment/\", include(\"benefits.enrollment.urls\")),\n path(\"eligibility/\", include(\"benefits.eligibility.urls\")),\n+ path(\"enrollment/\", include(\"benefits.enrollment.urls\")),\n path(\"i18n/\", include(\"django.conf.urls.i18n\")),\n ]\n", "issue": "Ensure cookies are enabled\nNeed a basic client-side check that cookies are enabled (and UI if not) as soon as the app loads, since we require cookies to store the temporary transaction data.\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split()\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_AGE = 3600\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, 
\"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n", "path": "benefits/settings.py"}, {"content": "\"\"\"\nbenefits URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\n\"\"\"\nimport logging\n\nfrom django.urls import include, path\n\nfrom benefits.settings import ADMIN\n\n\nlogger = logging.getLogger(__name__)\n\nhandler400 = \"benefits.core.views.bad_request\"\nhandler403 = \"benefits.core.views.bad_request\"\nhandler404 = \"benefits.core.views.page_not_found\"\nhandler500 = \"benefits.core.views.server_error\"\n\nurlpatterns = [\n path(\"\", include(\"benefits.core.urls\")),\n path(\"enrollment/\", include(\"benefits.enrollment.urls\")),\n path(\"eligibility/\", include(\"benefits.eligibility.urls\")),\n path(\"i18n/\", include(\"django.conf.urls.i18n\")),\n]\n\nif ADMIN:\n from django.contrib import admin\n\n logger.debug(\"Register admin/ urls\")\n urlpatterns.append(path(\"admin/\", admin.site.urls))\nelse:\n logger.debug(\"Skip url registrations for admin\")\n", "path": "benefits/urls.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split()\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_AGE = 3600\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.bad_request\"\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": 
[os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n", "path": "benefits/settings.py"}, {"content": "\"\"\"\nbenefits URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\n\"\"\"\nimport logging\n\nfrom django.urls import include, path\n\nfrom benefits.settings import ADMIN\n\n\nlogger = logging.getLogger(__name__)\n\nhandler400 = \"benefits.core.views.bad_request\"\nhandler403 = \"benefits.core.views.bad_request\"\nhandler404 = \"benefits.core.views.page_not_found\"\nhandler500 = \"benefits.core.views.server_error\"\n\nurlpatterns = [\n path(\"\", include(\"benefits.core.urls\")),\n path(\"eligibility/\", include(\"benefits.eligibility.urls\")),\n path(\"enrollment/\", include(\"benefits.enrollment.urls\")),\n path(\"i18n/\", include(\"django.conf.urls.i18n\")),\n]\n\nif ADMIN:\n from django.contrib import admin\n\n logger.debug(\"Register admin/ urls\")\n urlpatterns.append(path(\"admin/\", admin.site.urls))\nelse:\n logger.debug(\"Skip url registrations for admin\")\n", "path": "benefits/urls.py"}]} | 2,132 | 212 |
gh_patches_debug_19230 | rasdani/github-patches | git_diff | google__clusterfuzz-863 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fuzzers page does not work well with large number of jobs
For libFuzzer, if we have a lot of existing jobs (>100) and want to add a new job and associate it, the Submit button feels stuck, does not show updates, and takes 1-2 min to finish. Can we show some update, or something better, to optimize this when only one job is updated?
@oliverchang as fyi.
--- END ISSUE ---
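The cost here comes from rebuilding the fuzzer-job mappings wholesale: the current update path (shown in the files below) issues one datastore query per selected job even when only a single job actually changed. The datastore-free sketch below illustrates the delta-based idea (function and variable names are assumptions, not ClusterFuzz APIs):

```python
def plan_mapping_updates(existing_jobs, desired_jobs):
    """Return (to_add, to_remove) so only changed mappings are written."""
    existing = set(existing_jobs)
    desired = set(desired_jobs)
    to_add = sorted(desired - existing)      # mappings to create
    to_remove = sorted(existing - desired)   # stale mappings to delete
    return to_add, to_remove


# Example: 100 jobs already mapped, one new job associated.
current = ["job_%d" % i for i in range(100)]
wanted = current + ["job_new"]
print(plan_mapping_updates(current, wanted))  # (['job_new'], [])
```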
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/fuzzing/fuzzer_selection.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Helper functions to update fuzzer-job mappings, and select fuzzers to run."""
15
16 import collections
17
18 from base import utils
19 from datastore import data_types
20 from datastore import fuzz_target_utils
21 from datastore import ndb
22 from datastore import ndb_utils
23 from metrics import logs
24 from system import environment
25
26 # Used to prepare targets to be passed to utils.random_weighted_choice.
27 WeightedTarget = collections.namedtuple('WeightedTarget', ['target', 'weight'])
28
29
30 def update_mappings_for_fuzzer(fuzzer, mappings=None):
31 """Clear existing mappings for a fuzzer, and replace them."""
32 if mappings is None:
33 mappings = fuzzer.jobs
34
35 query = data_types.FuzzerJob.query()
36 query = query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name)
37 entities = ndb_utils.get_all_from_query(query)
38 old_mappings = {}
39 for entity in entities:
40 old_mappings[(entity.job, entity.platform)] = entity
41
42 new_mappings = []
43 for job_name in mappings:
44 job = data_types.Job.query(data_types.Job.name == job_name).get()
45 if not job:
46 logs.log_error('An unknown job %s was selected for fuzzer %s.' %
47 (job_name, fuzzer.name))
48 continue
49
50 mapping = old_mappings.pop((job_name, job.platform), None)
51 if mapping:
52 continue
53
54 mapping = data_types.FuzzerJob()
55 mapping.fuzzer = fuzzer.name
56 mapping.job = job_name
57 mapping.platform = job.platform
58 new_mappings.append(mapping)
59
60 ndb.put_multi(new_mappings)
61 ndb.delete_multi([m.key for m in list(old_mappings.values())])
62
63
64 def update_platform_for_job(job_name, new_platform):
65 """Update platform for all mappings for a particular job."""
66 query = data_types.FuzzerJob.query()
67 query = query.filter(data_types.FuzzerJob.job == job_name)
68 mappings = ndb_utils.get_all_from_query(query)
69 new_mappings = []
70 for mapping in mappings:
71 mapping.platform = new_platform
72 new_mappings.append(mapping)
73 ndb.put_multi(new_mappings)
74
75
76 def get_fuzz_task_payload(platform=None):
77 """Select a fuzzer that can run on this platform."""
78 if not platform:
79 queue_override = environment.get_value('QUEUE_OVERRIDE')
80 platform = queue_override if queue_override else environment.platform()
81
82 query = data_types.FuzzerJob.query()
83 query = query.filter(data_types.FuzzerJob.platform == platform)
84
85 mappings = list(ndb_utils.get_all_from_query(query))
86 if not mappings:
87 return None, None
88
89 selection = utils.random_weighted_choice(mappings)
90 return selection.fuzzer, selection.job
91
92
93 def select_fuzz_target(targets, target_weights):
94 """Select a fuzz target from a list of potential targets."""
95 assert targets
96
97 weighted_targets = []
98 for target in targets:
99 weight = target_weights.get(target, 1.0)
100 weighted_targets.append(WeightedTarget(target, weight))
101
102 return utils.random_weighted_choice(weighted_targets).target
103
104
105 def get_fuzz_target_weights():
106 """Get a list of fuzz target weights based on the current fuzzer."""
107 job_type = environment.get_value('JOB_NAME')
108
109 target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(job=job_type))
110 fuzz_targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(target_jobs)
111
112 weights = {}
113 for fuzz_target, target_job in zip(fuzz_targets, target_jobs):
114 if not fuzz_target:
115 logs.log_error('Skipping weight assignment for fuzz target %s.' %
116 target_job.fuzz_target_name)
117 continue
118
119 weights[fuzz_target.binary] = target_job.weight
120
121 return weights
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/fuzzing/fuzzer_selection.py b/src/python/fuzzing/fuzzer_selection.py
--- a/src/python/fuzzing/fuzzer_selection.py
+++ b/src/python/fuzzing/fuzzer_selection.py
@@ -37,20 +37,20 @@
entities = ndb_utils.get_all_from_query(query)
old_mappings = {}
for entity in entities:
- old_mappings[(entity.job, entity.platform)] = entity
+ old_mappings[entity.job] = entity
new_mappings = []
for job_name in mappings:
+ mapping = old_mappings.pop(job_name, None)
+ if mapping:
+ continue
+
job = data_types.Job.query(data_types.Job.name == job_name).get()
if not job:
logs.log_error('An unknown job %s was selected for fuzzer %s.' %
(job_name, fuzzer.name))
continue
- mapping = old_mappings.pop((job_name, job.platform), None)
- if mapping:
- continue
-
mapping = data_types.FuzzerJob()
mapping.fuzzer = fuzzer.name
mapping.job = job_name
| {"golden_diff": "diff --git a/src/python/fuzzing/fuzzer_selection.py b/src/python/fuzzing/fuzzer_selection.py\n--- a/src/python/fuzzing/fuzzer_selection.py\n+++ b/src/python/fuzzing/fuzzer_selection.py\n@@ -37,20 +37,20 @@\n entities = ndb_utils.get_all_from_query(query)\n old_mappings = {}\n for entity in entities:\n- old_mappings[(entity.job, entity.platform)] = entity\n+ old_mappings[entity.job] = entity\n \n new_mappings = []\n for job_name in mappings:\n+ mapping = old_mappings.pop(job_name, None)\n+ if mapping:\n+ continue\n+\n job = data_types.Job.query(data_types.Job.name == job_name).get()\n if not job:\n logs.log_error('An unknown job %s was selected for fuzzer %s.' %\n (job_name, fuzzer.name))\n continue\n \n- mapping = old_mappings.pop((job_name, job.platform), None)\n- if mapping:\n- continue\n-\n mapping = data_types.FuzzerJob()\n mapping.fuzzer = fuzzer.name\n mapping.job = job_name\n", "issue": "Fuzzers page does not work well with large number of jobs\nFor libFuzzer, if we have a lot of existing jobs (>100) and want to add a new job and associate it. Submit button feels stuck, does not show updates, and take 1-2 min to finish. Can we show some update or something better to optimize this when only one job is updated.\r\n\r\n@oliverchang as fyi.\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Helper functions to update fuzzer-job mappings, and select fuzzers to run.\"\"\"\n\nimport collections\n\nfrom base import utils\nfrom datastore import data_types\nfrom datastore import fuzz_target_utils\nfrom datastore import ndb\nfrom datastore import ndb_utils\nfrom metrics import logs\nfrom system import environment\n\n# Used to prepare targets to be passed to utils.random_weighted_choice.\nWeightedTarget = collections.namedtuple('WeightedTarget', ['target', 'weight'])\n\n\ndef update_mappings_for_fuzzer(fuzzer, mappings=None):\n \"\"\"Clear existing mappings for a fuzzer, and replace them.\"\"\"\n if mappings is None:\n mappings = fuzzer.jobs\n\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name)\n entities = ndb_utils.get_all_from_query(query)\n old_mappings = {}\n for entity in entities:\n old_mappings[(entity.job, entity.platform)] = entity\n\n new_mappings = []\n for job_name in mappings:\n job = data_types.Job.query(data_types.Job.name == job_name).get()\n if not job:\n logs.log_error('An unknown job %s was selected for fuzzer %s.' 
%\n (job_name, fuzzer.name))\n continue\n\n mapping = old_mappings.pop((job_name, job.platform), None)\n if mapping:\n continue\n\n mapping = data_types.FuzzerJob()\n mapping.fuzzer = fuzzer.name\n mapping.job = job_name\n mapping.platform = job.platform\n new_mappings.append(mapping)\n\n ndb.put_multi(new_mappings)\n ndb.delete_multi([m.key for m in list(old_mappings.values())])\n\n\ndef update_platform_for_job(job_name, new_platform):\n \"\"\"Update platform for all mappings for a particular job.\"\"\"\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.job == job_name)\n mappings = ndb_utils.get_all_from_query(query)\n new_mappings = []\n for mapping in mappings:\n mapping.platform = new_platform\n new_mappings.append(mapping)\n ndb.put_multi(new_mappings)\n\n\ndef get_fuzz_task_payload(platform=None):\n \"\"\"Select a fuzzer that can run on this platform.\"\"\"\n if not platform:\n queue_override = environment.get_value('QUEUE_OVERRIDE')\n platform = queue_override if queue_override else environment.platform()\n\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.platform == platform)\n\n mappings = list(ndb_utils.get_all_from_query(query))\n if not mappings:\n return None, None\n\n selection = utils.random_weighted_choice(mappings)\n return selection.fuzzer, selection.job\n\n\ndef select_fuzz_target(targets, target_weights):\n \"\"\"Select a fuzz target from a list of potential targets.\"\"\"\n assert targets\n\n weighted_targets = []\n for target in targets:\n weight = target_weights.get(target, 1.0)\n weighted_targets.append(WeightedTarget(target, weight))\n\n return utils.random_weighted_choice(weighted_targets).target\n\n\ndef get_fuzz_target_weights():\n \"\"\"Get a list of fuzz target weights based on the current fuzzer.\"\"\"\n job_type = environment.get_value('JOB_NAME')\n\n target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(job=job_type))\n fuzz_targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(target_jobs)\n\n weights = {}\n for fuzz_target, target_job in zip(fuzz_targets, target_jobs):\n if not fuzz_target:\n logs.log_error('Skipping weight assignment for fuzz target %s.' 
%\n target_job.fuzz_target_name)\n continue\n\n weights[fuzz_target.binary] = target_job.weight\n\n return weights\n", "path": "src/python/fuzzing/fuzzer_selection.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Helper functions to update fuzzer-job mappings, and select fuzzers to run.\"\"\"\n\nimport collections\n\nfrom base import utils\nfrom datastore import data_types\nfrom datastore import fuzz_target_utils\nfrom datastore import ndb\nfrom datastore import ndb_utils\nfrom metrics import logs\nfrom system import environment\n\n# Used to prepare targets to be passed to utils.random_weighted_choice.\nWeightedTarget = collections.namedtuple('WeightedTarget', ['target', 'weight'])\n\n\ndef update_mappings_for_fuzzer(fuzzer, mappings=None):\n \"\"\"Clear existing mappings for a fuzzer, and replace them.\"\"\"\n if mappings is None:\n mappings = fuzzer.jobs\n\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name)\n entities = ndb_utils.get_all_from_query(query)\n old_mappings = {}\n for entity in entities:\n old_mappings[entity.job] = entity\n\n new_mappings = []\n for job_name in mappings:\n mapping = old_mappings.pop(job_name, None)\n if mapping:\n continue\n\n job = data_types.Job.query(data_types.Job.name == job_name).get()\n if not job:\n logs.log_error('An unknown job %s was selected for fuzzer %s.' 
%\n (job_name, fuzzer.name))\n continue\n\n mapping = data_types.FuzzerJob()\n mapping.fuzzer = fuzzer.name\n mapping.job = job_name\n mapping.platform = job.platform\n new_mappings.append(mapping)\n\n ndb.put_multi(new_mappings)\n ndb.delete_multi([m.key for m in list(old_mappings.values())])\n\n\ndef update_platform_for_job(job_name, new_platform):\n \"\"\"Update platform for all mappings for a particular job.\"\"\"\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.job == job_name)\n mappings = ndb_utils.get_all_from_query(query)\n new_mappings = []\n for mapping in mappings:\n mapping.platform = new_platform\n new_mappings.append(mapping)\n ndb.put_multi(new_mappings)\n\n\ndef get_fuzz_task_payload(platform=None):\n \"\"\"Select a fuzzer that can run on this platform.\"\"\"\n if not platform:\n queue_override = environment.get_value('QUEUE_OVERRIDE')\n platform = queue_override if queue_override else environment.platform()\n\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.platform == platform)\n\n mappings = list(ndb_utils.get_all_from_query(query))\n if not mappings:\n return None, None\n\n selection = utils.random_weighted_choice(mappings)\n return selection.fuzzer, selection.job\n\n\ndef select_fuzz_target(targets, target_weights):\n \"\"\"Select a fuzz target from a list of potential targets.\"\"\"\n assert targets\n\n weighted_targets = []\n for target in targets:\n weight = target_weights.get(target, 1.0)\n weighted_targets.append(WeightedTarget(target, weight))\n\n return utils.random_weighted_choice(weighted_targets).target\n\n\ndef get_fuzz_target_weights():\n \"\"\"Get a list of fuzz target weights based on the current fuzzer.\"\"\"\n job_type = environment.get_value('JOB_NAME')\n\n target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(job=job_type))\n fuzz_targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(target_jobs)\n\n weights = {}\n for fuzz_target, target_job in zip(fuzz_targets, target_jobs):\n if not fuzz_target:\n logs.log_error('Skipping weight assignment for fuzz target %s.' %\n target_job.fuzz_target_name)\n continue\n\n weights[fuzz_target.binary] = target_job.weight\n\n return weights\n", "path": "src/python/fuzzing/fuzzer_selection.py"}]} | 1,537 | 248 |
gh_patches_debug_20320 | rasdani/github-patches | git_diff | praw-dev__praw-1104 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replying to comments in quarantined subreddits returns an empty object
I have a bot that fetches comment ids from pushshift, then does some work and replies to the comment. If the comment is in a quarantined subreddit that I have not clicked through the quarantine warning for, I get the following error.
```
praw/models/reddit/mixins/replyable.py", line 26, in reply
return self._reddit.post(API_PATH["comment"], data=data)[0]
IndexError: list index out of range
```
The reply call succeeds, returning a valid http code and the following json
```
{'json': {'errors': [], 'data': {'things': []}}}
```
`objector.objectify` fails to parse this and returns an empty array. Importantly, the comment is successfully created.
I'm not really sure what should happen in this case, but I do think it should be a more clear error message. Happy to put together a pull request if anyone has any ideas.
--- END ISSUE ---
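The traceback boils down to assuming Reddit always echoes the created comment back in `json.data.things`; for a quarantined subreddit that list can be empty even though the POST succeeded. A generic defensive pattern (not praw's actual implementation) looks like this:

```python
def first_or_none(items):
    """Return the first element, or None when the API echoes nothing back."""
    try:
        return items[0]
    except IndexError:
        return None


# Mimicking the quarantined-subreddit response body {'json': {'data': {'things': []}}}:
print(first_or_none([]))           # None instead of an IndexError
print(first_or_none(["comment"]))  # "comment"
```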
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `praw/models/reddit/mixins/replyable.py`
Content:
```
1 """Provide the ReplyableMixin class."""
2 from ....const import API_PATH
3
4
5 class ReplyableMixin:
6 """Interface for RedditBase classes that can be replied to."""
7
8 def reply(self, body):
9 """Reply to the object.
10
11 :param body: The markdown formatted content for a comment.
12 :returns: A :class:`~.Comment` object for the newly created comment.
13
14 Example usage:
15
16 .. code:: python
17
18 submission = reddit.submission(id='5or86n')
19 submission.reply('reply')
20
21 comment = reddit.comment(id='dxolpyc')
22 comment.reply('reply')
23
24 """
25 data = {"text": body, "thing_id": self.fullname}
26 return self._reddit.post(API_PATH["comment"], data=data)[0]
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/praw/models/reddit/mixins/replyable.py b/praw/models/reddit/mixins/replyable.py
--- a/praw/models/reddit/mixins/replyable.py
+++ b/praw/models/reddit/mixins/replyable.py
@@ -9,7 +9,14 @@
"""Reply to the object.
:param body: The markdown formatted content for a comment.
- :returns: A :class:`~.Comment` object for the newly created comment.
+ :returns: A :class:`~.Comment` object for the newly created
+ comment or ``None`` if Reddit doesn't provide one.
+
+ A ``None`` value can be returned if the target is a comment or
+ submission in a quarantined subreddit and the authenticated user
+ has not opt-ed in to viewing the content. When this happens the
+ comment will be sucessfully created on Reddit and can be retried
+ by drawing the comment from the user's comment history.
Example usage:
@@ -23,4 +30,8 @@
"""
data = {"text": body, "thing_id": self.fullname}
- return self._reddit.post(API_PATH["comment"], data=data)[0]
+ comments = self._reddit.post(API_PATH["comment"], data=data)
+ try:
+ return comments[0]
+ except IndexError:
+ return None
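With this change `reply()` can legitimately hand back `None`, so bots need a small guard at the call site. A hedged sketch follows (the wrapper name is illustrative; recovering the comment from the account's history, as the docstring suggests, is left out):

```python
def reply_with_guard(target, body):
    """Reply and tolerate Reddit echoing back no comment object."""
    created = target.reply(body)
    if created is None:
        # The comment still exists on Reddit; per the updated docstring it can be
        # recovered from the authenticated user's comment history if needed.
        return None
    return created
```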
| {"golden_diff": "diff --git a/praw/models/reddit/mixins/replyable.py b/praw/models/reddit/mixins/replyable.py\n--- a/praw/models/reddit/mixins/replyable.py\n+++ b/praw/models/reddit/mixins/replyable.py\n@@ -9,7 +9,14 @@\n \"\"\"Reply to the object.\n \n :param body: The markdown formatted content for a comment.\n- :returns: A :class:`~.Comment` object for the newly created comment.\n+ :returns: A :class:`~.Comment` object for the newly created\n+ comment or ``None`` if Reddit doesn't provide one.\n+\n+ A ``None`` value can be returned if the target is a comment or\n+ submission in a quarantined subreddit and the authenticated user\n+ has not opt-ed in to viewing the content. When this happens the\n+ comment will be sucessfully created on Reddit and can be retried\n+ by drawing the comment from the user's comment history.\n \n Example usage:\n \n@@ -23,4 +30,8 @@\n \n \"\"\"\n data = {\"text\": body, \"thing_id\": self.fullname}\n- return self._reddit.post(API_PATH[\"comment\"], data=data)[0]\n+ comments = self._reddit.post(API_PATH[\"comment\"], data=data)\n+ try:\n+ return comments[0]\n+ except IndexError:\n+ return None\n", "issue": "Replying to comments in quarantined subreddits returns an empty object\nI have a bot that fetches comment ids from pushshift, then does some work and replies to the comment. If the comment is in a quarantined subreddit that I have not clicked through the quarantine warning for, I get the following error.\r\n\r\n```\r\npraw/models/reddit/mixins/replyable.py\", line 26, in reply\r\n return self._reddit.post(API_PATH[\"comment\"], data=data)[0]\r\nIndexError: list index out of range\r\n```\r\nThe reply call succeeds, returning a valid http code and the following json\r\n```\r\n{'json': {'errors': [], 'data': {'things': []}}}\r\n```\r\n`objector.objectify` fails to parse this and returns an empty array. Importantly, the comment is successfully created.\r\n\r\nI'm not really sure what should happen in this case, but I do think it should be a more clear error message. Happy to put together a pull request if anyone has any ideas.\n", "before_files": [{"content": "\"\"\"Provide the ReplyableMixin class.\"\"\"\nfrom ....const import API_PATH\n\n\nclass ReplyableMixin:\n \"\"\"Interface for RedditBase classes that can be replied to.\"\"\"\n\n def reply(self, body):\n \"\"\"Reply to the object.\n\n :param body: The markdown formatted content for a comment.\n :returns: A :class:`~.Comment` object for the newly created comment.\n\n Example usage:\n\n .. code:: python\n\n submission = reddit.submission(id='5or86n')\n submission.reply('reply')\n\n comment = reddit.comment(id='dxolpyc')\n comment.reply('reply')\n\n \"\"\"\n data = {\"text\": body, \"thing_id\": self.fullname}\n return self._reddit.post(API_PATH[\"comment\"], data=data)[0]\n", "path": "praw/models/reddit/mixins/replyable.py"}], "after_files": [{"content": "\"\"\"Provide the ReplyableMixin class.\"\"\"\nfrom ....const import API_PATH\n\n\nclass ReplyableMixin:\n \"\"\"Interface for RedditBase classes that can be replied to.\"\"\"\n\n def reply(self, body):\n \"\"\"Reply to the object.\n\n :param body: The markdown formatted content for a comment.\n :returns: A :class:`~.Comment` object for the newly created\n comment or ``None`` if Reddit doesn't provide one.\n\n A ``None`` value can be returned if the target is a comment or\n submission in a quarantined subreddit and the authenticated user\n has not opt-ed in to viewing the content. 
When this happens the\n comment will be sucessfully created on Reddit and can be retried\n by drawing the comment from the user's comment history.\n\n Example usage:\n\n .. code:: python\n\n submission = reddit.submission(id='5or86n')\n submission.reply('reply')\n\n comment = reddit.comment(id='dxolpyc')\n comment.reply('reply')\n\n \"\"\"\n data = {\"text\": body, \"thing_id\": self.fullname}\n comments = self._reddit.post(API_PATH[\"comment\"], data=data)\n try:\n return comments[0]\n except IndexError:\n return None\n", "path": "praw/models/reddit/mixins/replyable.py"}]} | 694 | 313 |
gh_patches_debug_25945 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-721 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]: Checkpointing Test Failed with PyTorch 1.9
### 🐛 Describe the bug
When running unit testing for model checkpointing, the following exception occurs.
This is because the `_rank_not_in_group` API is not exposed at the `torch.distributed` level in PyTorch 1.9.
### Environment
CUDA: 11.1
PyTorch 1.9.1
--- END ISSUE ---
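Because the private helper moved between releases, one defensive option is to resolve it once and fall back to the `distributed_c10d` module when the top-level alias is absent; the sketch below illustrates that shim (the project's actual patch, shown further down, simply switches call sites to the `distributed_c10d` path):

```python
import torch.distributed as dist

# Resolve the private helper wherever this torch build exposes it; on
# PyTorch 1.9 it is only reachable via torch.distributed.distributed_c10d.
_rank_not_in_group = getattr(
    dist, "_rank_not_in_group", dist.distributed_c10d._rank_not_in_group
)


def skip_if_outside_group(group=None):
    """Example guard: True when this rank should sit out the collective."""
    return _rank_not_in_group(group)
```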
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/communication/collective.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- encoding: utf-8 -*-
3
4 import torch
5 import torch.distributed as dist
6 from torch.distributed import ReduceOp
7 from torch import Tensor
8
9 from colossalai.context import ParallelMode
10 from colossalai.core import global_context as gpc
11
12
13 def all_gather(tensor: Tensor,
14 dim: int,
15 parallel_mode: ParallelMode,
16 on_cpu: bool = False,
17 async_op: bool = False) -> Tensor:
18 r"""Gathers all tensors from the parallel group and concatenates them in a
19 specific dimension.
20
21 Note:
22 The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
23 in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
24
25 Args:
26 tensor (:class:`torch.Tensor`): Tensor to be gathered.
27 dim (int): The dimension concatenating in.
28 parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.
29 on_cpu (bool, optional): Whether to communicate with Gloo backend.
30 async_op (bool, optional): Whether operations are asynchronous.
31
32 Returns:
33 Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of all-together only,
34 if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.
35 """
36 depth = gpc.get_world_size(parallel_mode)
37 if depth == 1:
38 out = tensor
39 work = None
40 else:
41 shape = list(tensor.shape)
42 shape[0], shape[dim] = shape[dim], shape[0]
43 shape[0] *= depth
44 out = torch.empty(shape, dtype=tensor.dtype, device=tensor.device)
45 temp = list(torch.chunk(out, depth, dim=0))
46 group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)
47 work = dist.all_gather(tensor_list=temp,
48 tensor=tensor.transpose(0, dim).contiguous(),
49 group=group,
50 async_op=async_op)
51 out = torch.transpose(out, 0, dim)
52 if async_op:
53 return out, work
54 else:
55 return out
56
57
58 def reduce_scatter(tensor: Tensor,
59 dim: int,
60 parallel_mode: ParallelMode,
61 op: ReduceOp = ReduceOp.SUM,
62 on_cpu: bool = False,
63 async_op: bool = False) -> Tensor:
64 r"""Reduces all tensors then scatters it in a specific dimension to all
65 members in the parallel group.
66
67 Note:
68 The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
69 in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
70
71 Args:
72 tensor (:class:`torch.Tensor`): Tensor to be reduce_scattered.
73 dim (int): The dimension concatenating in.
74 parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.
75 op (torch.distributed.ReduceOp, optional): The type of reduce operation,
76 should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR].
77 More details about ReduceOp please refer to
78 `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_.
79 on_cpu (bool, optional): Whether to communicate with Gloo backend.
80 async_op (bool, optional): Whether operations are asynchronous.
81
82 Returns:
83 Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of reduce_scatter only,
84 if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.
85 """
86 depth = gpc.get_world_size(parallel_mode)
87 if depth == 1:
88 out = tensor
89 work = None
90 else:
91 temp = list(map(lambda x: x.contiguous(), torch.chunk(tensor, depth, dim=dim)))
92 out = torch.empty(temp[0].shape, dtype=tensor.dtype, device=tensor.device)
93 group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)
94 work = dist.reduce_scatter(output=out, input_list=temp, op=op, group=group, async_op=async_op)
95 if async_op:
96 return out, work
97 else:
98 return out
99
100
101 def all_reduce(tensor: Tensor,
102 parallel_mode: ParallelMode,
103 op: ReduceOp = ReduceOp.SUM,
104 on_cpu: bool = False,
105 async_op: bool = False) -> Tensor:
106 r"""Reduces the tensor data across whole parallel group in such a way that all get the final result.
107
108 Note:
109 The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
110 in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
111
112 Args:
113 tensor (:class:`torch.Tensor`): Tensor to be all-reduced.
114 parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.
115 op (torch.distributed.ReduceOp, optional): The type of reduce operation,
116 should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR].
117 More details about ReduceOp please refer to
118 `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_.
119 on_cpu (bool, optional): Whether to communicate with Gloo backend.
120 async_op (bool, optional): Whether operations are asynchronous.
121
122 Returns:
123 Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of all-gather only,
124 if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.
125 """
126 depth = gpc.get_world_size(parallel_mode)
127 if depth == 1:
128 out = tensor
129 work = None
130 else:
131 out = tensor.contiguous()
132 group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)
133 work = dist.all_reduce(out, op=op, group=group, async_op=async_op)
134 if async_op:
135 return out, work
136 else:
137 return out
138
139
140 def broadcast(tensor: Tensor, src: int, parallel_mode: ParallelMode, on_cpu: bool = False, async_op: bool = False):
141 r"""Broadcast tensors to whole parallel group. Tensor must have the same
142 number of elements in all processes participating in the collective.
143
144 Note:
145 The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
146 in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
147
148 Args:
149 tensor (:class:`torch.Tensor`): Tensor to be broadcast.
150 src (int): Source rank.
151 parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.
152 on_cpu (bool, optional): Whether to communicate with Gloo backend.
153 async_op (bool, optional): Whether operations are asynchronous.
154
155 Returns:
156 Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The tensor need to be broadcast only,
157 if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.
158 """
159 depth = gpc.get_world_size(parallel_mode)
160 if depth == 1:
161 out = tensor
162 work = None
163 else:
164 out = tensor.contiguous()
165 group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)
166 work = dist.broadcast(out, src=src, group=group, async_op=async_op)
167 if async_op:
168 return out, work
169 else:
170 return out
171
172
173 def reduce(tensor: Tensor,
174 dst: int,
175 parallel_mode: ParallelMode,
176 op: ReduceOp = ReduceOp.SUM,
177 on_cpu: bool = False,
178 async_op: bool = False):
179 r"""Reduce tensors across whole parallel group. Only the process with
180 rank ``dst`` is going to receive the final result.
181
182 Note:
183 The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
184 in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
185
186 Args:
187 tensor (:class:`torch.Tensor`): Tensor to be reduced.
188 dst (int): Destination rank.
189 parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.
190 on_cpu (bool, optional): Whether to communicate with Gloo backend.
191 async_op (bool, optional): Whether operations are asynchronous.
192
193 Returns:
194 Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of reduce only,
195 if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.
196 """
197 depth = gpc.get_world_size(parallel_mode)
198 if depth == 1:
199 out = tensor
200 work = None
201 else:
202 out = tensor.contiguous()
203 group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)
204 work = dist.reduce(out, dst=dst, op=op, group=group, async_op=async_op)
205 if async_op:
206 return out, work
207 else:
208 return out
209
210
211 def scatter_object_list(scatter_object_output_list, scatter_object_input_list, src=0, group=None):
212 r"""Modified from `torch.distributed.scatter_object_list <https://pytorch.org/docs/stable/_modules/torch/distributed/distributed_c10d.html#scatter_object_list>` to fix issues
213 """
214 if dist._rank_not_in_group(group):
215 return
216
217 if (not isinstance(scatter_object_output_list, list) or len(scatter_object_output_list) < 1):
218 raise RuntimeError("Expected argument scatter_object_output_list to be a list of size at least 1.")
219
220 # set tensor device to cuda if backend is nccl
221 device = torch.cuda.current_device() if dist.get_backend(group) == 'nccl' else torch.device("cpu")
222
223 my_rank = dist.get_rank() # use global rank
224 if my_rank == src:
225 tensor_list, tensor_sizes = zip(
226 *[dist.distributed_c10d._object_to_tensor(obj) for obj in scatter_object_input_list])
227 tensor_list = list(map(lambda x: x.to(device), tensor_list))
228 tensor_sizes = list(map(lambda x: x.to(device), tensor_sizes))
229
230 # Src rank broadcasts the maximum tensor size. This is because all ranks are
231 # expected to call into scatter() with equal-sized tensors.
232 if my_rank == src:
233 max_tensor_size = max(tensor_sizes)
234 for tensor in tensor_list:
235 tensor.resize_(max_tensor_size)
236 else:
237 max_tensor_size = torch.tensor([0], dtype=torch.long).to(device)
238
239 dist.broadcast(max_tensor_size, src=src, group=group)
240
241 # Scatter actual serialized objects
242 output_tensor = torch.empty(max_tensor_size.item(), dtype=torch.uint8).to(device)
243 dist.scatter(
244 output_tensor,
245 scatter_list=None if my_rank != src else tensor_list,
246 src=src,
247 group=group,
248 )
249
250 # Scatter per-object sizes to trim tensors when deserializing back to object
251 obj_tensor_size = torch.tensor([0], dtype=torch.long).to(device)
252 dist.scatter(
253 obj_tensor_size,
254 scatter_list=None if my_rank != src else tensor_sizes,
255 src=src,
256 group=group,
257 )
258
259 output_tensor, obj_tensor_size = output_tensor.cpu(), obj_tensor_size.cpu()
260 # Deserialize back to object
261 scatter_object_output_list[0] = dist.distributed_c10d._tensor_to_object(output_tensor, obj_tensor_size)
262
```
--- END FILES ---
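For orientation, a hedged usage sketch of the `scatter_object_list` helper listed above (it assumes an initialized process group; the payloads and the demo function name are illustrative):

```python
import torch.distributed as dist

from colossalai.communication.collective import scatter_object_list


def demo_scatter(src=0):
    # Every rank supplies a one-slot output list, as the helper requires.
    output = [None]
    if dist.get_rank() == src:
        payload = [{"rank": r} for r in range(dist.get_world_size())]
    else:
        payload = None  # only the source rank needs the input list
    scatter_object_list(output, payload, src=src)
    return output[0]  # the object destined for this rank
```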
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/communication/collective.py b/colossalai/communication/collective.py
--- a/colossalai/communication/collective.py
+++ b/colossalai/communication/collective.py
@@ -211,7 +211,7 @@
def scatter_object_list(scatter_object_output_list, scatter_object_input_list, src=0, group=None):
r"""Modified from `torch.distributed.scatter_object_list <https://pytorch.org/docs/stable/_modules/torch/distributed/distributed_c10d.html#scatter_object_list>` to fix issues
"""
- if dist._rank_not_in_group(group):
+ if dist.distributed_c10d._rank_not_in_group(group):
return
if (not isinstance(scatter_object_output_list, list) or len(scatter_object_output_list) < 1):
@@ -220,7 +220,7 @@
# set tensor device to cuda if backend is nccl
device = torch.cuda.current_device() if dist.get_backend(group) == 'nccl' else torch.device("cpu")
- my_rank = dist.get_rank() # use global rank
+ my_rank = dist.get_rank() # use global rank
if my_rank == src:
tensor_list, tensor_sizes = zip(
*[dist.distributed_c10d._object_to_tensor(obj) for obj in scatter_object_input_list])
| {"golden_diff": "diff --git a/colossalai/communication/collective.py b/colossalai/communication/collective.py\n--- a/colossalai/communication/collective.py\n+++ b/colossalai/communication/collective.py\n@@ -211,7 +211,7 @@\n def scatter_object_list(scatter_object_output_list, scatter_object_input_list, src=0, group=None):\n r\"\"\"Modified from `torch.distributed.scatter_object_list <https://pytorch.org/docs/stable/_modules/torch/distributed/distributed_c10d.html#scatter_object_list>` to fix issues\n \"\"\"\n- if dist._rank_not_in_group(group):\n+ if dist.distributed_c10d._rank_not_in_group(group):\n return\n \n if (not isinstance(scatter_object_output_list, list) or len(scatter_object_output_list) < 1):\n@@ -220,7 +220,7 @@\n # set tensor device to cuda if backend is nccl\n device = torch.cuda.current_device() if dist.get_backend(group) == 'nccl' else torch.device(\"cpu\")\n \n- my_rank = dist.get_rank() # use global rank\n+ my_rank = dist.get_rank() # use global rank\n if my_rank == src:\n tensor_list, tensor_sizes = zip(\n *[dist.distributed_c10d._object_to_tensor(obj) for obj in scatter_object_input_list])\n", "issue": "[BUG]: Checkpointing Test Failed with PyTorch 1.9\n### \ud83d\udc1b Describe the bug\n\nWhen running unit testing for model checkpointing, the following exception occurs.\r\n\r\n<img width=\"1370\" alt=\"Screenshot 2022-04-11 at 5 10 48 PM\" src=\"https://user-images.githubusercontent.com/31818963/162704185-ad267034-2f49-4d18-82a1-70a08c59bcd6.png\">\r\n\r\nThis is because that the `_rank_not_in_group` API is not exposed at the `torch.distributed` level with PyTorch 1.9.\n\n### Environment\n\nCUDA: 11.1\r\nPyTorch 1.9.1\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport torch\nimport torch.distributed as dist\nfrom torch.distributed import ReduceOp\nfrom torch import Tensor\n\nfrom colossalai.context import ParallelMode\nfrom colossalai.core import global_context as gpc\n\n\ndef all_gather(tensor: Tensor,\n dim: int,\n parallel_mode: ParallelMode,\n on_cpu: bool = False,\n async_op: bool = False) -> Tensor:\n r\"\"\"Gathers all tensors from the parallel group and concatenates them in a\n specific dimension.\n\n Note:\n The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found\n in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.\n\n Args:\n tensor (:class:`torch.Tensor`): Tensor to be gathered.\n dim (int): The dimension concatenating in.\n parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.\n on_cpu (bool, optional): Whether to communicate with Gloo backend.\n async_op (bool, optional): Whether operations are asynchronous.\n\n Returns:\n Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of all-together only,\n if async_op is set to False. 
A tuple of output of all-gather and Async work handle, if async_op is set to True.\n \"\"\"\n depth = gpc.get_world_size(parallel_mode)\n if depth == 1:\n out = tensor\n work = None\n else:\n shape = list(tensor.shape)\n shape[0], shape[dim] = shape[dim], shape[0]\n shape[0] *= depth\n out = torch.empty(shape, dtype=tensor.dtype, device=tensor.device)\n temp = list(torch.chunk(out, depth, dim=0))\n group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)\n work = dist.all_gather(tensor_list=temp,\n tensor=tensor.transpose(0, dim).contiguous(),\n group=group,\n async_op=async_op)\n out = torch.transpose(out, 0, dim)\n if async_op:\n return out, work\n else:\n return out\n\n\ndef reduce_scatter(tensor: Tensor,\n dim: int,\n parallel_mode: ParallelMode,\n op: ReduceOp = ReduceOp.SUM,\n on_cpu: bool = False,\n async_op: bool = False) -> Tensor:\n r\"\"\"Reduces all tensors then scatters it in a specific dimension to all\n members in the parallel group.\n\n Note:\n The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found\n in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.\n\n Args:\n tensor (:class:`torch.Tensor`): Tensor to be reduce_scattered.\n dim (int): The dimension concatenating in.\n parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.\n op (torch.distributed.ReduceOp, optional): The type of reduce operation,\n should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR].\n More details about ReduceOp please refer to\n `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_.\n on_cpu (bool, optional): Whether to communicate with Gloo backend.\n async_op (bool, optional): Whether operations are asynchronous.\n\n Returns:\n Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of reduce_scatter only,\n if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.\n \"\"\"\n depth = gpc.get_world_size(parallel_mode)\n if depth == 1:\n out = tensor\n work = None\n else:\n temp = list(map(lambda x: x.contiguous(), torch.chunk(tensor, depth, dim=dim)))\n out = torch.empty(temp[0].shape, dtype=tensor.dtype, device=tensor.device)\n group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)\n work = dist.reduce_scatter(output=out, input_list=temp, op=op, group=group, async_op=async_op)\n if async_op:\n return out, work\n else:\n return out\n\n\ndef all_reduce(tensor: Tensor,\n parallel_mode: ParallelMode,\n op: ReduceOp = ReduceOp.SUM,\n on_cpu: bool = False,\n async_op: bool = False) -> Tensor:\n r\"\"\"Reduces the tensor data across whole parallel group in such a way that all get the final result.\n\n Note:\n The parallel_mode should be concluded in ``ParallelMode``. 
More details about ``ParallelMode`` could be found\n in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.\n\n Args:\n tensor (:class:`torch.Tensor`): Tensor to be all-reduced.\n parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.\n op (torch.distributed.ReduceOp, optional): The type of reduce operation,\n should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR].\n More details about ReduceOp please refer to\n `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_.\n on_cpu (bool, optional): Whether to communicate with Gloo backend.\n async_op (bool, optional): Whether operations are asynchronous.\n\n Returns:\n Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of all-gather only,\n if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.\n \"\"\"\n depth = gpc.get_world_size(parallel_mode)\n if depth == 1:\n out = tensor\n work = None\n else:\n out = tensor.contiguous()\n group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)\n work = dist.all_reduce(out, op=op, group=group, async_op=async_op)\n if async_op:\n return out, work\n else:\n return out\n\n\ndef broadcast(tensor: Tensor, src: int, parallel_mode: ParallelMode, on_cpu: bool = False, async_op: bool = False):\n r\"\"\"Broadcast tensors to whole parallel group. Tensor must have the same\n number of elements in all processes participating in the collective.\n\n Note:\n The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found\n in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.\n\n Args:\n tensor (:class:`torch.Tensor`): Tensor to be broadcast.\n src (int): Source rank.\n parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.\n on_cpu (bool, optional): Whether to communicate with Gloo backend.\n async_op (bool, optional): Whether operations are asynchronous.\n\n Returns:\n Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The tensor need to be broadcast only,\n if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.\n \"\"\"\n depth = gpc.get_world_size(parallel_mode)\n if depth == 1:\n out = tensor\n work = None\n else:\n out = tensor.contiguous()\n group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)\n work = dist.broadcast(out, src=src, group=group, async_op=async_op)\n if async_op:\n return out, work\n else:\n return out\n\n\ndef reduce(tensor: Tensor,\n dst: int,\n parallel_mode: ParallelMode,\n op: ReduceOp = ReduceOp.SUM,\n on_cpu: bool = False,\n async_op: bool = False):\n r\"\"\"Reduce tensors across whole parallel group. Only the process with\n rank ``dst`` is going to receive the final result.\n\n Note:\n The parallel_mode should be concluded in ``ParallelMode``. 
More details about ``ParallelMode`` could be found\n in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.\n\n Args:\n tensor (:class:`torch.Tensor`): Tensor to be reduced.\n dst (int): Destination rank.\n parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.\n on_cpu (bool, optional): Whether to communicate with Gloo backend.\n async_op (bool, optional): Whether operations are asynchronous.\n\n Returns:\n Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of reduce only,\n if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.\n \"\"\"\n depth = gpc.get_world_size(parallel_mode)\n if depth == 1:\n out = tensor\n work = None\n else:\n out = tensor.contiguous()\n group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)\n work = dist.reduce(out, dst=dst, op=op, group=group, async_op=async_op)\n if async_op:\n return out, work\n else:\n return out\n\n\ndef scatter_object_list(scatter_object_output_list, scatter_object_input_list, src=0, group=None):\n r\"\"\"Modified from `torch.distributed.scatter_object_list <https://pytorch.org/docs/stable/_modules/torch/distributed/distributed_c10d.html#scatter_object_list>` to fix issues\n \"\"\"\n if dist._rank_not_in_group(group):\n return\n\n if (not isinstance(scatter_object_output_list, list) or len(scatter_object_output_list) < 1):\n raise RuntimeError(\"Expected argument scatter_object_output_list to be a list of size at least 1.\")\n\n # set tensor device to cuda if backend is nccl\n device = torch.cuda.current_device() if dist.get_backend(group) == 'nccl' else torch.device(\"cpu\")\n\n my_rank = dist.get_rank() # use global rank\n if my_rank == src:\n tensor_list, tensor_sizes = zip(\n *[dist.distributed_c10d._object_to_tensor(obj) for obj in scatter_object_input_list])\n tensor_list = list(map(lambda x: x.to(device), tensor_list))\n tensor_sizes = list(map(lambda x: x.to(device), tensor_sizes))\n\n # Src rank broadcasts the maximum tensor size. 
This is because all ranks are\n # expected to call into scatter() with equal-sized tensors.\n if my_rank == src:\n max_tensor_size = max(tensor_sizes)\n for tensor in tensor_list:\n tensor.resize_(max_tensor_size)\n else:\n max_tensor_size = torch.tensor([0], dtype=torch.long).to(device)\n\n dist.broadcast(max_tensor_size, src=src, group=group)\n\n # Scatter actual serialized objects\n output_tensor = torch.empty(max_tensor_size.item(), dtype=torch.uint8).to(device)\n dist.scatter(\n output_tensor,\n scatter_list=None if my_rank != src else tensor_list,\n src=src,\n group=group,\n )\n\n # Scatter per-object sizes to trim tensors when deserializing back to object\n obj_tensor_size = torch.tensor([0], dtype=torch.long).to(device)\n dist.scatter(\n obj_tensor_size,\n scatter_list=None if my_rank != src else tensor_sizes,\n src=src,\n group=group,\n )\n\n output_tensor, obj_tensor_size = output_tensor.cpu(), obj_tensor_size.cpu()\n # Deserialize back to object\n scatter_object_output_list[0] = dist.distributed_c10d._tensor_to_object(output_tensor, obj_tensor_size)\n", "path": "colossalai/communication/collective.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport torch\nimport torch.distributed as dist\nfrom torch.distributed import ReduceOp\nfrom torch import Tensor\n\nfrom colossalai.context import ParallelMode\nfrom colossalai.core import global_context as gpc\n\n\ndef all_gather(tensor: Tensor,\n dim: int,\n parallel_mode: ParallelMode,\n on_cpu: bool = False,\n async_op: bool = False) -> Tensor:\n r\"\"\"Gathers all tensors from the parallel group and concatenates them in a\n specific dimension.\n\n Note:\n The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found\n in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.\n\n Args:\n tensor (:class:`torch.Tensor`): Tensor to be gathered.\n dim (int): The dimension concatenating in.\n parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.\n on_cpu (bool, optional): Whether to communicate with Gloo backend.\n async_op (bool, optional): Whether operations are asynchronous.\n\n Returns:\n Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of all-together only,\n if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.\n \"\"\"\n depth = gpc.get_world_size(parallel_mode)\n if depth == 1:\n out = tensor\n work = None\n else:\n shape = list(tensor.shape)\n shape[0], shape[dim] = shape[dim], shape[0]\n shape[0] *= depth\n out = torch.empty(shape, dtype=tensor.dtype, device=tensor.device)\n temp = list(torch.chunk(out, depth, dim=0))\n group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)\n work = dist.all_gather(tensor_list=temp,\n tensor=tensor.transpose(0, dim).contiguous(),\n group=group,\n async_op=async_op)\n out = torch.transpose(out, 0, dim)\n if async_op:\n return out, work\n else:\n return out\n\n\ndef reduce_scatter(tensor: Tensor,\n dim: int,\n parallel_mode: ParallelMode,\n op: ReduceOp = ReduceOp.SUM,\n on_cpu: bool = False,\n async_op: bool = False) -> Tensor:\n r\"\"\"Reduces all tensors then scatters it in a specific dimension to all\n members in the parallel group.\n\n Note:\n The parallel_mode should be concluded in ``ParallelMode``. 
More details about ``ParallelMode`` could be found\n in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.\n\n Args:\n tensor (:class:`torch.Tensor`): Tensor to be reduce_scattered.\n dim (int): The dimension concatenating in.\n parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.\n op (torch.distributed.ReduceOp, optional): The type of reduce operation,\n should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR].\n More details about ReduceOp please refer to\n `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_.\n on_cpu (bool, optional): Whether to communicate with Gloo backend.\n async_op (bool, optional): Whether operations are asynchronous.\n\n Returns:\n Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of reduce_scatter only,\n if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.\n \"\"\"\n depth = gpc.get_world_size(parallel_mode)\n if depth == 1:\n out = tensor\n work = None\n else:\n temp = list(map(lambda x: x.contiguous(), torch.chunk(tensor, depth, dim=dim)))\n out = torch.empty(temp[0].shape, dtype=tensor.dtype, device=tensor.device)\n group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)\n work = dist.reduce_scatter(output=out, input_list=temp, op=op, group=group, async_op=async_op)\n if async_op:\n return out, work\n else:\n return out\n\n\ndef all_reduce(tensor: Tensor,\n parallel_mode: ParallelMode,\n op: ReduceOp = ReduceOp.SUM,\n on_cpu: bool = False,\n async_op: bool = False) -> Tensor:\n r\"\"\"Reduces the tensor data across whole parallel group in such a way that all get the final result.\n\n Note:\n The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found\n in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.\n\n Args:\n tensor (:class:`torch.Tensor`): Tensor to be all-reduced.\n parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.\n op (torch.distributed.ReduceOp, optional): The type of reduce operation,\n should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR].\n More details about ReduceOp please refer to\n `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_.\n on_cpu (bool, optional): Whether to communicate with Gloo backend.\n async_op (bool, optional): Whether operations are asynchronous.\n\n Returns:\n Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of all-gather only,\n if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.\n \"\"\"\n depth = gpc.get_world_size(parallel_mode)\n if depth == 1:\n out = tensor\n work = None\n else:\n out = tensor.contiguous()\n group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)\n work = dist.all_reduce(out, op=op, group=group, async_op=async_op)\n if async_op:\n return out, work\n else:\n return out\n\n\ndef broadcast(tensor: Tensor, src: int, parallel_mode: ParallelMode, on_cpu: bool = False, async_op: bool = False):\n r\"\"\"Broadcast tensors to whole parallel group. 
Tensor must have the same\n number of elements in all processes participating in the collective.\n\n Note:\n The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found\n in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.\n\n Args:\n tensor (:class:`torch.Tensor`): Tensor to be broadcast.\n src (int): Source rank.\n parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.\n on_cpu (bool, optional): Whether to communicate with Gloo backend.\n async_op (bool, optional): Whether operations are asynchronous.\n\n Returns:\n Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The tensor need to be broadcast only,\n if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True.\n \"\"\"\n depth = gpc.get_world_size(parallel_mode)\n if depth == 1:\n out = tensor\n work = None\n else:\n out = tensor.contiguous()\n group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)\n work = dist.broadcast(out, src=src, group=group, async_op=async_op)\n if async_op:\n return out, work\n else:\n return out\n\n\ndef reduce(tensor: Tensor,\n dst: int,\n parallel_mode: ParallelMode,\n op: ReduceOp = ReduceOp.SUM,\n on_cpu: bool = False,\n async_op: bool = False):\n r\"\"\"Reduce tensors across whole parallel group. Only the process with\n rank ``dst`` is going to receive the final result.\n\n Note:\n The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found\n in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.\n\n Args:\n tensor (:class:`torch.Tensor`): Tensor to be reduced.\n dst (int): Destination rank.\n parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.\n on_cpu (bool, optional): Whether to communicate with Gloo backend.\n async_op (bool, optional): Whether operations are asynchronous.\n\n Returns:\n Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of reduce only,\n if async_op is set to False. 
A tuple of output of all-gather and Async work handle, if async_op is set to True.\n \"\"\"\n depth = gpc.get_world_size(parallel_mode)\n if depth == 1:\n out = tensor\n work = None\n else:\n out = tensor.contiguous()\n group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)\n work = dist.reduce(out, dst=dst, op=op, group=group, async_op=async_op)\n if async_op:\n return out, work\n else:\n return out\n\n\ndef scatter_object_list(scatter_object_output_list, scatter_object_input_list, src=0, group=None):\n r\"\"\"Modified from `torch.distributed.scatter_object_list <https://pytorch.org/docs/stable/_modules/torch/distributed/distributed_c10d.html#scatter_object_list>` to fix issues\n \"\"\"\n if dist.distributed_c10d._rank_not_in_group(group):\n return\n\n if (not isinstance(scatter_object_output_list, list) or len(scatter_object_output_list) < 1):\n raise RuntimeError(\"Expected argument scatter_object_output_list to be a list of size at least 1.\")\n\n # set tensor device to cuda if backend is nccl\n device = torch.cuda.current_device() if dist.get_backend(group) == 'nccl' else torch.device(\"cpu\")\n\n my_rank = dist.get_rank() # use global rank\n if my_rank == src:\n tensor_list, tensor_sizes = zip(\n *[dist.distributed_c10d._object_to_tensor(obj) for obj in scatter_object_input_list])\n tensor_list = list(map(lambda x: x.to(device), tensor_list))\n tensor_sizes = list(map(lambda x: x.to(device), tensor_sizes))\n\n # Src rank broadcasts the maximum tensor size. This is because all ranks are\n # expected to call into scatter() with equal-sized tensors.\n if my_rank == src:\n max_tensor_size = max(tensor_sizes)\n for tensor in tensor_list:\n tensor.resize_(max_tensor_size)\n else:\n max_tensor_size = torch.tensor([0], dtype=torch.long).to(device)\n\n dist.broadcast(max_tensor_size, src=src, group=group)\n\n # Scatter actual serialized objects\n output_tensor = torch.empty(max_tensor_size.item(), dtype=torch.uint8).to(device)\n dist.scatter(\n output_tensor,\n scatter_list=None if my_rank != src else tensor_list,\n src=src,\n group=group,\n )\n\n # Scatter per-object sizes to trim tensors when deserializing back to object\n obj_tensor_size = torch.tensor([0], dtype=torch.long).to(device)\n dist.scatter(\n obj_tensor_size,\n scatter_list=None if my_rank != src else tensor_sizes,\n src=src,\n group=group,\n )\n\n output_tensor, obj_tensor_size = output_tensor.cpu(), obj_tensor_size.cpu()\n # Deserialize back to object\n scatter_object_output_list[0] = dist.distributed_c10d._tensor_to_object(output_tensor, obj_tensor_size)\n", "path": "colossalai/communication/collective.py"}]} | 3,872 | 311 |
gh_patches_debug_22216 | rasdani/github-patches | git_diff | spacetelescope__jwql-483 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Simplify conda environments
Since `conda` will automatically determine which libraries are needed for dependencies (e.g. `numpy_base` is installed when installing `numpy`), we could probably trim down our conda environments to only those high-level packages that are used within our repo, and `conda` will figure out the rest. It might also be a good time to make sure the `conda` environment is consistent with the dependencies listed in `setup.py` and `requirements.txt`.
--- END ISSUE ---
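As a rough illustration of the consistency check the issue asks for, the sketch below compares a conda `environment.yml` against the `REQUIRES` list from `setup.py`. Everything about it is assumed for illustration (the `environment.yml` name, the flat dependency layout, and the naive version-pin stripping); it is not taken from the repository:

```python
# Hypothetical helper: report packages listed in environment.yml but absent from
# setup.py's REQUIRES, and vice versa. Extras and pip sub-sections are ignored.
import re
import yaml  # assumes PyYAML is installed

def package_names(specs):
    # 'astropy>=3.2.1' -> 'astropy'; 'jsonschema==2.6.0' -> 'jsonschema'
    return {re.split(r"[<>=!]", spec, maxsplit=1)[0].strip().lower() for spec in specs}

with open("environment.yml") as handle:
    env = yaml.safe_load(handle)

conda_deps = package_names(d for d in env.get("dependencies", []) if isinstance(d, str))
setup_deps = package_names(["astropy>=3.2.1", "astroquery>=0.3.9", "numpy"])  # stand-in for REQUIRES

print("only in environment.yml:", sorted(conda_deps - setup_deps))
print("only in setup.py:", sorted(setup_deps - conda_deps))
```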
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.21.0'
6
7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 REQUIRES = [
13 'astropy>=3.2.1',
14 'astroquery>=0.3.9',
15 'authlib',
16 'bokeh>=1.0',
17 'django>=2.0',
18 'jinja2',
19 'jsonschema==2.6.0',
20 'jwedb',
21 'jwst',
22 'matplotlib',
23 'numpy',
24 'numpydoc',
25 'pandas',
26 'psycopg2',
27 'pysiaf',
28 'pytest',
29 'sphinx',
30 'sqlalchemy',
31 'stsci_rtd_theme'
32 ]
33
34 setup(
35 name='jwql',
36 version=VERSION,
37 description=DESCRIPTION,
38 url='https://github.com/spacetelescope/jwql.git',
39 author=AUTHORS,
40 author_email='[email protected]',
41 license='BSD',
42 keywords=['astronomy', 'python'],
43 classifiers=['Programming Language :: Python'],
44 packages=find_packages(),
45 install_requires=REQUIRES,
46 include_package_data=True,
47 include_dirs=[np.get_include()],
48 )
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,21 +4,26 @@
VERSION = '0.21.0'
-AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
-AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'
+AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
+AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'
DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
REQUIRES = [
+ 'asdf>=2.3.3',
'astropy>=3.2.1',
'astroquery>=0.3.9',
'authlib',
'bokeh>=1.0',
+ 'codecov',
'django>=2.0',
+ 'flake8',
+ 'inflection',
+ 'ipython',
'jinja2',
'jsonschema==2.6.0',
- 'jwedb',
- 'jwst',
+ 'jwedb>=0.0.3',
+ 'jwst==0.13.0',
'matplotlib',
'numpy',
'numpydoc',
@@ -26,9 +31,12 @@
'psycopg2',
'pysiaf',
'pytest',
+ 'pytest-cov',
+ 'scipy',
'sphinx',
'sqlalchemy',
- 'stsci_rtd_theme'
+ 'stsci_rtd_theme',
+ 'twine'
]
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,21 +4,26 @@\n \n VERSION = '0.21.0'\n \n-AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\n-AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'\n+AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\n+AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n \n DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n \n REQUIRES = [\n+ 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n+ 'codecov',\n 'django>=2.0',\n+ 'flake8',\n+ 'inflection',\n+ 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n- 'jwedb',\n- 'jwst',\n+ 'jwedb>=0.0.3',\n+ 'jwst==0.13.0',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n@@ -26,9 +31,12 @@\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n+ 'pytest-cov',\n+ 'scipy',\n 'sphinx',\n 'sqlalchemy',\n- 'stsci_rtd_theme'\n+ 'stsci_rtd_theme',\n+ 'twine'\n ]\n \n setup(\n", "issue": "Simplify conda environments \nSince `conda` will automatically determine which libraries are needed for dependencies (e.g. `numpy_base` is installed when installing `numpy`), we could probably use to trim down our conda environments to only those high-level packages that are used within our repo, and `conda` will figure out the rest. It might also be a good time to make sure the `conda` environment is consistent with the dependencies listed in `setup.py` and `requirements.txt`\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.21.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nREQUIRES = [\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'django>=2.0',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb',\n 'jwst',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.21.0'\n\nAUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb>=0.0.3',\n 'jwst==0.13.0',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n 
author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 766 | 391 |
gh_patches_debug_37169 | rasdani/github-patches | git_diff | HybirdCorp__creme_crm-84 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Duplicated buttons
The configuration allows adding a button twice to a content type:
- On the button configuration page `/creme_config/button_menu/portal`
- Given a button that is not specific to a content type (Usable in the default configuration)
- Remove this button from the default configuration (if exists)
- Add this button to a content type (Contact for example)
- Add this button again to the default configuration
- The button is present twice on a Contact page.
version 2.2 alpha (master)
--- END ISSUE ---
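One plausible reading of the defect before diving into the files: the menu template gathers button IDs from both the type-specific configuration and the default configuration and renders every match, so an ID present in both lists is rendered twice. A minimal, illustrative de-duplication guard (generic names, not the project's API) would keep only the first occurrence of each ID:

```python
# Sketch only: render each button id at most once, preserving order.
from collections import OrderedDict

def render_unique(buttons, context):
    rendered = OrderedDict()
    for button in buttons:
        if button.id_ not in rendered:
            rendered[button.id_] = button.render(context)
    return list(rendered.values())
```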
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `creme/creme_core/templatetags/creme_menu.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 ################################################################################
4 # Creme is a free/open-source Customer Relationship Management software
5 # Copyright (C) 2009-2020 Hybird
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
16 #
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
19 ################################################################################
20
21 from django.db.models import Q
22 from django.template import Library
23
24 from ..gui.button_menu import button_registry
25 from ..gui.menu import creme_menu
26 from ..models import ButtonMenuItem
27
28 register = Library()
29
30
31 @register.simple_tag(takes_context=True)
32 def menu_display(context):
33 return creme_menu.render(context)
34
35
36 # TODO: rename template file (menu-buttons.html ? detailview-buttons.html ? menu/buttons.html ?)
37 @register.inclusion_tag('creme_core/templatetags/menu_buttons.html', takes_context=True)
38 def menu_buttons_display(context):
39 entity = context['object']
40 bmi = ButtonMenuItem.objects.filter(Q(content_type=entity.entity_type) |
41 Q(content_type__isnull=True)
42 ) \
43 .exclude(button_id='') \
44 .order_by('order') \
45 .values_list('button_id', flat=True)
46
47 button_ctxt = context.flatten()
48 # TODO: pass the registry in the context ?
49 context['buttons'] = [
50 button.render(button_ctxt)
51 for button in button_registry.get_buttons(bmi, entity)
52 ]
53
54 return context
55
```
Path: `creme/creme_core/gui/button_menu.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 ################################################################################
4 # Creme is a free/open-source Customer Relationship Management software
5 # Copyright (C) 2009-2020 Hybird
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
16 #
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
19 ################################################################################
20
21 import logging
22 from typing import Dict, Iterable, Iterator, Optional, Sequence, Tuple, Type
23
24 from django.template.loader import get_template
25
26 from ..models import CremeEntity
27
28 logger = logging.getLogger(__name__)
29
30
31 class Button:
32 # ID of the button, stored in DB (ie: the button configuration), to retrieve
33 # the right button class (so it must be unique)
34 # Override it in child class with a value generated by 'generate_id()'.
35 # id_ = None
36 id_: str = ''
37
38 # Label used in the configuration GUI to display the button (see models.ButtonMenuItem)
39 # Tips: use gettext_lazy()
40 verbose_name: str = 'BUTTON'
41
42 # Name/path of the template used to render the button.
43 template_name: str = 'creme_core/buttons/place-holder.html'
44
45 # Permission string ; None means not permission needed.
46 # eg :'myapp.add_mymodel'
47 # BEWARE: you have to use the template context variable 'has_perm' yourself !!
48 permission: Optional[str] = None # TODO: <permission: str = ''> ??
49
50 @staticmethod
51 def generate_id(app_name: str, name: str) -> str:
52 return f'button_{app_name}-{name}'
53
54 def get_ctypes(self) -> Sequence[Type[CremeEntity]]:
55 """
56 @return A sequence of CremeEntity class that can have this type of button.
57 Void sequence means that all types are ok.
58 eg: (Contact, Organisation)
59 """
60 return ()
61
62 def has_perm(self, context) -> bool:
63 permission = self.permission
64 return context['request'].user.has_perm(permission) if permission else True
65
66 def ok_4_display(self, entity: CremeEntity) -> bool:
67 """Can this button be displayed on this entity's detail-view ?
68 @param entity: CremeEntity which detail-view is displayed.
69 @return True if the button can be displayed for 'entity'.
70 """
71 return True
72
73 def render(self, context) -> str:
74 context['has_perm'] = self.has_perm(context)
75
76 return get_template(self.template_name).render(context)
77
78
79 class ButtonsRegistry:
80 class RegistrationError(Exception):
81 pass
82
83 def __init__(self):
84 self._button_classes: Dict[str, Type[Button]] = {}
85
86 def register(self, *button_classes: Type[Button]) -> None:
87 """
88 @type button_classes: creme_core.gui.menu_buttons.Button child classes.
89 """
90 setdefault = self._button_classes.setdefault
91
92 for button_cls in button_classes:
93 button_id = button_cls.id_
94
95 if not button_id:
96 raise self.RegistrationError(
97 f'Button class with empty id_: {button_cls}'
98 )
99
100 if setdefault(button_id, button_cls) is not button_cls:
101 raise self.RegistrationError(
102 f"Duplicated button's ID (or button registered twice) : {button_id}"
103 )
104
105 def get_button(self, button_id: str) -> Optional[Button]:
106 cls = self._button_classes.get(button_id)
107
108 return cls() if cls else None
109
110 def get_buttons(self, id_list: Iterable[str], entity: CremeEntity) -> Iterator[Button]:
111 """Generate the Buttons to be displayed on the detail-view of an entity.
112 Deprecated buttons & buttons that should not be displayed for this entity
113 are ignored.
114 @param id_list: Sequence of button IDs.
115 @param entity: CremeEntity instance.
116 @yield creme_core.gui.button_menu.Button instances.
117 """
118 button_classes = self._button_classes
119
120 for button_id in id_list:
121 button_cls = button_classes.get(button_id)
122
123 if button_cls is None:
124 logger.warning('Button seems deprecated: %s', button_id)
125 else:
126 button = button_cls()
127
128 if button.ok_4_display(entity):
129 yield button
130
131 def __iter__(self) -> Iterator[Tuple[str, Button]]:
132 for b_id, b_cls in self._button_classes.items():
133 yield b_id, b_cls()
134
135
136 button_registry = ButtonsRegistry()
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/creme/creme_core/gui/button_menu.py b/creme/creme_core/gui/button_menu.py
--- a/creme/creme_core/gui/button_menu.py
+++ b/creme/creme_core/gui/button_menu.py
@@ -61,7 +61,8 @@
def has_perm(self, context) -> bool:
permission = self.permission
- return context['request'].user.has_perm(permission) if permission else True
+ # return context['request'].user.has_perm(permission) if permission else True
+ return context['user'].has_perm(permission) if permission else True
def ok_4_display(self, entity: CremeEntity) -> bool:
"""Can this button be displayed on this entity's detail-view ?
diff --git a/creme/creme_core/templatetags/creme_menu.py b/creme/creme_core/templatetags/creme_menu.py
--- a/creme/creme_core/templatetags/creme_menu.py
+++ b/creme/creme_core/templatetags/creme_menu.py
@@ -18,10 +18,12 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
+from collections import OrderedDict
+
from django.db.models import Q
from django.template import Library
-from ..gui.button_menu import button_registry
+from ..gui import button_menu
from ..gui.menu import creme_menu
from ..models import ButtonMenuItem
@@ -34,21 +36,29 @@
# TODO: rename template file (menu-buttons.html ? detailview-buttons.html ? menu/buttons.html ?)
[email protected]_tag('creme_core/templatetags/menu_buttons.html', takes_context=True)
[email protected]_tag(
+ 'creme_core/templatetags/menu_buttons.html', takes_context=True,
+)
def menu_buttons_display(context):
entity = context['object']
- bmi = ButtonMenuItem.objects.filter(Q(content_type=entity.entity_type) |
- Q(content_type__isnull=True)
- ) \
- .exclude(button_id='') \
- .order_by('order') \
- .values_list('button_id', flat=True)
+ bmi = ButtonMenuItem.objects.filter(
+ Q(content_type=entity.entity_type)
+ | Q(content_type__isnull=True)
+ ).exclude(
+ button_id='',
+ ).order_by(
+ 'order',
+ ).values_list(
+ 'button_id', flat=True,
+ )
button_ctxt = context.flatten()
# TODO: pass the registry in the context ?
- context['buttons'] = [
- button.render(button_ctxt)
- for button in button_registry.get_buttons(bmi, entity)
- ]
+ buttons = OrderedDict()
+
+ for button in button_menu.button_registry.get_buttons(bmi, entity):
+ buttons[button.id_] = button.render(button_ctxt)
+
+ context['buttons'] = [*buttons.values()]
return context
| {"golden_diff": "diff --git a/creme/creme_core/gui/button_menu.py b/creme/creme_core/gui/button_menu.py\n--- a/creme/creme_core/gui/button_menu.py\n+++ b/creme/creme_core/gui/button_menu.py\n@@ -61,7 +61,8 @@\n \n def has_perm(self, context) -> bool:\n permission = self.permission\n- return context['request'].user.has_perm(permission) if permission else True\n+ # return context['request'].user.has_perm(permission) if permission else True\n+ return context['user'].has_perm(permission) if permission else True\n \n def ok_4_display(self, entity: CremeEntity) -> bool:\n \"\"\"Can this button be displayed on this entity's detail-view ?\ndiff --git a/creme/creme_core/templatetags/creme_menu.py b/creme/creme_core/templatetags/creme_menu.py\n--- a/creme/creme_core/templatetags/creme_menu.py\n+++ b/creme/creme_core/templatetags/creme_menu.py\n@@ -18,10 +18,12 @@\n # along with this program. If not, see <http://www.gnu.org/licenses/>.\n ################################################################################\n \n+from collections import OrderedDict\n+\n from django.db.models import Q\n from django.template import Library\n \n-from ..gui.button_menu import button_registry\n+from ..gui import button_menu\n from ..gui.menu import creme_menu\n from ..models import ButtonMenuItem\n \n@@ -34,21 +36,29 @@\n \n \n # TODO: rename template file (menu-buttons.html ? detailview-buttons.html ? menu/buttons.html ?)\[email protected]_tag('creme_core/templatetags/menu_buttons.html', takes_context=True)\[email protected]_tag(\n+ 'creme_core/templatetags/menu_buttons.html', takes_context=True,\n+)\n def menu_buttons_display(context):\n entity = context['object']\n- bmi = ButtonMenuItem.objects.filter(Q(content_type=entity.entity_type) |\n- Q(content_type__isnull=True)\n- ) \\\n- .exclude(button_id='') \\\n- .order_by('order') \\\n- .values_list('button_id', flat=True)\n+ bmi = ButtonMenuItem.objects.filter(\n+ Q(content_type=entity.entity_type)\n+ | Q(content_type__isnull=True)\n+ ).exclude(\n+ button_id='',\n+ ).order_by(\n+ 'order',\n+ ).values_list(\n+ 'button_id', flat=True,\n+ )\n \n button_ctxt = context.flatten()\n # TODO: pass the registry in the context ?\n- context['buttons'] = [\n- button.render(button_ctxt)\n- for button in button_registry.get_buttons(bmi, entity)\n- ]\n+ buttons = OrderedDict()\n+\n+ for button in button_menu.button_registry.get_buttons(bmi, entity):\n+ buttons[button.id_] = button.render(button_ctxt)\n+\n+ context['buttons'] = [*buttons.values()]\n \n return context\n", "issue": "Duplicated buttons\nThe configuration allows adding a button twice to a content type:\r\n\r\n- On the button configuration page `/creme_config/button_menu/portal`\r\n- Given a button that is not specific to a content type (Usable in the default configuration)\r\n- Remove this button from the default configuration (if exists)\r\n- Add this button to a content type (Contact for example)\r\n- Add this button again to the default configuration\r\n- The button is present twice on a Contact page.\r\n\r\nversion 2.2 alpha (master)\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n################################################################################\n# Creme is a free/open-source Customer Relationship Management software\n# Copyright (C) 2009-2020 Hybird\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your 
option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n################################################################################\n\nfrom django.db.models import Q\nfrom django.template import Library\n\nfrom ..gui.button_menu import button_registry\nfrom ..gui.menu import creme_menu\nfrom ..models import ButtonMenuItem\n\nregister = Library()\n\n\[email protected]_tag(takes_context=True)\ndef menu_display(context):\n return creme_menu.render(context)\n\n\n# TODO: rename template file (menu-buttons.html ? detailview-buttons.html ? menu/buttons.html ?)\[email protected]_tag('creme_core/templatetags/menu_buttons.html', takes_context=True)\ndef menu_buttons_display(context):\n entity = context['object']\n bmi = ButtonMenuItem.objects.filter(Q(content_type=entity.entity_type) |\n Q(content_type__isnull=True)\n ) \\\n .exclude(button_id='') \\\n .order_by('order') \\\n .values_list('button_id', flat=True)\n\n button_ctxt = context.flatten()\n # TODO: pass the registry in the context ?\n context['buttons'] = [\n button.render(button_ctxt)\n for button in button_registry.get_buttons(bmi, entity)\n ]\n\n return context\n", "path": "creme/creme_core/templatetags/creme_menu.py"}, {"content": "# -*- coding: utf-8 -*-\n\n################################################################################\n# Creme is a free/open-source Customer Relationship Management software\n# Copyright (C) 2009-2020 Hybird\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n################################################################################\n\nimport logging\nfrom typing import Dict, Iterable, Iterator, Optional, Sequence, Tuple, Type\n\nfrom django.template.loader import get_template\n\nfrom ..models import CremeEntity\n\nlogger = logging.getLogger(__name__)\n\n\nclass Button:\n # ID of the button, stored in DB (ie: the button configuration), to retrieve\n # the right button class (so it must be unique)\n # Override it in child class with a value generated by 'generate_id()'.\n # id_ = None\n id_: str = ''\n\n # Label used in the configuration GUI to display the button (see models.ButtonMenuItem)\n # Tips: use gettext_lazy()\n verbose_name: str = 'BUTTON'\n\n # Name/path of the template used to render the button.\n template_name: str = 'creme_core/buttons/place-holder.html'\n\n # Permission string ; None means not permission needed.\n # eg :'myapp.add_mymodel'\n # BEWARE: you have to use the template context variable 'has_perm' yourself !!\n permission: Optional[str] = None # TODO: <permission: str = ''> ??\n\n @staticmethod\n def generate_id(app_name: str, name: str) -> str:\n return f'button_{app_name}-{name}'\n\n def get_ctypes(self) -> Sequence[Type[CremeEntity]]:\n \"\"\"\n @return A sequence of CremeEntity class that can have this type of button.\n Void sequence means that all types are ok.\n eg: (Contact, Organisation)\n \"\"\"\n return ()\n\n def has_perm(self, context) -> bool:\n permission = self.permission\n return context['request'].user.has_perm(permission) if permission else True\n\n def ok_4_display(self, entity: CremeEntity) -> bool:\n \"\"\"Can this button be displayed on this entity's detail-view ?\n @param entity: CremeEntity which detail-view is displayed.\n @return True if the button can be displayed for 'entity'.\n \"\"\"\n return True\n\n def render(self, context) -> str:\n context['has_perm'] = self.has_perm(context)\n\n return get_template(self.template_name).render(context)\n\n\nclass ButtonsRegistry:\n class RegistrationError(Exception):\n pass\n\n def __init__(self):\n self._button_classes: Dict[str, Type[Button]] = {}\n\n def register(self, *button_classes: Type[Button]) -> None:\n \"\"\"\n @type button_classes: creme_core.gui.menu_buttons.Button child classes.\n \"\"\"\n setdefault = self._button_classes.setdefault\n\n for button_cls in button_classes:\n button_id = button_cls.id_\n\n if not button_id:\n raise self.RegistrationError(\n f'Button class with empty id_: {button_cls}'\n )\n\n if setdefault(button_id, button_cls) is not button_cls:\n raise self.RegistrationError(\n f\"Duplicated button's ID (or button registered twice) : {button_id}\"\n )\n\n def get_button(self, button_id: str) -> Optional[Button]:\n cls = self._button_classes.get(button_id)\n\n return cls() if cls else None\n\n def get_buttons(self, id_list: Iterable[str], entity: CremeEntity) -> Iterator[Button]:\n \"\"\"Generate the Buttons to be displayed on the detail-view of an entity.\n Deprecated buttons & buttons that should not be displayed for this entity\n are ignored.\n @param id_list: Sequence of button IDs.\n @param entity: CremeEntity instance.\n @yield creme_core.gui.button_menu.Button instances.\n \"\"\"\n button_classes = self._button_classes\n\n for button_id in id_list:\n button_cls = button_classes.get(button_id)\n\n if button_cls is None:\n logger.warning('Button seems deprecated: %s', button_id)\n else:\n button = button_cls()\n\n if button.ok_4_display(entity):\n yield button\n\n def 
__iter__(self) -> Iterator[Tuple[str, Button]]:\n for b_id, b_cls in self._button_classes.items():\n yield b_id, b_cls()\n\n\nbutton_registry = ButtonsRegistry()\n", "path": "creme/creme_core/gui/button_menu.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n################################################################################\n# Creme is a free/open-source Customer Relationship Management software\n# Copyright (C) 2009-2020 Hybird\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n################################################################################\n\nfrom collections import OrderedDict\n\nfrom django.db.models import Q\nfrom django.template import Library\n\nfrom ..gui import button_menu\nfrom ..gui.menu import creme_menu\nfrom ..models import ButtonMenuItem\n\nregister = Library()\n\n\[email protected]_tag(takes_context=True)\ndef menu_display(context):\n return creme_menu.render(context)\n\n\n# TODO: rename template file (menu-buttons.html ? detailview-buttons.html ? menu/buttons.html ?)\[email protected]_tag(\n 'creme_core/templatetags/menu_buttons.html', takes_context=True,\n)\ndef menu_buttons_display(context):\n entity = context['object']\n bmi = ButtonMenuItem.objects.filter(\n Q(content_type=entity.entity_type)\n | Q(content_type__isnull=True)\n ).exclude(\n button_id='',\n ).order_by(\n 'order',\n ).values_list(\n 'button_id', flat=True,\n )\n\n button_ctxt = context.flatten()\n # TODO: pass the registry in the context ?\n buttons = OrderedDict()\n\n for button in button_menu.button_registry.get_buttons(bmi, entity):\n buttons[button.id_] = button.render(button_ctxt)\n\n context['buttons'] = [*buttons.values()]\n\n return context\n", "path": "creme/creme_core/templatetags/creme_menu.py"}, {"content": "# -*- coding: utf-8 -*-\n\n################################################################################\n# Creme is a free/open-source Customer Relationship Management software\n# Copyright (C) 2009-2020 Hybird\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n################################################################################\n\nimport logging\nfrom typing import Dict, Iterable, Iterator, Optional, Sequence, Tuple, Type\n\nfrom django.template.loader import get_template\n\nfrom ..models import CremeEntity\n\nlogger = logging.getLogger(__name__)\n\n\nclass Button:\n # ID of the button, stored in DB (ie: the button configuration), to retrieve\n # the right button class (so it must be unique)\n # Override it in child class with a value generated by 'generate_id()'.\n # id_ = None\n id_: str = ''\n\n # Label used in the configuration GUI to display the button (see models.ButtonMenuItem)\n # Tips: use gettext_lazy()\n verbose_name: str = 'BUTTON'\n\n # Name/path of the template used to render the button.\n template_name: str = 'creme_core/buttons/place-holder.html'\n\n # Permission string ; None means not permission needed.\n # eg :'myapp.add_mymodel'\n # BEWARE: you have to use the template context variable 'has_perm' yourself !!\n permission: Optional[str] = None # TODO: <permission: str = ''> ??\n\n @staticmethod\n def generate_id(app_name: str, name: str) -> str:\n return f'button_{app_name}-{name}'\n\n def get_ctypes(self) -> Sequence[Type[CremeEntity]]:\n \"\"\"\n @return A sequence of CremeEntity class that can have this type of button.\n Void sequence means that all types are ok.\n eg: (Contact, Organisation)\n \"\"\"\n return ()\n\n def has_perm(self, context) -> bool:\n permission = self.permission\n # return context['request'].user.has_perm(permission) if permission else True\n return context['user'].has_perm(permission) if permission else True\n\n def ok_4_display(self, entity: CremeEntity) -> bool:\n \"\"\"Can this button be displayed on this entity's detail-view ?\n @param entity: CremeEntity which detail-view is displayed.\n @return True if the button can be displayed for 'entity'.\n \"\"\"\n return True\n\n def render(self, context) -> str:\n context['has_perm'] = self.has_perm(context)\n\n return get_template(self.template_name).render(context)\n\n\nclass ButtonsRegistry:\n class RegistrationError(Exception):\n pass\n\n def __init__(self):\n self._button_classes: Dict[str, Type[Button]] = {}\n\n def register(self, *button_classes: Type[Button]) -> None:\n \"\"\"\n @type button_classes: creme_core.gui.menu_buttons.Button child classes.\n \"\"\"\n setdefault = self._button_classes.setdefault\n\n for button_cls in button_classes:\n button_id = button_cls.id_\n\n if not button_id:\n raise self.RegistrationError(\n f'Button class with empty id_: {button_cls}'\n )\n\n if setdefault(button_id, button_cls) is not button_cls:\n raise self.RegistrationError(\n f\"Duplicated button's ID (or button registered twice) : {button_id}\"\n )\n\n def get_button(self, button_id: str) -> Optional[Button]:\n cls = self._button_classes.get(button_id)\n\n return cls() if cls else None\n\n def get_buttons(self, id_list: Iterable[str], entity: CremeEntity) -> Iterator[Button]:\n \"\"\"Generate the Buttons to be displayed on the detail-view of an entity.\n Deprecated buttons & buttons that should not be displayed for this entity\n are ignored.\n @param id_list: Sequence of button IDs.\n @param entity: CremeEntity instance.\n @yield creme_core.gui.button_menu.Button instances.\n \"\"\"\n button_classes = self._button_classes\n\n for button_id in id_list:\n button_cls = button_classes.get(button_id)\n\n if button_cls is None:\n logger.warning('Button seems deprecated: %s', button_id)\n else:\n button = 
button_cls()\n\n if button.ok_4_display(entity):\n yield button\n\n def __iter__(self) -> Iterator[Tuple[str, Button]]:\n for b_id, b_cls in self._button_classes.items():\n yield b_id, b_cls()\n\n\nbutton_registry = ButtonsRegistry()\n", "path": "creme/creme_core/gui/button_menu.py"}]} | 2,331 | 671 |
gh_patches_debug_29419 | rasdani/github-patches | git_diff | sublimelsp__LSP-2232 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Settings for editing inlay hints
**Is your feature request related to a problem? Please describe.**
I wish to style the inlay hints that LSP provides; however, LSP only allows for very limited styling: basically background color, font type, and font color, but nothing else. There are these ugly round borders on the little hint that cannot be removed without editing inlay_hint.py directly to change the enforced CSS.
**Describe the solution you'd like**
The ability to either change all CSS values of the inlay hint though variables, or the ability to outright overwrite the css that's currently there from a settings file.
**Describe alternatives you've considered**
Editing LSP with PackageResourceViewer by hand each time there is an update.
**Additional context**
Here is my cute little CSS improvement for my own enjoyment, no more distracting button-like border :<)

--- END ISSUE ---
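A sketch of the sort of override the request describes: let a user-supplied stylesheet win over the bundled one when it exists. The resource paths below are invented for illustration and are not part of LSP's actual API:

```python
# Hypothetical: prefer a stylesheet from the user's Packages/User folder and fall
# back to the one shipped with the plugin.
import sublime

def load_inlay_hint_css() -> str:
    try:
        return sublime.load_resource("Packages/User/LSP/inlay_hints.css")  # assumed override path
    except OSError:  # resource not found
        return sublime.load_resource("Packages/LSP/inlay_hints.css")  # assumed bundled default
```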
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/core/css.py`
Content:
```
1 import sublime
2 from .typing import Optional
3
4
5 class CSS:
6 def __init__(self) -> None:
7 self.popups = sublime.load_resource("Packages/LSP/popups.css")
8 self.popups_classname = "lsp_popup"
9 self.notification = sublime.load_resource("Packages/LSP/notification.css")
10 self.notification_classname = "notification"
11 self.sheets = sublime.load_resource("Packages/LSP/sheets.css")
12 self.sheets_classname = "lsp_sheet"
13
14
15 _css = None # type: Optional[CSS]
16
17
18 def load() -> None:
19 global _css
20 _css = CSS()
21
22
23 def css() -> CSS:
24 global _css
25 assert _css is not None
26 return _css
27
```
Path: `plugin/inlay_hint.py`
Content:
```
1 from .core.protocol import InlayHint
2 from .core.protocol import InlayHintLabelPart
3 from .core.protocol import MarkupContent
4 from .core.protocol import Point
5 from .core.protocol import Request
6 from .core.registry import LspTextCommand
7 from .core.registry import LspWindowCommand
8 from .core.sessions import Session
9 from .core.settings import userprefs
10 from .core.typing import cast, Optional, Union
11 from .core.views import point_to_offset
12 from .formatting import apply_text_edits_to_view
13 import html
14 import sublime
15 import uuid
16
17
18 class LspToggleInlayHintsCommand(LspWindowCommand):
19 capability = 'inlayHintProvider'
20
21 def run(self, enable: Optional[bool] = None) -> None:
22 if not isinstance(enable, bool):
23 enable = not self.are_enabled(self.window)
24 self.window.settings().set('lsp_show_inlay_hints', enable)
25 status = 'on' if enable else 'off'
26 sublime.status_message('Inlay Hints are {}'.format(status))
27 for session in self.sessions():
28 for sv in session.session_views_async():
29 sv.session_buffer.do_inlay_hints_async(sv.view)
30
31 def is_checked(self) -> bool:
32 return self.are_enabled(self.window)
33
34 @classmethod
35 def are_enabled(cls, window: Optional[sublime.Window]) -> bool:
36 if not window:
37 return userprefs().show_inlay_hints
38 return bool(window.settings().get('lsp_show_inlay_hints', userprefs().show_inlay_hints))
39
40
41 class LspInlayHintClickCommand(LspTextCommand):
42 capability = 'inlayHintProvider'
43
44 def run(self, _edit: sublime.Edit, session_name: str, inlay_hint: InlayHint, phantom_uuid: str,
45 event: Optional[dict] = None, label_part: Optional[InlayHintLabelPart] = None) -> None:
46 # Insert textEdits for the given inlay hint.
47 # If a InlayHintLabelPart was clicked, label_part will be passed as an argument to the LspInlayHintClickCommand
48 # and InlayHintLabelPart.command will be executed.
49 session = self.session_by_name(session_name, 'inlayHintProvider')
50 if session and session.has_capability('inlayHintProvider.resolveProvider'):
51 request = Request.resolveInlayHint(inlay_hint, self.view)
52 session.send_request_async(
53 request,
54 lambda response: self.handle(session_name, response, phantom_uuid, label_part))
55 return
56 self.handle(session_name, inlay_hint, phantom_uuid, label_part)
57
58 def handle(self, session_name: str, inlay_hint: InlayHint, phantom_uuid: str,
59 label_part: Optional[InlayHintLabelPart] = None) -> None:
60 self.handle_inlay_hint_text_edits(session_name, inlay_hint, phantom_uuid)
61 self.handle_label_part_command(session_name, label_part)
62
63 def handle_inlay_hint_text_edits(self, session_name: str, inlay_hint: InlayHint, phantom_uuid: str) -> None:
64 session = self.session_by_name(session_name, 'inlayHintProvider')
65 if not session:
66 return
67 text_edits = inlay_hint.get('textEdits')
68 if not text_edits:
69 return
70 for sb in session.session_buffers_async():
71 sb.remove_inlay_hint_phantom(phantom_uuid)
72 apply_text_edits_to_view(text_edits, self.view)
73
74 def handle_label_part_command(self, session_name: str, label_part: Optional[InlayHintLabelPart] = None) -> None:
75 if not label_part:
76 return
77 command = label_part.get('command')
78 if not command:
79 return
80 args = {
81 "session_name": session_name,
82 "command_name": command["command"],
83 "command_args": command.get("arguments")
84 }
85 self.view.run_command("lsp_execute", args)
86
87
88 def inlay_hint_to_phantom(view: sublime.View, inlay_hint: InlayHint, session: Session) -> sublime.Phantom:
89 position = inlay_hint["position"]
90 region = sublime.Region(point_to_offset(Point.from_lsp(position), view))
91 phantom_uuid = str(uuid.uuid4())
92 content = get_inlay_hint_html(view, inlay_hint, session, phantom_uuid)
93 p = sublime.Phantom(region, content, sublime.LAYOUT_INLINE)
94 setattr(p, 'lsp_uuid', phantom_uuid)
95 return p
96
97
98 def get_inlay_hint_html(view: sublime.View, inlay_hint: InlayHint, session: Session, phantom_uuid: str) -> str:
99 label = format_inlay_hint_label(inlay_hint, session, phantom_uuid)
100 font = view.settings().get('font_face') or "monospace"
101 html = """
102 <body id="lsp-inlay-hint">
103 <style>
104 .inlay-hint {{
105 color: color(var(--foreground) alpha(0.6));
106 background-color: color(var(--foreground) alpha(0.08));
107 border-radius: 4px;
108 padding: 0.05em 4px;
109 font-size: 0.9em;
110 font-family: {font};
111 }}
112
113 .inlay-hint a {{
114 color: color(var(--foreground) alpha(0.6));
115 text-decoration: none;
116 }}
117 </style>
118 <div class="inlay-hint">
119 {label}
120 </div>
121 </body>
122 """.format(
123 font=font,
124 label=label
125 )
126 return html
127
128
129 def format_inlay_hint_tooltip(tooltip: Optional[Union[str, MarkupContent]]) -> str:
130 if isinstance(tooltip, str):
131 return html.escape(tooltip)
132 if isinstance(tooltip, dict): # MarkupContent
133 return html.escape(tooltip.get('value') or "")
134 return ""
135
136
137 def format_inlay_hint_label(inlay_hint: InlayHint, session: Session, phantom_uuid: str) -> str:
138 tooltip = format_inlay_hint_tooltip(inlay_hint.get("tooltip"))
139 result = ""
140 can_resolve_inlay_hint = session.has_capability('inlayHintProvider.resolveProvider')
141 label = inlay_hint['label']
142 has_text_edits = bool(inlay_hint.get('textEdits'))
143 is_clickable = has_text_edits or can_resolve_inlay_hint
144 if isinstance(label, str):
145 if is_clickable:
146 inlay_hint_click_command = sublime.command_url('lsp_on_double_click', {
147 'command': 'lsp_inlay_hint_click',
148 'args': {
149 'session_name': session.config.name,
150 'inlay_hint': cast(dict, inlay_hint),
151 'phantom_uuid': phantom_uuid
152 }
153 })
154 result += '<a href="{command}">'.format(command=inlay_hint_click_command)
155 instruction_text = '\nDouble-click to insert' if has_text_edits else ""
156 result += '<span title="{tooltip}">{value}</span>'.format(
157 tooltip=(tooltip + instruction_text).strip(),
158 value=html.escape(label)
159 )
160 if is_clickable:
161 result += "</a>"
162 return result
163
164 for label_part in label:
165 value = ""
166 tooltip = format_inlay_hint_tooltip(label_part.get("tooltip"))
167 has_command = bool(label_part.get('command'))
168 if has_command:
169 inlay_hint_click_command = sublime.command_url('lsp_on_double_click', {
170 'command': 'lsp_inlay_hint_click',
171 'args': {
172 'session_name': session.config.name,
173 'inlay_hint': cast(dict, inlay_hint),
174 'phantom_uuid': phantom_uuid,
175 'label_part': cast(dict, label_part)
176 }
177 })
178 value += '<a href="{command}">'.format(command=inlay_hint_click_command)
179 value += html.escape(label_part['value'])
180 if has_command:
181 value += "</a>"
182 # InlayHintLabelPart.location is not supported
183 instruction_text = '\nDouble-click to execute' if has_command else ""
184 result += "<span title=\"{tooltip}\">{value}</span>".format(
185 tooltip=(tooltip + instruction_text).strip(),
186 value=value
187 )
188 return result
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/core/css.py b/plugin/core/css.py
--- a/plugin/core/css.py
+++ b/plugin/core/css.py
@@ -10,6 +10,7 @@
self.notification_classname = "notification"
self.sheets = sublime.load_resource("Packages/LSP/sheets.css")
self.sheets_classname = "lsp_sheet"
+ self.inlay_hints = sublime.load_resource("Packages/LSP/inlay_hints.css")
_css = None # type: Optional[CSS]
diff --git a/plugin/inlay_hint.py b/plugin/inlay_hint.py
--- a/plugin/inlay_hint.py
+++ b/plugin/inlay_hint.py
@@ -1,3 +1,4 @@
+from .core.css import css
from .core.protocol import InlayHint
from .core.protocol import InlayHintLabelPart
from .core.protocol import MarkupContent
@@ -102,18 +103,9 @@
<body id="lsp-inlay-hint">
<style>
.inlay-hint {{
- color: color(var(--foreground) alpha(0.6));
- background-color: color(var(--foreground) alpha(0.08));
- border-radius: 4px;
- padding: 0.05em 4px;
- font-size: 0.9em;
font-family: {font};
}}
-
- .inlay-hint a {{
- color: color(var(--foreground) alpha(0.6));
- text-decoration: none;
- }}
+ {css}
</style>
<div class="inlay-hint">
{label}
@@ -121,6 +113,7 @@
</body>
""".format(
font=font,
+ css=css().inlay_hints,
label=label
)
return html
| {"golden_diff": "diff --git a/plugin/core/css.py b/plugin/core/css.py\n--- a/plugin/core/css.py\n+++ b/plugin/core/css.py\n@@ -10,6 +10,7 @@\n self.notification_classname = \"notification\"\n self.sheets = sublime.load_resource(\"Packages/LSP/sheets.css\")\n self.sheets_classname = \"lsp_sheet\"\n+ self.inlay_hints = sublime.load_resource(\"Packages/LSP/inlay_hints.css\")\n \n \n _css = None # type: Optional[CSS]\ndiff --git a/plugin/inlay_hint.py b/plugin/inlay_hint.py\n--- a/plugin/inlay_hint.py\n+++ b/plugin/inlay_hint.py\n@@ -1,3 +1,4 @@\n+from .core.css import css\n from .core.protocol import InlayHint\n from .core.protocol import InlayHintLabelPart\n from .core.protocol import MarkupContent\n@@ -102,18 +103,9 @@\n <body id=\"lsp-inlay-hint\">\n <style>\n .inlay-hint {{\n- color: color(var(--foreground) alpha(0.6));\n- background-color: color(var(--foreground) alpha(0.08));\n- border-radius: 4px;\n- padding: 0.05em 4px;\n- font-size: 0.9em;\n font-family: {font};\n }}\n-\n- .inlay-hint a {{\n- color: color(var(--foreground) alpha(0.6));\n- text-decoration: none;\n- }}\n+ {css}\n </style>\n <div class=\"inlay-hint\">\n {label}\n@@ -121,6 +113,7 @@\n </body>\n \"\"\".format(\n font=font,\n+ css=css().inlay_hints,\n label=label\n )\n return html\n", "issue": "Settings for editing inlay hints\n**Is your feature request related to a problem? Please describe.**\r\nI wish to style the inlay hints that LSP provides, however, LSP only allows for very limited styling, basically background color, font type, font color, but nothing else. There are these ugly round borders on the little hint that cannot be removed without editing inlay_hint.py directly to change the enforced CSS.\r\n\r\n**Describe the solution you'd like**\r\nThe ability to either change all CSS values of the inlay hint though variables, or the ability to outright overwrite the css that's currently there from a settings file.\r\n\r\n**Describe alternatives you've considered**\r\nEditing LSP with PackageResourceViewer by hand each time there is an update.\r\n\r\n**Additional context**\r\nHere is my cute little CSS improvement for my own enjoyment, no more distracting button-like border :<)\r\n\r\n\r\n\n", "before_files": [{"content": "import sublime\nfrom .typing import Optional\n\n\nclass CSS:\n def __init__(self) -> None:\n self.popups = sublime.load_resource(\"Packages/LSP/popups.css\")\n self.popups_classname = \"lsp_popup\"\n self.notification = sublime.load_resource(\"Packages/LSP/notification.css\")\n self.notification_classname = \"notification\"\n self.sheets = sublime.load_resource(\"Packages/LSP/sheets.css\")\n self.sheets_classname = \"lsp_sheet\"\n\n\n_css = None # type: Optional[CSS]\n\n\ndef load() -> None:\n global _css\n _css = CSS()\n\n\ndef css() -> CSS:\n global _css\n assert _css is not None\n return _css\n", "path": "plugin/core/css.py"}, {"content": "from .core.protocol import InlayHint\nfrom .core.protocol import InlayHintLabelPart\nfrom .core.protocol import MarkupContent\nfrom .core.protocol import Point\nfrom .core.protocol import Request\nfrom .core.registry import LspTextCommand\nfrom .core.registry import LspWindowCommand\nfrom .core.sessions import Session\nfrom .core.settings import userprefs\nfrom .core.typing import cast, Optional, Union\nfrom .core.views import point_to_offset\nfrom .formatting import apply_text_edits_to_view\nimport html\nimport sublime\nimport uuid\n\n\nclass LspToggleInlayHintsCommand(LspWindowCommand):\n capability = 'inlayHintProvider'\n\n def run(self, enable: 
Optional[bool] = None) -> None:\n if not isinstance(enable, bool):\n enable = not self.are_enabled(self.window)\n self.window.settings().set('lsp_show_inlay_hints', enable)\n status = 'on' if enable else 'off'\n sublime.status_message('Inlay Hints are {}'.format(status))\n for session in self.sessions():\n for sv in session.session_views_async():\n sv.session_buffer.do_inlay_hints_async(sv.view)\n\n def is_checked(self) -> bool:\n return self.are_enabled(self.window)\n\n @classmethod\n def are_enabled(cls, window: Optional[sublime.Window]) -> bool:\n if not window:\n return userprefs().show_inlay_hints\n return bool(window.settings().get('lsp_show_inlay_hints', userprefs().show_inlay_hints))\n\n\nclass LspInlayHintClickCommand(LspTextCommand):\n capability = 'inlayHintProvider'\n\n def run(self, _edit: sublime.Edit, session_name: str, inlay_hint: InlayHint, phantom_uuid: str,\n event: Optional[dict] = None, label_part: Optional[InlayHintLabelPart] = None) -> None:\n # Insert textEdits for the given inlay hint.\n # If a InlayHintLabelPart was clicked, label_part will be passed as an argument to the LspInlayHintClickCommand\n # and InlayHintLabelPart.command will be executed.\n session = self.session_by_name(session_name, 'inlayHintProvider')\n if session and session.has_capability('inlayHintProvider.resolveProvider'):\n request = Request.resolveInlayHint(inlay_hint, self.view)\n session.send_request_async(\n request,\n lambda response: self.handle(session_name, response, phantom_uuid, label_part))\n return\n self.handle(session_name, inlay_hint, phantom_uuid, label_part)\n\n def handle(self, session_name: str, inlay_hint: InlayHint, phantom_uuid: str,\n label_part: Optional[InlayHintLabelPart] = None) -> None:\n self.handle_inlay_hint_text_edits(session_name, inlay_hint, phantom_uuid)\n self.handle_label_part_command(session_name, label_part)\n\n def handle_inlay_hint_text_edits(self, session_name: str, inlay_hint: InlayHint, phantom_uuid: str) -> None:\n session = self.session_by_name(session_name, 'inlayHintProvider')\n if not session:\n return\n text_edits = inlay_hint.get('textEdits')\n if not text_edits:\n return\n for sb in session.session_buffers_async():\n sb.remove_inlay_hint_phantom(phantom_uuid)\n apply_text_edits_to_view(text_edits, self.view)\n\n def handle_label_part_command(self, session_name: str, label_part: Optional[InlayHintLabelPart] = None) -> None:\n if not label_part:\n return\n command = label_part.get('command')\n if not command:\n return\n args = {\n \"session_name\": session_name,\n \"command_name\": command[\"command\"],\n \"command_args\": command.get(\"arguments\")\n }\n self.view.run_command(\"lsp_execute\", args)\n\n\ndef inlay_hint_to_phantom(view: sublime.View, inlay_hint: InlayHint, session: Session) -> sublime.Phantom:\n position = inlay_hint[\"position\"]\n region = sublime.Region(point_to_offset(Point.from_lsp(position), view))\n phantom_uuid = str(uuid.uuid4())\n content = get_inlay_hint_html(view, inlay_hint, session, phantom_uuid)\n p = sublime.Phantom(region, content, sublime.LAYOUT_INLINE)\n setattr(p, 'lsp_uuid', phantom_uuid)\n return p\n\n\ndef get_inlay_hint_html(view: sublime.View, inlay_hint: InlayHint, session: Session, phantom_uuid: str) -> str:\n label = format_inlay_hint_label(inlay_hint, session, phantom_uuid)\n font = view.settings().get('font_face') or \"monospace\"\n html = \"\"\"\n <body id=\"lsp-inlay-hint\">\n <style>\n .inlay-hint {{\n color: color(var(--foreground) alpha(0.6));\n background-color: color(var(--foreground) 
alpha(0.08));\n border-radius: 4px;\n padding: 0.05em 4px;\n font-size: 0.9em;\n font-family: {font};\n }}\n\n .inlay-hint a {{\n color: color(var(--foreground) alpha(0.6));\n text-decoration: none;\n }}\n </style>\n <div class=\"inlay-hint\">\n {label}\n </div>\n </body>\n \"\"\".format(\n font=font,\n label=label\n )\n return html\n\n\ndef format_inlay_hint_tooltip(tooltip: Optional[Union[str, MarkupContent]]) -> str:\n if isinstance(tooltip, str):\n return html.escape(tooltip)\n if isinstance(tooltip, dict): # MarkupContent\n return html.escape(tooltip.get('value') or \"\")\n return \"\"\n\n\ndef format_inlay_hint_label(inlay_hint: InlayHint, session: Session, phantom_uuid: str) -> str:\n tooltip = format_inlay_hint_tooltip(inlay_hint.get(\"tooltip\"))\n result = \"\"\n can_resolve_inlay_hint = session.has_capability('inlayHintProvider.resolveProvider')\n label = inlay_hint['label']\n has_text_edits = bool(inlay_hint.get('textEdits'))\n is_clickable = has_text_edits or can_resolve_inlay_hint\n if isinstance(label, str):\n if is_clickable:\n inlay_hint_click_command = sublime.command_url('lsp_on_double_click', {\n 'command': 'lsp_inlay_hint_click',\n 'args': {\n 'session_name': session.config.name,\n 'inlay_hint': cast(dict, inlay_hint),\n 'phantom_uuid': phantom_uuid\n }\n })\n result += '<a href=\"{command}\">'.format(command=inlay_hint_click_command)\n instruction_text = '\\nDouble-click to insert' if has_text_edits else \"\"\n result += '<span title=\"{tooltip}\">{value}</span>'.format(\n tooltip=(tooltip + instruction_text).strip(),\n value=html.escape(label)\n )\n if is_clickable:\n result += \"</a>\"\n return result\n\n for label_part in label:\n value = \"\"\n tooltip = format_inlay_hint_tooltip(label_part.get(\"tooltip\"))\n has_command = bool(label_part.get('command'))\n if has_command:\n inlay_hint_click_command = sublime.command_url('lsp_on_double_click', {\n 'command': 'lsp_inlay_hint_click',\n 'args': {\n 'session_name': session.config.name,\n 'inlay_hint': cast(dict, inlay_hint),\n 'phantom_uuid': phantom_uuid,\n 'label_part': cast(dict, label_part)\n }\n })\n value += '<a href=\"{command}\">'.format(command=inlay_hint_click_command)\n value += html.escape(label_part['value'])\n if has_command:\n value += \"</a>\"\n # InlayHintLabelPart.location is not supported\n instruction_text = '\\nDouble-click to execute' if has_command else \"\"\n result += \"<span title=\\\"{tooltip}\\\">{value}</span>\".format(\n tooltip=(tooltip + instruction_text).strip(),\n value=value\n )\n return result\n", "path": "plugin/inlay_hint.py"}], "after_files": [{"content": "import sublime\nfrom .typing import Optional\n\n\nclass CSS:\n def __init__(self) -> None:\n self.popups = sublime.load_resource(\"Packages/LSP/popups.css\")\n self.popups_classname = \"lsp_popup\"\n self.notification = sublime.load_resource(\"Packages/LSP/notification.css\")\n self.notification_classname = \"notification\"\n self.sheets = sublime.load_resource(\"Packages/LSP/sheets.css\")\n self.sheets_classname = \"lsp_sheet\"\n self.inlay_hints = sublime.load_resource(\"Packages/LSP/inlay_hints.css\")\n\n\n_css = None # type: Optional[CSS]\n\n\ndef load() -> None:\n global _css\n _css = CSS()\n\n\ndef css() -> CSS:\n global _css\n assert _css is not None\n return _css\n", "path": "plugin/core/css.py"}, {"content": "from .core.css import css\nfrom .core.protocol import InlayHint\nfrom .core.protocol import InlayHintLabelPart\nfrom .core.protocol import MarkupContent\nfrom .core.protocol import Point\nfrom .core.protocol 
import Request\nfrom .core.registry import LspTextCommand\nfrom .core.registry import LspWindowCommand\nfrom .core.sessions import Session\nfrom .core.settings import userprefs\nfrom .core.typing import cast, Optional, Union\nfrom .core.views import point_to_offset\nfrom .formatting import apply_text_edits_to_view\nimport html\nimport sublime\nimport uuid\n\n\nclass LspToggleInlayHintsCommand(LspWindowCommand):\n capability = 'inlayHintProvider'\n\n def run(self, enable: Optional[bool] = None) -> None:\n if not isinstance(enable, bool):\n enable = not self.are_enabled(self.window)\n self.window.settings().set('lsp_show_inlay_hints', enable)\n status = 'on' if enable else 'off'\n sublime.status_message('Inlay Hints are {}'.format(status))\n for session in self.sessions():\n for sv in session.session_views_async():\n sv.session_buffer.do_inlay_hints_async(sv.view)\n\n def is_checked(self) -> bool:\n return self.are_enabled(self.window)\n\n @classmethod\n def are_enabled(cls, window: Optional[sublime.Window]) -> bool:\n if not window:\n return userprefs().show_inlay_hints\n return bool(window.settings().get('lsp_show_inlay_hints', userprefs().show_inlay_hints))\n\n\nclass LspInlayHintClickCommand(LspTextCommand):\n capability = 'inlayHintProvider'\n\n def run(self, _edit: sublime.Edit, session_name: str, inlay_hint: InlayHint, phantom_uuid: str,\n event: Optional[dict] = None, label_part: Optional[InlayHintLabelPart] = None) -> None:\n # Insert textEdits for the given inlay hint.\n # If a InlayHintLabelPart was clicked, label_part will be passed as an argument to the LspInlayHintClickCommand\n # and InlayHintLabelPart.command will be executed.\n session = self.session_by_name(session_name, 'inlayHintProvider')\n if session and session.has_capability('inlayHintProvider.resolveProvider'):\n request = Request.resolveInlayHint(inlay_hint, self.view)\n session.send_request_async(\n request,\n lambda response: self.handle(session_name, response, phantom_uuid, label_part))\n return\n self.handle(session_name, inlay_hint, phantom_uuid, label_part)\n\n def handle(self, session_name: str, inlay_hint: InlayHint, phantom_uuid: str,\n label_part: Optional[InlayHintLabelPart] = None) -> None:\n self.handle_inlay_hint_text_edits(session_name, inlay_hint, phantom_uuid)\n self.handle_label_part_command(session_name, label_part)\n\n def handle_inlay_hint_text_edits(self, session_name: str, inlay_hint: InlayHint, phantom_uuid: str) -> None:\n session = self.session_by_name(session_name, 'inlayHintProvider')\n if not session:\n return\n text_edits = inlay_hint.get('textEdits')\n if not text_edits:\n return\n for sb in session.session_buffers_async():\n sb.remove_inlay_hint_phantom(phantom_uuid)\n apply_text_edits_to_view(text_edits, self.view)\n\n def handle_label_part_command(self, session_name: str, label_part: Optional[InlayHintLabelPart] = None) -> None:\n if not label_part:\n return\n command = label_part.get('command')\n if not command:\n return\n args = {\n \"session_name\": session_name,\n \"command_name\": command[\"command\"],\n \"command_args\": command.get(\"arguments\")\n }\n self.view.run_command(\"lsp_execute\", args)\n\n\ndef inlay_hint_to_phantom(view: sublime.View, inlay_hint: InlayHint, session: Session) -> sublime.Phantom:\n position = inlay_hint[\"position\"]\n region = sublime.Region(point_to_offset(Point.from_lsp(position), view))\n phantom_uuid = str(uuid.uuid4())\n content = get_inlay_hint_html(view, inlay_hint, session, phantom_uuid)\n p = sublime.Phantom(region, content, 
sublime.LAYOUT_INLINE)\n setattr(p, 'lsp_uuid', phantom_uuid)\n return p\n\n\ndef get_inlay_hint_html(view: sublime.View, inlay_hint: InlayHint, session: Session, phantom_uuid: str) -> str:\n label = format_inlay_hint_label(inlay_hint, session, phantom_uuid)\n font = view.settings().get('font_face') or \"monospace\"\n html = \"\"\"\n <body id=\"lsp-inlay-hint\">\n <style>\n .inlay-hint {{\n font-family: {font};\n }}\n {css}\n </style>\n <div class=\"inlay-hint\">\n {label}\n </div>\n </body>\n \"\"\".format(\n font=font,\n css=css().inlay_hints,\n label=label\n )\n return html\n\n\ndef format_inlay_hint_tooltip(tooltip: Optional[Union[str, MarkupContent]]) -> str:\n if isinstance(tooltip, str):\n return html.escape(tooltip)\n if isinstance(tooltip, dict): # MarkupContent\n return html.escape(tooltip.get('value') or \"\")\n return \"\"\n\n\ndef format_inlay_hint_label(inlay_hint: InlayHint, session: Session, phantom_uuid: str) -> str:\n tooltip = format_inlay_hint_tooltip(inlay_hint.get(\"tooltip\"))\n result = \"\"\n can_resolve_inlay_hint = session.has_capability('inlayHintProvider.resolveProvider')\n label = inlay_hint['label']\n has_text_edits = bool(inlay_hint.get('textEdits'))\n is_clickable = has_text_edits or can_resolve_inlay_hint\n if isinstance(label, str):\n if is_clickable:\n inlay_hint_click_command = sublime.command_url('lsp_on_double_click', {\n 'command': 'lsp_inlay_hint_click',\n 'args': {\n 'session_name': session.config.name,\n 'inlay_hint': cast(dict, inlay_hint),\n 'phantom_uuid': phantom_uuid\n }\n })\n result += '<a href=\"{command}\">'.format(command=inlay_hint_click_command)\n instruction_text = '\\nDouble-click to insert' if has_text_edits else \"\"\n result += '<span title=\"{tooltip}\">{value}</span>'.format(\n tooltip=(tooltip + instruction_text).strip(),\n value=html.escape(label)\n )\n if is_clickable:\n result += \"</a>\"\n return result\n\n for label_part in label:\n value = \"\"\n tooltip = format_inlay_hint_tooltip(label_part.get(\"tooltip\"))\n has_command = bool(label_part.get('command'))\n if has_command:\n inlay_hint_click_command = sublime.command_url('lsp_on_double_click', {\n 'command': 'lsp_inlay_hint_click',\n 'args': {\n 'session_name': session.config.name,\n 'inlay_hint': cast(dict, inlay_hint),\n 'phantom_uuid': phantom_uuid,\n 'label_part': cast(dict, label_part)\n }\n })\n value += '<a href=\"{command}\">'.format(command=inlay_hint_click_command)\n value += html.escape(label_part['value'])\n if has_command:\n value += \"</a>\"\n # InlayHintLabelPart.location is not supported\n instruction_text = '\\nDouble-click to execute' if has_command else \"\"\n result += \"<span title=\\\"{tooltip}\\\">{value}</span>\".format(\n tooltip=(tooltip + instruction_text).strip(),\n value=value\n )\n return result\n", "path": "plugin/inlay_hint.py"}]} | 2,955 | 408 |
gh_patches_debug_25097 | rasdani/github-patches | git_diff | python-gitlab__python-gitlab-1628 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'project-runner get' command should not exist
## Description of the problem, including code/CLI snippet
The library allows querying the `/projects/:projectId/runners/:id` URL through the `gitlab.v4.objects.ProjectRunnerManager` class, but the official Gitlab API does not describe this endpoint, which does not exist.
Project's runners list :
```bash
$ gitlab -d -o json project-runner list --all --project-id 76644
DEBUG:urllib3.connectionpool:https://gitlab.company.com:443 "GET /api/v4/projects/76644/runners?project_id=76644 HTTP/1.1" 200 None
send: b'GET /api/v4/projects/76644/runners?project_id=76644 HTTP/1.1\r\nHost: gitlab.company.com\r\nUser-Agent: python-gitlab/2.10.1\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\nPRIVATE-TOKEN: [hidden content]\r\nContent-type: application/json\r\n\r\n'
reply: 'HTTP/1.1 200 OK\r\n'
header: Server: nginx
header: Date: Wed, 22 Sep 2021 18:08:59 GMT
header: Content-Type: application/json
header: Transfer-Encoding: chunked
header: Connection: keep-alive
header: Vary: Accept-Encoding
header: Cache-Control: max-age=0, private, must-revalidate
header: Etag: W/"1ae5eff0bd18cd69fc1aa3325e259422"
header: Link: [hidden content]
header: Vary: Origin
header: X-Content-Type-Options: nosniff
header: X-Frame-Options: SAMEORIGIN
header: X-Gitlab-Feature-Category: continuous_integration
header: X-Next-Page:
header: X-Page: 1
header: X-Per-Page: 20
header: X-Prev-Page:
header: X-Request-Id: 01FG7B4MD17BPMDZXG2XHFFK4Q
header: X-Runtime: 0.072099
header: X-Total: 2
header: X-Total-Pages: 1
header: Strict-Transport-Security: max-age=31536000
header: Referrer-Policy: strict-origin-when-cross-origin
header: Content-Encoding: gzip
[{"id": 7723, "description": "<description>", "ip_address": "<ip address>", "active": true, "is_shared": false, "name": "gitlab-runner", "online": false, "status": "offline"}, {"id": 11737, "description": "shared-gitlab-runner-77d49f579-zpr98", "ip_address": "<ip address>", "active": true, "is_shared": true, "name": "gitlab-runner", "online": true, "status": "online"}]
```
Invalid query :
```bash
$ gitlab -d -o json project-runner get --project-id 76644 --id 7723
DEBUG:urllib3.connectionpool:https://gitlab.company.com:443 "GET /api/v4/projects/76644/runners/7723?project_id=76644 HTTP/1.1" 404 25
Impossible to get object (404: 404 Not Found)
send: b'GET /api/v4/projects/76644/runners/7723?project_id=76644 HTTP/1.1\r\nHost: gitlab.company.com\r\nUser-Agent: python-gitlab/2.10.1\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\nPRIVATE-TOKEN: [hidden content]\r\nContent-type: application/json\r\n\r\n'
reply: 'HTTP/1.1 404 Not Found\r\n'
header: Server: nginx
header: Date: Wed, 22 Sep 2021 18:11:44 GMT
header: Content-Type: application/json
header: Content-Length: 25
header: Connection: keep-alive
header: Cache-Control: no-cache
header: Vary: Origin
header: X-Content-Type-Options: nosniff
header: X-Frame-Options: SAMEORIGIN
header: X-Gitlab-Feature-Category: not_owned
header: X-Request-Id: 01FG7B9MZ0K19YPZ4MQJ390PK1
header: X-Runtime: 0.019269
```
Valid query (which already exists): GET /runners/7723
```bash
$ gitlab -d -o json runner get --id 7723
DEBUG:urllib3.connectionpool:https://gitlab.company.com:443 "GET /api/v4/runners/7723 HTTP/1.1" 200 None
send: b'GET /api/v4/runners/7723 HTTP/1.1\r\nHost: gitlab.company.com\r\nUser-Agent: python-gitlab/2.10.1\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\nPRIVATE-TOKEN: [hidden content]\r\nContent-type: application/json\r\n\r\n'
reply: 'HTTP/1.1 200 OK\r\n'
header: Server: nginx
header: Date: Wed, 22 Sep 2021 18:11:34 GMT
header: Content-Type: application/json
header: Transfer-Encoding: chunked
header: Connection: keep-alive
header: Vary: Accept-Encoding
header: Cache-Control: max-age=0, private, must-revalidate
header: Etag: W/"7506c48834a201e1d86531647d32b044"
header: Vary: Origin
header: X-Content-Type-Options: nosniff
header: X-Frame-Options: SAMEORIGIN
header: X-Gitlab-Feature-Category: continuous_integration
header: X-Request-Id: 01FG7B9B43ME9BMFFZTFM8A20H
header: X-Runtime: 0.124597
header: Strict-Transport-Security: max-age=31536000
header: Referrer-Policy: strict-origin-when-cross-origin
header: Content-Encoding: gzip
{"id": 7723, "description": "<description>", "ip_address": "<id address>", "active": true, "is_shared": false, "name": "gitlab-runner", "online": false, "status": "offline", "tag_list": [], "run_untagged": true, "locked": false, "maximum_timeout": null, "access_level": "not_protected", "version": "13.11.0", "revision": "7f7a4bb0", "platform": "linux", "architecture": "amd64", "contacted_at": "2021-04-23T13:03:09.789Z", "projects": [projects list], "groups": []}
```
If I correctly understand the library, I think that the `ProjectRunnerManager` class should not inherit the `GetMixin` class (which comes through `NoUpdateMixin`), but only the `ListMixin`, `CreateMixin` and `DeleteMixin` classes (and the `RESTManager` class, of course).
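For illustration, a minimal sketch of what that change could look like (the mixin names come from `gitlab.mixins`; this is only a sketch, not a tested patch):
```python
from gitlab.base import RequiredOptional, RESTManager, RESTObject
from gitlab.mixins import CreateMixin, DeleteMixin, ListMixin, ObjectDeleteMixin


class ProjectRunner(ObjectDeleteMixin, RESTObject):
    pass


# Without NoUpdateMixin (and therefore without GetMixin), the unsupported
# GET /projects/:id/runners/:runner_id request is no longer generated,
# while list, create and delete keep working.
class ProjectRunnerManager(CreateMixin, DeleteMixin, ListMixin, RESTManager):
    _path = "/projects/%(project_id)s/runners"
    _obj_cls = ProjectRunner
    _from_parent_attrs = {"project_id": "id"}
    _create_attrs = RequiredOptional(required=("runner_id",))
```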
The unit tests do not cover the `project-runner get` command (this could have revealed the bug earlier).
Should I open a PR to fix this issue ?
## Expected Behavior
The `gitlab project-runner get` command should not exist.
## Actual Behavior
The `gitlab project-runner get` command exists.
## Specifications
- python-gitlab version: 2.10.1
- API version you are using (v3/v4): v4
- Gitlab server version (or gitlab.com): 13.12.1-ee
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gitlab/v4/objects/runners.py`
Content:
```
1 from gitlab import cli
2 from gitlab import exceptions as exc
3 from gitlab import types
4 from gitlab.base import RequiredOptional, RESTManager, RESTObject
5 from gitlab.mixins import (
6 CRUDMixin,
7 ListMixin,
8 NoUpdateMixin,
9 ObjectDeleteMixin,
10 SaveMixin,
11 )
12
13 __all__ = [
14 "RunnerJob",
15 "RunnerJobManager",
16 "Runner",
17 "RunnerManager",
18 "GroupRunner",
19 "GroupRunnerManager",
20 "ProjectRunner",
21 "ProjectRunnerManager",
22 ]
23
24
25 class RunnerJob(RESTObject):
26 pass
27
28
29 class RunnerJobManager(ListMixin, RESTManager):
30 _path = "/runners/%(runner_id)s/jobs"
31 _obj_cls = RunnerJob
32 _from_parent_attrs = {"runner_id": "id"}
33 _list_filters = ("status",)
34
35
36 class Runner(SaveMixin, ObjectDeleteMixin, RESTObject):
37 jobs: RunnerJobManager
38
39
40 class RunnerManager(CRUDMixin, RESTManager):
41 _path = "/runners"
42 _obj_cls = Runner
43 _create_attrs = RequiredOptional(
44 required=("token",),
45 optional=(
46 "description",
47 "info",
48 "active",
49 "locked",
50 "run_untagged",
51 "tag_list",
52 "access_level",
53 "maximum_timeout",
54 ),
55 )
56 _update_attrs = RequiredOptional(
57 optional=(
58 "description",
59 "active",
60 "tag_list",
61 "run_untagged",
62 "locked",
63 "access_level",
64 "maximum_timeout",
65 ),
66 )
67 _list_filters = ("scope", "tag_list")
68 _types = {"tag_list": types.ListAttribute}
69
70 @cli.register_custom_action("RunnerManager", tuple(), ("scope",))
71 @exc.on_http_error(exc.GitlabListError)
72 def all(self, scope=None, **kwargs):
73 """List all the runners.
74
75 Args:
76 scope (str): The scope of runners to show, one of: specific,
77 shared, active, paused, online
78 all (bool): If True, return all the items, without pagination
79 per_page (int): Number of items to retrieve per request
80 page (int): ID of the page to return (starts with page 1)
81 as_list (bool): If set to False and no pagination option is
82 defined, return a generator instead of a list
83 **kwargs: Extra options to send to the server (e.g. sudo)
84
85 Raises:
86 GitlabAuthenticationError: If authentication is not correct
87 GitlabListError: If the server failed to perform the request
88
89 Returns:
90 list(Runner): a list of runners matching the scope.
91 """
92 path = "/runners/all"
93 query_data = {}
94 if scope is not None:
95 query_data["scope"] = scope
96 obj = self.gitlab.http_list(path, query_data, **kwargs)
97 return [self._obj_cls(self, item) for item in obj]
98
99 @cli.register_custom_action("RunnerManager", ("token",))
100 @exc.on_http_error(exc.GitlabVerifyError)
101 def verify(self, token, **kwargs):
102 """Validates authentication credentials for a registered Runner.
103
104 Args:
105 token (str): The runner's authentication token
106 **kwargs: Extra options to send to the server (e.g. sudo)
107
108 Raises:
109 GitlabAuthenticationError: If authentication is not correct
110 GitlabVerifyError: If the server failed to verify the token
111 """
112 path = "/runners/verify"
113 post_data = {"token": token}
114 self.gitlab.http_post(path, post_data=post_data, **kwargs)
115
116
117 class GroupRunner(ObjectDeleteMixin, RESTObject):
118 pass
119
120
121 class GroupRunnerManager(NoUpdateMixin, RESTManager):
122 _path = "/groups/%(group_id)s/runners"
123 _obj_cls = GroupRunner
124 _from_parent_attrs = {"group_id": "id"}
125 _create_attrs = RequiredOptional(required=("runner_id",))
126 _list_filters = ("scope", "tag_list")
127 _types = {"tag_list": types.ListAttribute}
128
129
130 class ProjectRunner(ObjectDeleteMixin, RESTObject):
131 pass
132
133
134 class ProjectRunnerManager(NoUpdateMixin, RESTManager):
135 _path = "/projects/%(project_id)s/runners"
136 _obj_cls = ProjectRunner
137 _from_parent_attrs = {"project_id": "id"}
138 _create_attrs = RequiredOptional(required=("runner_id",))
139 _list_filters = ("scope", "tag_list")
140 _types = {"tag_list": types.ListAttribute}
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gitlab/v4/objects/runners.py b/gitlab/v4/objects/runners.py
--- a/gitlab/v4/objects/runners.py
+++ b/gitlab/v4/objects/runners.py
@@ -3,9 +3,10 @@
from gitlab import types
from gitlab.base import RequiredOptional, RESTManager, RESTObject
from gitlab.mixins import (
+ CreateMixin,
CRUDMixin,
+ DeleteMixin,
ListMixin,
- NoUpdateMixin,
ObjectDeleteMixin,
SaveMixin,
)
@@ -114,11 +115,11 @@
self.gitlab.http_post(path, post_data=post_data, **kwargs)
-class GroupRunner(ObjectDeleteMixin, RESTObject):
+class GroupRunner(RESTObject):
pass
-class GroupRunnerManager(NoUpdateMixin, RESTManager):
+class GroupRunnerManager(ListMixin, RESTManager):
_path = "/groups/%(group_id)s/runners"
_obj_cls = GroupRunner
_from_parent_attrs = {"group_id": "id"}
@@ -131,7 +132,7 @@
pass
-class ProjectRunnerManager(NoUpdateMixin, RESTManager):
+class ProjectRunnerManager(CreateMixin, DeleteMixin, ListMixin, RESTManager):
_path = "/projects/%(project_id)s/runners"
_obj_cls = ProjectRunner
_from_parent_attrs = {"project_id": "id"}
| {"golden_diff": "diff --git a/gitlab/v4/objects/runners.py b/gitlab/v4/objects/runners.py\n--- a/gitlab/v4/objects/runners.py\n+++ b/gitlab/v4/objects/runners.py\n@@ -3,9 +3,10 @@\n from gitlab import types\n from gitlab.base import RequiredOptional, RESTManager, RESTObject\n from gitlab.mixins import (\n+ CreateMixin,\n CRUDMixin,\n+ DeleteMixin,\n ListMixin,\n- NoUpdateMixin,\n ObjectDeleteMixin,\n SaveMixin,\n )\n@@ -114,11 +115,11 @@\n self.gitlab.http_post(path, post_data=post_data, **kwargs)\n \n \n-class GroupRunner(ObjectDeleteMixin, RESTObject):\n+class GroupRunner(RESTObject):\n pass\n \n \n-class GroupRunnerManager(NoUpdateMixin, RESTManager):\n+class GroupRunnerManager(ListMixin, RESTManager):\n _path = \"/groups/%(group_id)s/runners\"\n _obj_cls = GroupRunner\n _from_parent_attrs = {\"group_id\": \"id\"}\n@@ -131,7 +132,7 @@\n pass\n \n \n-class ProjectRunnerManager(NoUpdateMixin, RESTManager):\n+class ProjectRunnerManager(CreateMixin, DeleteMixin, ListMixin, RESTManager):\n _path = \"/projects/%(project_id)s/runners\"\n _obj_cls = ProjectRunner\n _from_parent_attrs = {\"project_id\": \"id\"}\n", "issue": "'project-runner get' command should not exist\n## Description of the problem, including code/CLI snippet\r\nThe library allows to query `/projects/:projectId/runners/:id` URL through `gitlab.v4.objects.ProjectRunnerManager` class, but the official Gitlab API does not describe this endpoint, which does not exists.\r\n\r\nProject's runners list :\r\n```bash\r\n$ gitlab -d -o json project-runner list --all --project-id 76644\r\nDEBUG:urllib3.connectionpool:https://gitlab.company.com:443 \"GET /api/v4/projects/76644/runners?project_id=76644 HTTP/1.1\" 200 None\r\nsend: b'GET /api/v4/projects/76644/runners?project_id=76644 HTTP/1.1\\r\\nHost: gitlab.company.com\\r\\nUser-Agent: python-gitlab/2.10.1\\r\\nAccept-Encoding: gzip, deflate\\r\\nAccept: */*\\r\\nConnection: keep-alive\\r\\nPRIVATE-TOKEN: [hidden content]\\r\\nContent-type: application/json\\r\\n\\r\\n'\r\nreply: 'HTTP/1.1 200 OK\\r\\n'\r\nheader: Server: nginx\r\nheader: Date: Wed, 22 Sep 2021 18:08:59 GMT\r\nheader: Content-Type: application/json\r\nheader: Transfer-Encoding: chunked\r\nheader: Connection: keep-alive\r\nheader: Vary: Accept-Encoding\r\nheader: Cache-Control: max-age=0, private, must-revalidate\r\nheader: Etag: W/\"1ae5eff0bd18cd69fc1aa3325e259422\"\r\nheader: Link: [hidden content]\r\nheader: Vary: Origin\r\nheader: X-Content-Type-Options: nosniff\r\nheader: X-Frame-Options: SAMEORIGIN\r\nheader: X-Gitlab-Feature-Category: continuous_integration\r\nheader: X-Next-Page:\r\nheader: X-Page: 1\r\nheader: X-Per-Page: 20\r\nheader: X-Prev-Page:\r\nheader: X-Request-Id: 01FG7B4MD17BPMDZXG2XHFFK4Q\r\nheader: X-Runtime: 0.072099\r\nheader: X-Total: 2\r\nheader: X-Total-Pages: 1\r\nheader: Strict-Transport-Security: max-age=31536000\r\nheader: Referrer-Policy: strict-origin-when-cross-origin\r\nheader: Content-Encoding: gzip\r\n[{\"id\": 7723, \"description\": \"<description>\", \"ip_address\": \"<ip address>\", \"active\": true, \"is_shared\": false, \"name\": \"gitlab-runner\", \"online\": false, \"status\": \"offline\"}, {\"id\": 11737, \"description\": \"shared-gitlab-runner-77d49f579-zpr98\", \"ip_address\": \"<ip address>\", \"active\": true, \"is_shared\": true, \"name\": \"gitlab-runner\", \"online\": true, \"status\": \"online\"}]\r\n```\r\n\r\nInvalid query :\r\n```bash\r\n$ gitlab -d -o json project-runner get --project-id 76644 --id 
7723\r\nDEBUG:urllib3.connectionpool:https://gitlab.company.com:443 \"GET /api/v4/projects/76644/runners/7723?project_id=76644 HTTP/1.1\" 404 25\r\nImpossible to get object (404: 404 Not Found)\r\nsend: b'GET /api/v4/projects/76644/runners/7723?project_id=76644 HTTP/1.1\\r\\nHost: gitlab.company.com\\r\\nUser-Agent: python-gitlab/2.10.1\\r\\nAccept-Encoding: gzip, deflate\\r\\nAccept: */*\\r\\nConnection: keep-alive\\r\\nPRIVATE-TOKEN: [hidden content]\\r\\nContent-type: application/json\\r\\n\\r\\n'\r\nreply: 'HTTP/1.1 404 Not Found\\r\\n'\r\nheader: Server: nginx\r\nheader: Date: Wed, 22 Sep 2021 18:11:44 GMT\r\nheader: Content-Type: application/json\r\nheader: Content-Length: 25\r\nheader: Connection: keep-alive\r\nheader: Cache-Control: no-cache\r\nheader: Vary: Origin\r\nheader: X-Content-Type-Options: nosniff\r\nheader: X-Frame-Options: SAMEORIGIN\r\nheader: X-Gitlab-Feature-Category: not_owned\r\nheader: X-Request-Id: 01FG7B9MZ0K19YPZ4MQJ390PK1\r\nheader: X-Runtime: 0.019269\r\n```\r\n\r\nValid query (which already exists) : GET /runner/7723\r\n```bash\r\n$ gitlab -d -o json runner get --id 7723\r\nDEBUG:urllib3.connectionpool:https://gitlab.company.com:443 \"GET /api/v4/runners/7723 HTTP/1.1\" 200 None\r\nsend: b'GET /api/v4/runners/7723 HTTP/1.1\\r\\nHost: gitlab.company.com\\r\\nUser-Agent: python-gitlab/2.10.1\\r\\nAccept-Encoding: gzip, deflate\\r\\nAccept: */*\\r\\nConnection: keep-alive\\r\\nPRIVATE-TOKEN: [hidden content]\\r\\nContent-type: application/json\\r\\n\\r\\n'\r\nreply: 'HTTP/1.1 200 OK\\r\\n'\r\nheader: Server: nginx\r\nheader: Date: Wed, 22 Sep 2021 18:11:34 GMT\r\nheader: Content-Type: application/json\r\nheader: Transfer-Encoding: chunked\r\nheader: Connection: keep-alive\r\nheader: Vary: Accept-Encoding\r\nheader: Cache-Control: max-age=0, private, must-revalidate\r\nheader: Etag: W/\"7506c48834a201e1d86531647d32b044\"\r\nheader: Vary: Origin\r\nheader: X-Content-Type-Options: nosniff\r\nheader: X-Frame-Options: SAMEORIGIN\r\nheader: X-Gitlab-Feature-Category: continuous_integration\r\nheader: X-Request-Id: 01FG7B9B43ME9BMFFZTFM8A20H\r\nheader: X-Runtime: 0.124597\r\nheader: Strict-Transport-Security: max-age=31536000\r\nheader: Referrer-Policy: strict-origin-when-cross-origin\r\nheader: Content-Encoding: gzip\r\n{\"id\": 7723, \"description\": \"<description>\", \"ip_address\": \"<id address>\", \"active\": true, \"is_shared\": false, \"name\": \"gitlab-runner\", \"online\": false, \"status\": \"offline\", \"tag_list\": [], \"run_untagged\": true, \"locked\": false, \"maximum_timeout\": null, \"access_level\": \"not_protected\", \"version\": \"13.11.0\", \"revision\": \"7f7a4bb0\", \"platform\": \"linux\", \"architecture\": \"amd64\", \"contacted_at\": \"2021-04-23T13:03:09.789Z\", \"projects\": [projects list], \"groups\": []}\r\n```\r\n\r\nIf I correctly understand the library, I think that `ProjectRunnerManager` class should not inherits `GetMixin` class (which comes through `NoUpdateMixin`), but only `ListMixin`, `CreateMixin` and `DeleteMixin` classes (and `RESTManager` class, of course).\r\n\r\nThe unit tests do not cover the `project-runner get` command (this could have revelead the bug earlier).\r\n\r\nShould I open a PR to fix this issue ?\r\n\r\n## Expected Behavior\r\nThe `gitlab projet-runner get` command should not exist.\r\n\r\n## Actual Behavior\r\nThe `gitlab projet-runner get` command exists.\r\n\r\n## Specifications\r\n\r\n - python-gitlab version: 2.10.1\r\n - API version you are using (v3/v4): v4\r\n - Gitlab server version (or 
gitlab.com): 13.12.1-ee\r\n\n", "before_files": [{"content": "from gitlab import cli\nfrom gitlab import exceptions as exc\nfrom gitlab import types\nfrom gitlab.base import RequiredOptional, RESTManager, RESTObject\nfrom gitlab.mixins import (\n CRUDMixin,\n ListMixin,\n NoUpdateMixin,\n ObjectDeleteMixin,\n SaveMixin,\n)\n\n__all__ = [\n \"RunnerJob\",\n \"RunnerJobManager\",\n \"Runner\",\n \"RunnerManager\",\n \"GroupRunner\",\n \"GroupRunnerManager\",\n \"ProjectRunner\",\n \"ProjectRunnerManager\",\n]\n\n\nclass RunnerJob(RESTObject):\n pass\n\n\nclass RunnerJobManager(ListMixin, RESTManager):\n _path = \"/runners/%(runner_id)s/jobs\"\n _obj_cls = RunnerJob\n _from_parent_attrs = {\"runner_id\": \"id\"}\n _list_filters = (\"status\",)\n\n\nclass Runner(SaveMixin, ObjectDeleteMixin, RESTObject):\n jobs: RunnerJobManager\n\n\nclass RunnerManager(CRUDMixin, RESTManager):\n _path = \"/runners\"\n _obj_cls = Runner\n _create_attrs = RequiredOptional(\n required=(\"token\",),\n optional=(\n \"description\",\n \"info\",\n \"active\",\n \"locked\",\n \"run_untagged\",\n \"tag_list\",\n \"access_level\",\n \"maximum_timeout\",\n ),\n )\n _update_attrs = RequiredOptional(\n optional=(\n \"description\",\n \"active\",\n \"tag_list\",\n \"run_untagged\",\n \"locked\",\n \"access_level\",\n \"maximum_timeout\",\n ),\n )\n _list_filters = (\"scope\", \"tag_list\")\n _types = {\"tag_list\": types.ListAttribute}\n\n @cli.register_custom_action(\"RunnerManager\", tuple(), (\"scope\",))\n @exc.on_http_error(exc.GitlabListError)\n def all(self, scope=None, **kwargs):\n \"\"\"List all the runners.\n\n Args:\n scope (str): The scope of runners to show, one of: specific,\n shared, active, paused, online\n all (bool): If True, return all the items, without pagination\n per_page (int): Number of items to retrieve per request\n page (int): ID of the page to return (starts with page 1)\n as_list (bool): If set to False and no pagination option is\n defined, return a generator instead of a list\n **kwargs: Extra options to send to the server (e.g. sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabListError: If the server failed to perform the request\n\n Returns:\n list(Runner): a list of runners matching the scope.\n \"\"\"\n path = \"/runners/all\"\n query_data = {}\n if scope is not None:\n query_data[\"scope\"] = scope\n obj = self.gitlab.http_list(path, query_data, **kwargs)\n return [self._obj_cls(self, item) for item in obj]\n\n @cli.register_custom_action(\"RunnerManager\", (\"token\",))\n @exc.on_http_error(exc.GitlabVerifyError)\n def verify(self, token, **kwargs):\n \"\"\"Validates authentication credentials for a registered Runner.\n\n Args:\n token (str): The runner's authentication token\n **kwargs: Extra options to send to the server (e.g. 
sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabVerifyError: If the server failed to verify the token\n \"\"\"\n path = \"/runners/verify\"\n post_data = {\"token\": token}\n self.gitlab.http_post(path, post_data=post_data, **kwargs)\n\n\nclass GroupRunner(ObjectDeleteMixin, RESTObject):\n pass\n\n\nclass GroupRunnerManager(NoUpdateMixin, RESTManager):\n _path = \"/groups/%(group_id)s/runners\"\n _obj_cls = GroupRunner\n _from_parent_attrs = {\"group_id\": \"id\"}\n _create_attrs = RequiredOptional(required=(\"runner_id\",))\n _list_filters = (\"scope\", \"tag_list\")\n _types = {\"tag_list\": types.ListAttribute}\n\n\nclass ProjectRunner(ObjectDeleteMixin, RESTObject):\n pass\n\n\nclass ProjectRunnerManager(NoUpdateMixin, RESTManager):\n _path = \"/projects/%(project_id)s/runners\"\n _obj_cls = ProjectRunner\n _from_parent_attrs = {\"project_id\": \"id\"}\n _create_attrs = RequiredOptional(required=(\"runner_id\",))\n _list_filters = (\"scope\", \"tag_list\")\n _types = {\"tag_list\": types.ListAttribute}\n", "path": "gitlab/v4/objects/runners.py"}], "after_files": [{"content": "from gitlab import cli\nfrom gitlab import exceptions as exc\nfrom gitlab import types\nfrom gitlab.base import RequiredOptional, RESTManager, RESTObject\nfrom gitlab.mixins import (\n CreateMixin,\n CRUDMixin,\n DeleteMixin,\n ListMixin,\n ObjectDeleteMixin,\n SaveMixin,\n)\n\n__all__ = [\n \"RunnerJob\",\n \"RunnerJobManager\",\n \"Runner\",\n \"RunnerManager\",\n \"GroupRunner\",\n \"GroupRunnerManager\",\n \"ProjectRunner\",\n \"ProjectRunnerManager\",\n]\n\n\nclass RunnerJob(RESTObject):\n pass\n\n\nclass RunnerJobManager(ListMixin, RESTManager):\n _path = \"/runners/%(runner_id)s/jobs\"\n _obj_cls = RunnerJob\n _from_parent_attrs = {\"runner_id\": \"id\"}\n _list_filters = (\"status\",)\n\n\nclass Runner(SaveMixin, ObjectDeleteMixin, RESTObject):\n jobs: RunnerJobManager\n\n\nclass RunnerManager(CRUDMixin, RESTManager):\n _path = \"/runners\"\n _obj_cls = Runner\n _create_attrs = RequiredOptional(\n required=(\"token\",),\n optional=(\n \"description\",\n \"info\",\n \"active\",\n \"locked\",\n \"run_untagged\",\n \"tag_list\",\n \"access_level\",\n \"maximum_timeout\",\n ),\n )\n _update_attrs = RequiredOptional(\n optional=(\n \"description\",\n \"active\",\n \"tag_list\",\n \"run_untagged\",\n \"locked\",\n \"access_level\",\n \"maximum_timeout\",\n ),\n )\n _list_filters = (\"scope\", \"tag_list\")\n _types = {\"tag_list\": types.ListAttribute}\n\n @cli.register_custom_action(\"RunnerManager\", tuple(), (\"scope\",))\n @exc.on_http_error(exc.GitlabListError)\n def all(self, scope=None, **kwargs):\n \"\"\"List all the runners.\n\n Args:\n scope (str): The scope of runners to show, one of: specific,\n shared, active, paused, online\n all (bool): If True, return all the items, without pagination\n per_page (int): Number of items to retrieve per request\n page (int): ID of the page to return (starts with page 1)\n as_list (bool): If set to False and no pagination option is\n defined, return a generator instead of a list\n **kwargs: Extra options to send to the server (e.g. 
sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabListError: If the server failed to perform the request\n\n Returns:\n list(Runner): a list of runners matching the scope.\n \"\"\"\n path = \"/runners/all\"\n query_data = {}\n if scope is not None:\n query_data[\"scope\"] = scope\n obj = self.gitlab.http_list(path, query_data, **kwargs)\n return [self._obj_cls(self, item) for item in obj]\n\n @cli.register_custom_action(\"RunnerManager\", (\"token\",))\n @exc.on_http_error(exc.GitlabVerifyError)\n def verify(self, token, **kwargs):\n \"\"\"Validates authentication credentials for a registered Runner.\n\n Args:\n token (str): The runner's authentication token\n **kwargs: Extra options to send to the server (e.g. sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabVerifyError: If the server failed to verify the token\n \"\"\"\n path = \"/runners/verify\"\n post_data = {\"token\": token}\n self.gitlab.http_post(path, post_data=post_data, **kwargs)\n\n\nclass GroupRunner(RESTObject):\n pass\n\n\nclass GroupRunnerManager(ListMixin, RESTManager):\n _path = \"/groups/%(group_id)s/runners\"\n _obj_cls = GroupRunner\n _from_parent_attrs = {\"group_id\": \"id\"}\n _create_attrs = RequiredOptional(required=(\"runner_id\",))\n _list_filters = (\"scope\", \"tag_list\")\n _types = {\"tag_list\": types.ListAttribute}\n\n\nclass ProjectRunner(ObjectDeleteMixin, RESTObject):\n pass\n\n\nclass ProjectRunnerManager(CreateMixin, DeleteMixin, ListMixin, RESTManager):\n _path = \"/projects/%(project_id)s/runners\"\n _obj_cls = ProjectRunner\n _from_parent_attrs = {\"project_id\": \"id\"}\n _create_attrs = RequiredOptional(required=(\"runner_id\",))\n _list_filters = (\"scope\", \"tag_list\")\n _types = {\"tag_list\": types.ListAttribute}\n", "path": "gitlab/v4/objects/runners.py"}]} | 3,464 | 315 |
gh_patches_debug_26912 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-361 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor technical feedback
The current implementation of technical feedback does not comply with the way it is used in the apps when submitting feedback. Instead of having one model for technical feedback, we should rather have a `technical` flag for each of the other feedback models. This flag should be set whenever the `category`-parameter contains "technisch" or "technical". In a future API version, we can convert this to a parameter, which can either be true or false in the feedback submission request.
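
As an example, the flag could be derived from the legacy parameter roughly like this (a hypothetical helper; the name and placement are illustrative only, not code from the repository):

```python
# Hypothetical helper (illustrative only): derive the proposed flag from the
# legacy "category" request parameter sent by the apps.
def is_technical_feedback(category: str) -> bool:
    if not category:
        return False
    category = category.lower()
    return "technisch" in category or "technical" in category
```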
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cms/models/__init__.py`
Content:
```
1 from .config.configuration import Configuration
2
3 from .events.event import Event
4 from .events.event_translation import EventTranslation
5 from .events.recurrence_rule import RecurrenceRule
6
7 from .offers.offer import Offer
8 from .offers.offer_template import OfferTemplate
9
10 from .feedback.event_feedback import EventFeedback
11 from .feedback.offer_feedback import OfferFeedback
12 from .feedback.feedback import Feedback
13 from .feedback.page_feedback import PageFeedback
14 from .feedback.region_feedback import RegionFeedback
15 from .feedback.search_result_feedback import SearchResultFeedback
16 from .feedback.technical_feedback import TechnicalFeedback
17
18 from .languages.language import Language
19 from .languages.language_tree_node import LanguageTreeNode
20
21 from .media.document import Document
22
23 from .pages.page import Page
24 from .pages.page_translation import PageTranslation
25
26 from .pois.poi import POI
27 from .pois.poi_translation import POITranslation
28
29 from .push_notifications.push_notification import PushNotification
30 from .push_notifications.push_notification_translation import PushNotificationTranslation
31
32 from .regions.region import Region
33
34 from .users.organization import Organization
35 from .users.user_profile import UserProfile
36 from .users.user_mfa import UserMfa
37
```
Path: `src/cms/models/feedback/feedback.py`
Content:
```
1 """
2 Module for models storing feedback from front end users
3 """
4 from django.db import models
5
6
7 class Feedback(models.Model):
8 """
9 Base class for collecting feeedback from users.
10 """
11 EMOTION = (
12 ("Pos", "Positive"),
13 ("Neg", "Negative"),
14 ("NA", "Not Available"),
15 )
16 emotion = models.CharField(max_length=3, choices=EMOTION)
17 comment = models.CharField(max_length=1000)
18 readStatus = models.BooleanField(default=False)
19
20 created_date = models.DateTimeField(auto_now_add=True)
21 last_updated = models.DateTimeField(auto_now=True)
22
23 class Meta:
24 default_permissions = ()
25 permissions = (
26 ('view_feedback', 'Can view feedback'),
27 )
28
```
Path: `src/cms/models/feedback/technical_feedback.py`
Content:
```
1 """
2 Module for models storing feedback from front end users
3 """
4 from django.db import models
5
6 from .feedback import Feedback
7 from ..pages.page import Page
8
9
10 class TechnicalFeedback(Feedback):
11 """
12 Technical feedback on the end user app
13 """
14 page = models.ForeignKey(Page, on_delete=models.CASCADE)
15
16 class Meta:
17 default_permissions = ()
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cms/models/__init__.py b/src/cms/models/__init__.py
--- a/src/cms/models/__init__.py
+++ b/src/cms/models/__init__.py
@@ -13,7 +13,6 @@
from .feedback.page_feedback import PageFeedback
from .feedback.region_feedback import RegionFeedback
from .feedback.search_result_feedback import SearchResultFeedback
-from .feedback.technical_feedback import TechnicalFeedback
from .languages.language import Language
from .languages.language_tree_node import LanguageTreeNode
diff --git a/src/cms/models/feedback/feedback.py b/src/cms/models/feedback/feedback.py
--- a/src/cms/models/feedback/feedback.py
+++ b/src/cms/models/feedback/feedback.py
@@ -15,7 +15,8 @@
)
emotion = models.CharField(max_length=3, choices=EMOTION)
comment = models.CharField(max_length=1000)
- readStatus = models.BooleanField(default=False)
+ is_technical = models.BooleanField(default=False)
+ read_status = models.BooleanField(default=False)
created_date = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
diff --git a/src/cms/models/feedback/technical_feedback.py b/src/cms/models/feedback/technical_feedback.py
deleted file mode 100644
--- a/src/cms/models/feedback/technical_feedback.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-Module for models storing feedback from front end users
-"""
-from django.db import models
-
-from .feedback import Feedback
-from ..pages.page import Page
-
-
-class TechnicalFeedback(Feedback):
- """
- Technical feedback on the end user app
- """
- page = models.ForeignKey(Page, on_delete=models.CASCADE)
-
- class Meta:
- default_permissions = ()
| {"golden_diff": "diff --git a/src/cms/models/__init__.py b/src/cms/models/__init__.py\n--- a/src/cms/models/__init__.py\n+++ b/src/cms/models/__init__.py\n@@ -13,7 +13,6 @@\n from .feedback.page_feedback import PageFeedback\n from .feedback.region_feedback import RegionFeedback\n from .feedback.search_result_feedback import SearchResultFeedback\n-from .feedback.technical_feedback import TechnicalFeedback\n \n from .languages.language import Language\n from .languages.language_tree_node import LanguageTreeNode\ndiff --git a/src/cms/models/feedback/feedback.py b/src/cms/models/feedback/feedback.py\n--- a/src/cms/models/feedback/feedback.py\n+++ b/src/cms/models/feedback/feedback.py\n@@ -15,7 +15,8 @@\n )\n emotion = models.CharField(max_length=3, choices=EMOTION)\n comment = models.CharField(max_length=1000)\n- readStatus = models.BooleanField(default=False)\n+ is_technical = models.BooleanField(default=False)\n+ read_status = models.BooleanField(default=False)\n \n created_date = models.DateTimeField(auto_now_add=True)\n last_updated = models.DateTimeField(auto_now=True)\ndiff --git a/src/cms/models/feedback/technical_feedback.py b/src/cms/models/feedback/technical_feedback.py\ndeleted file mode 100644\n--- a/src/cms/models/feedback/technical_feedback.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-\"\"\"\n-Module for models storing feedback from front end users\n-\"\"\"\n-from django.db import models\n-\n-from .feedback import Feedback\n-from ..pages.page import Page\n-\n-\n-class TechnicalFeedback(Feedback):\n- \"\"\"\n- Technical feedback on the end user app\n- \"\"\"\n- page = models.ForeignKey(Page, on_delete=models.CASCADE)\n-\n- class Meta:\n- default_permissions = ()\n", "issue": "Refactor technical feedback\nThe current implementation of technical feedback does not comply with the way it is used in the apps when submitting feedback. Instead of having one model for technical feedback, we should rather have a `technical` flag for each of the other feedback models. This flag should be set whenever the `category`-parameter contains \"technisch\" or \"technical\". 
In a future API version, we can convert this to a parameter, which can either be true or false in the feedback submission request.\n", "before_files": [{"content": "from .config.configuration import Configuration\n\nfrom .events.event import Event\nfrom .events.event_translation import EventTranslation\nfrom .events.recurrence_rule import RecurrenceRule\n\nfrom .offers.offer import Offer\nfrom .offers.offer_template import OfferTemplate\n\nfrom .feedback.event_feedback import EventFeedback\nfrom .feedback.offer_feedback import OfferFeedback\nfrom .feedback.feedback import Feedback\nfrom .feedback.page_feedback import PageFeedback\nfrom .feedback.region_feedback import RegionFeedback\nfrom .feedback.search_result_feedback import SearchResultFeedback\nfrom .feedback.technical_feedback import TechnicalFeedback\n\nfrom .languages.language import Language\nfrom .languages.language_tree_node import LanguageTreeNode\n\nfrom .media.document import Document\n\nfrom .pages.page import Page\nfrom .pages.page_translation import PageTranslation\n\nfrom .pois.poi import POI\nfrom .pois.poi_translation import POITranslation\n\nfrom .push_notifications.push_notification import PushNotification\nfrom .push_notifications.push_notification_translation import PushNotificationTranslation\n\nfrom .regions.region import Region\n\nfrom .users.organization import Organization\nfrom .users.user_profile import UserProfile\nfrom .users.user_mfa import UserMfa\n", "path": "src/cms/models/__init__.py"}, {"content": "\"\"\"\nModule for models storing feedback from front end users\n\"\"\"\nfrom django.db import models\n\n\nclass Feedback(models.Model):\n \"\"\"\n Base class for collecting feeedback from users.\n \"\"\"\n EMOTION = (\n (\"Pos\", \"Positive\"),\n (\"Neg\", \"Negative\"),\n (\"NA\", \"Not Available\"),\n )\n emotion = models.CharField(max_length=3, choices=EMOTION)\n comment = models.CharField(max_length=1000)\n readStatus = models.BooleanField(default=False)\n\n created_date = models.DateTimeField(auto_now_add=True)\n last_updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n default_permissions = ()\n permissions = (\n ('view_feedback', 'Can view feedback'),\n )\n", "path": "src/cms/models/feedback/feedback.py"}, {"content": "\"\"\"\nModule for models storing feedback from front end users\n\"\"\"\nfrom django.db import models\n\nfrom .feedback import Feedback\nfrom ..pages.page import Page\n\n\nclass TechnicalFeedback(Feedback):\n \"\"\"\n Technical feedback on the end user app\n \"\"\"\n page = models.ForeignKey(Page, on_delete=models.CASCADE)\n\n class Meta:\n default_permissions = ()\n", "path": "src/cms/models/feedback/technical_feedback.py"}], "after_files": [{"content": "from .config.configuration import Configuration\n\nfrom .events.event import Event\nfrom .events.event_translation import EventTranslation\nfrom .events.recurrence_rule import RecurrenceRule\n\nfrom .offers.offer import Offer\nfrom .offers.offer_template import OfferTemplate\n\nfrom .feedback.event_feedback import EventFeedback\nfrom .feedback.offer_feedback import OfferFeedback\nfrom .feedback.feedback import Feedback\nfrom .feedback.page_feedback import PageFeedback\nfrom .feedback.region_feedback import RegionFeedback\nfrom .feedback.search_result_feedback import SearchResultFeedback\n\nfrom .languages.language import Language\nfrom .languages.language_tree_node import LanguageTreeNode\n\nfrom .media.document import Document\n\nfrom .pages.page import Page\nfrom .pages.page_translation import PageTranslation\n\nfrom 
.pois.poi import POI\nfrom .pois.poi_translation import POITranslation\n\nfrom .push_notifications.push_notification import PushNotification\nfrom .push_notifications.push_notification_translation import PushNotificationTranslation\n\nfrom .regions.region import Region\n\nfrom .users.organization import Organization\nfrom .users.user_profile import UserProfile\nfrom .users.user_mfa import UserMfa\n", "path": "src/cms/models/__init__.py"}, {"content": "\"\"\"\nModule for models storing feedback from front end users\n\"\"\"\nfrom django.db import models\n\n\nclass Feedback(models.Model):\n \"\"\"\n Base class for collecting feeedback from users.\n \"\"\"\n EMOTION = (\n (\"Pos\", \"Positive\"),\n (\"Neg\", \"Negative\"),\n (\"NA\", \"Not Available\"),\n )\n emotion = models.CharField(max_length=3, choices=EMOTION)\n comment = models.CharField(max_length=1000)\n is_technical = models.BooleanField(default=False)\n read_status = models.BooleanField(default=False)\n\n created_date = models.DateTimeField(auto_now_add=True)\n last_updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n default_permissions = ()\n permissions = (\n ('view_feedback', 'Can view feedback'),\n )\n", "path": "src/cms/models/feedback/feedback.py"}, {"content": null, "path": "src/cms/models/feedback/technical_feedback.py"}]} | 1,007 | 394 |
gh_patches_debug_7917 | rasdani/github-patches | git_diff | bokeh__bokeh-6159 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
For Interactive Legends, a note about `muted_color` would be helpful
For the [Interactive Legend guide](https://github.com/bokeh/bokeh/blob/d8fcffa1c472bf641517ef81698bb6d057cbd30f/sphinx/source/docs/user_guide/interaction/legends.rst#id7), in addition to the note about `muted_alpha`, it would be helpful to also demonstrate `muted_color`.
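A minimal sketch of the extra keyword in that example's loop (assuming the glyph keeps the existing `muted_alpha` setting; `p`, `df`, `color`, and `name` come from the loop already in the guide):

```python
# muted_color controls the colour used while the line is muted;
# passing the original hue keeps the line recognizable when faded.
p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8,
       muted_color=color, muted_alpha=0.2, legend=name)
```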
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sphinx/source/docs/user_guide/examples/interaction_legend_mute.py`
Content:
```
1 import pandas as pd
2
3 from bokeh.palettes import Spectral4
4 from bokeh.plotting import figure, output_file, show
5
6 p = figure(plot_width=800, plot_height=250, x_axis_type="datetime")
7 p.title.text = 'Click on legend entries to mute the corresponding lines'
8
9 for name, color in zip(['AAPL', 'IBM', 'MSFT', 'GOOG'], Spectral4):
10 df = pd.read_csv(
11 "http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014" % name,
12 parse_dates=['Date']
13 )
14 p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)
15
16 p.legend.location = "top_left"
17 p.legend.click_policy="mute"
18
19 output_file("interactive_legend.html", title="interactive_legend.py example")
20
21 show(p)
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py
--- a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py
+++ b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py
@@ -11,7 +11,8 @@
"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014" % name,
parse_dates=['Date']
)
- p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)
+ p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8,
+ muted_color=color, muted_alpha=0.2, legend=name)
p.legend.location = "top_left"
p.legend.click_policy="mute"
| {"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py\n--- a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py\n+++ b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py\n@@ -11,7 +11,8 @@\n \"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014\" % name,\n parse_dates=['Date']\n )\n- p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)\n+ p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8,\n+ muted_color=color, muted_alpha=0.2, legend=name)\n \n p.legend.location = \"top_left\"\n p.legend.click_policy=\"mute\"\n", "issue": "For Interactive Legends, a note about `muted_color` would be helpful\nFor the [Interactive Legend guide](https://github.com/bokeh/bokeh/blob/d8fcffa1c472bf641517ef81698bb6d057cbd30f/sphinx/source/docs/user_guide/interaction/legends.rst#id7), in addition to the note about `muted_alpha`, it would be helpful to also demonstrate `muted_color`.\n", "before_files": [{"content": "import pandas as pd\n\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\n\np = figure(plot_width=800, plot_height=250, x_axis_type=\"datetime\")\np.title.text = 'Click on legend entries to mute the corresponding lines'\n\nfor name, color in zip(['AAPL', 'IBM', 'MSFT', 'GOOG'], Spectral4):\n df = pd.read_csv(\n \"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014\" % name,\n parse_dates=['Date']\n )\n p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)\n\np.legend.location = \"top_left\"\np.legend.click_policy=\"mute\"\n\noutput_file(\"interactive_legend.html\", title=\"interactive_legend.py example\")\n\nshow(p)\n", "path": "sphinx/source/docs/user_guide/examples/interaction_legend_mute.py"}], "after_files": [{"content": "import pandas as pd\n\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\n\np = figure(plot_width=800, plot_height=250, x_axis_type=\"datetime\")\np.title.text = 'Click on legend entries to mute the corresponding lines'\n\nfor name, color in zip(['AAPL', 'IBM', 'MSFT', 'GOOG'], Spectral4):\n df = pd.read_csv(\n \"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014\" % name,\n parse_dates=['Date']\n )\n p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8,\n muted_color=color, muted_alpha=0.2, legend=name)\n\np.legend.location = \"top_left\"\np.legend.click_policy=\"mute\"\n\noutput_file(\"interactive_legend.html\", title=\"interactive_legend.py example\")\n\nshow(p)\n", "path": "sphinx/source/docs/user_guide/examples/interaction_legend_mute.py"}]} | 632 | 223 |
gh_patches_debug_57344 | rasdani/github-patches | git_diff | ethereum__consensus-specs-1102 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BLS and testing
Decided I wanted to get this out to explain the current state of testing, and **collect feedback** (implementers please comment) on what you need from testing, and your feelings about BLS usage in tests.
# BLS and testing
The two pain-points to get a pretty (and large) set of test-vectors out for clients are:
- BLS Signature creation
- BLS Signature verification
And side-issue, but easily resolved:
*efficient creation of a genesis state*:
Genesis creation becomes slow once BLS functionality is implemented in test-code (creation of signed deposits, and verification).
The solution would be to either cache it, or create it directly, without going through the spec functions (current temporary solution on experiment branch).
## Status
Talking about the status on [`spectest-deco` PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052) here, based on the `v06x` branch, where we are developing 0.6 improvements. (to be merged back into dev later)
### The testing pipeline currently looks like:
- py-spec, calls BLS stub
- test-helpers, don't create self-signed objects with valid signatures
- py-test code, unified with test-vector-creation (see [PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052))
- py-test runner to run spec-tests, purely for assertions
- test-generator running the spec-tests, passing `generator_mode=true` to each of them, making them output a test-vector.
### Pytests status:
- move from `tests/` to `eth2spec/test`, i.e. part of package
- removed use of `pytest`
- annotated with `@spec_test` or similar (see PR 1052)
- as part of test-generation effort, yay for shared effort:
- expanded in block-operation testing: [coverage checklist here](https://github.com/ethereum/eth2.0-specs/issues/927)
- slightly faster, less deep-copies
- stuck on BLS stub (no sig creation/verification)
### Test-generation status:
- BLS, SSZ-generic, SSZ-static, shuffling test generators still all in place and up to date (`v06x` branch)
- `operations` test-gen uses test-package ability to output test-vectors for each test-case
- but no valid signatures
- lack of a definition how to handle this signature problem as a test-consumer
- there are no signature-related testcases
- turning BLS off would effectively let you check conformance, but it's hacky, and not remotely a good practice to have even an option for...
- it's approx. ~140MB worth (iirc) of yaml encoded state-transitions, covering many edge-cases. Worth getting into the hands of implementers quickly.
- `sanity` tests updated and can be cleanly used for test-generation, but require more work to define the format of the test-vectors, as there is more variety.
- `epoch` processing tests also updated, also can be used, not as complete as block-processing, lower priority.
## Possible ways forward:
- Simple but hacky: "turn BLS off for testing"
- No "BLS off", BLS ON on client side, but only partially on spec side. Rely on signature verification not being hit before anything else during testing
- valid test cases generated with valid signatures
- invalid test cases marked: does it error because of BLS? And runners should check the reason for aborting processing: if it doesn't match, the test should fail. Now these pytests don't need full BLS update work, and can be released somewhat quicker
- "BLS on", more work (~1 week)
- slower on test-generation, but we get the best kind of test-vectors: correct, BLS verification ON.
  - blocker: what if a test case fails because of a signature error (test setup not creating the sig correctly) instead of a real assertion case? The spec will look correct and pass the tests, but things are not right. We need to mark sig-verification errors distinctly, so we can catch these problems when we turn BLS on in the pyspec. How: instead of `assert verify_...`, just call `verify_...` and make it raise a special `BLSVerificationError` (or something like that); a small sketch follows below this list.
- We likely still want to mark tests as "signature related" or not, so implementers can catch it easily if their code is not aborting properly before signature verification, to assure invalid inputs are not costly.
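To make the `BLSVerificationError` idea above concrete, a minimal sketch (the wrapper name and signature are made up for illustration; `bls_verify` stands in for the spec's existing verification call):

```python
class BLSVerificationError(Exception):
    """Raised when a BLS signature check fails, instead of tripping a bare assert."""


def verify_or_raise(pubkey, message_hash, signature, domain):
    # bls_verify is assumed to be the existing spec-level verification helper.
    if not bls_verify(pubkey, message_hash, signature, domain):
        # A dedicated error type lets test runners tell signature failures
        # apart from ordinary assertion failures in state-transition logic.
        raise BLSVerificationError('BLS signature verification failed')
```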
A work-in-progress introduction of actual full BLS usage in the pytests is started here: [`tests-with-sigs` branch](https://github.com/ethereum/eth2.0-specs/tree/tests-with-sigs)
Suggestions welcome.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/phase0/build_spec.py`
Content:
```
1 import sys
2 import function_puller
3
4
5 def build_phase0_spec(sourcefile, outfile):
6 code_lines = []
7 code_lines.append("""
8 from typing import (
9 Any,
10 Dict,
11 List,
12 NewType,
13 Tuple,
14 )
15 from eth2spec.utils.minimal_ssz import *
16 from eth2spec.utils.bls_stub import *
17
18 """)
19 for i in (1, 2, 3, 4, 8, 32, 48, 96):
20 code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i))
21
22 code_lines.append("""
23
24 # stub, will get overwritten by real var
25 SLOTS_PER_EPOCH = 64
26
27
28 Slot = NewType('Slot', int) # uint64
29 Epoch = NewType('Epoch', int) # uint64
30 Shard = NewType('Shard', int) # uint64
31 ValidatorIndex = NewType('ValidatorIndex', int) # uint64
32 Gwei = NewType('Gwei', int) # uint64
33 Bytes32 = NewType('Bytes32', bytes) # bytes32
34 BLSPubkey = NewType('BLSPubkey', bytes) # bytes48
35 BLSSignature = NewType('BLSSignature', bytes) # bytes96
36 Store = None
37 """)
38
39 code_lines += function_puller.get_spec(sourcefile)
40
41 code_lines.append("""
42 # Monkey patch validator compute committee code
43 _compute_committee = compute_committee
44 committee_cache = {}
45
46
47 def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:
48 param_hash = (hash_tree_root(indices), seed, index, count)
49
50 if param_hash in committee_cache:
51 return committee_cache[param_hash]
52 else:
53 ret = _compute_committee(indices, seed, index, count)
54 committee_cache[param_hash] = ret
55 return ret
56
57
58 # Monkey patch hash cache
59 _hash = hash
60 hash_cache = {}
61
62
63 def hash(x):
64 if x in hash_cache:
65 return hash_cache[x]
66 else:
67 ret = _hash(x)
68 hash_cache[x] = ret
69 return ret
70
71 # Access to overwrite spec constants based on configuration
72 def apply_constants_preset(preset: Dict[str, Any]):
73 global_vars = globals()
74 for k, v in preset.items():
75 global_vars[k] = v
76
77 # Deal with derived constants
78 global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)
79
80 # Initialize SSZ types again, to account for changed lengths
81 init_SSZ_types()
82 """)
83
84 with open(outfile, 'w') as out:
85 out.write("\n".join(code_lines))
86
87
88 if __name__ == '__main__':
89 if len(sys.argv) < 3:
90 print("Usage: <source phase0> <output phase0 pyspec>")
91 build_phase0_spec(sys.argv[1], sys.argv[2])
92
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py
--- a/scripts/phase0/build_spec.py
+++ b/scripts/phase0/build_spec.py
@@ -13,7 +13,7 @@
Tuple,
)
from eth2spec.utils.minimal_ssz import *
-from eth2spec.utils.bls_stub import *
+from eth2spec.utils.bls import *
""")
for i in (1, 2, 3, 4, 8, 32, 48, 96):
| {"golden_diff": "diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py\n--- a/scripts/phase0/build_spec.py\n+++ b/scripts/phase0/build_spec.py\n@@ -13,7 +13,7 @@\n Tuple,\n )\n from eth2spec.utils.minimal_ssz import *\n-from eth2spec.utils.bls_stub import *\n+from eth2spec.utils.bls import *\n \n \"\"\")\n for i in (1, 2, 3, 4, 8, 32, 48, 96):\n", "issue": "BLS and testing\nDecided I wanted to get this out to explain the current state of testing, and **collect feedback** (implementers please comment) on what you need from testing, and your feelings about BLS usage in tests.\r\n\r\n# BLS and testing\r\n\r\nThe two pain-points to get a pretty (and large) set of test-vectors out for clients are:\r\n- BLS Signature creation\r\n- BLS Signature verification\r\n\r\nAnd side-issue, but easily resolved:\r\n*efficient creation of a genesis state*:\r\nWhen BLS functionality is implemented in test-code (creation of signed deposits, and verification).\r\nSolution would be to either cache it, or create it directly, without going through the spec functions (current temporary solution on experiment branch).\r\n\r\n## Status\r\n\r\nTalking about the status on [`spectest-deco` PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052) here, based on the `v06x` branch, where we are developing 0.6 improvements. (to be merged back into dev later)\r\n\r\n### The testing pipeline currently looks like:\r\n\r\n- py-spec, calls BLS stub\r\n- test-helpers, don't create self-signed objects with valid signatures\r\n- py-test code, unified with test-vector-creation (see [PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052))\r\n- py-test runner to run spec-tests, purely for assertions\r\n- test-generator running the spec-tests, passing `generator_mode=true` to each of them, making them output a test-vector.\r\n\r\n### Pytests status:\r\n\r\n- move from `tests/` to `eth2spec/test`, i.e. part of package\r\n - removed use of `pytest`\r\n - annotated with `@spec_test` or similar (see PR 1052)\r\n- as part of test-generation effort, yay for shared effort:\r\n - expanded in block-operation testing: [coverage checklist here](https://github.com/ethereum/eth2.0-specs/issues/927)\r\n - slightly faster, less deep-copies\r\n- stuck on BLS stub (no sig creation/verification)\r\n\r\n### Test-generation status:\r\n\r\n- BLS, SSZ-generic, SSZ-static, shuffling test generators still all in place and up to date (`v06x` branch)\r\n- `operations` test-gen uses test-package ability to output test-vectors for each test-case\r\n - but no valid signatures\r\n - lack of a definition how to handle this signature problem as a test-consumer\r\n - there are no signature-related testcases\r\n - turning BLS off would effectively let you check conformance, but it's hacky, and not remotely a good practice to have even an option for...\r\n - it's approx. ~140MB worth (iirc) of yaml encoded state-transitions, covering many edge-cases. Worth to get in the hands of implementers quick.\r\n- `sanity` tests updated and can be cleanly used for test-generation, but requires more work to define the format of the test-vectors, as they is more variety.\r\n- `epoch` processing tests also updated, also can be used, not as complete as block-processing, lower priority.\r\n\r\n## Possible ways forward:\r\n\r\n- Simple but hacky: \"turn BLS off for testing\"\r\n- No \"BLS off\", BLS ON on client side, but only partially on spec side. 
Rely on signature verification not being hit before anything else during testing\r\n - valid test cases generated with valid signatures\r\n - invalid test cases marked: does it error because of BLS? And runners should check the reason for aborting processing: if it doesn't match, the test should fail. Now these pytests don't need full BLS update work, and can be released somewhat quicker\r\n- \"BLS on\", more work (~1 week)\r\n - slower on test-generation, but we get the best kind of test-vectors: correct, BLS verification ON.\r\n - blocker: what if a test case fails because of a signature error (test setup not creating the sig correctly), instead of a real assertion case. Spec will look correct, passes tests, but things are not right. We need to mark Sig-verification errors distinctly, so we can catch these problems when we turn BLS on in the pyspec. How: instead of `assert verify_...`, just `verify_...`, and make it raise a special `BLSVerificationError` (or something like that)\r\n - We likely still want to mark tests as \"signature related\" or not, so implementers can catch it easily if their code is not aborting properly before signature verification, to assure invalid inputs are not costly.\r\n\r\nA work-in-progress introduction of actual full BLS usage in the pytests is started here: [`tests-with-sigs` branch](https://github.com/ethereum/eth2.0-specs/tree/tests-with-sigs)\r\n\r\nSuggestions welcome.\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import sys\nimport function_puller\n\n\ndef build_phase0_spec(sourcefile, outfile):\n code_lines = []\n code_lines.append(\"\"\"\nfrom typing import (\n Any,\n Dict,\n List,\n NewType,\n Tuple,\n)\nfrom eth2spec.utils.minimal_ssz import *\nfrom eth2spec.utils.bls_stub import *\n\n\"\"\")\n for i in (1, 2, 3, 4, 8, 32, 48, 96):\n code_lines.append(\"def int_to_bytes%d(x): return x.to_bytes(%d, 'little')\" % (i, i))\n\n code_lines.append(\"\"\"\n\n# stub, will get overwritten by real var\nSLOTS_PER_EPOCH = 64\n\n\nSlot = NewType('Slot', int) # uint64\nEpoch = NewType('Epoch', int) # uint64\nShard = NewType('Shard', int) # uint64\nValidatorIndex = NewType('ValidatorIndex', int) # uint64\nGwei = NewType('Gwei', int) # uint64\nBytes32 = NewType('Bytes32', bytes) # bytes32\nBLSPubkey = NewType('BLSPubkey', bytes) # bytes48\nBLSSignature = NewType('BLSSignature', bytes) # bytes96\nStore = None\n\"\"\")\n\n code_lines += function_puller.get_spec(sourcefile)\n\n code_lines.append(\"\"\"\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache = {}\n\n\ndef compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:\n param_hash = (hash_tree_root(indices), seed, index, count)\n\n if param_hash in committee_cache:\n return committee_cache[param_hash]\n else:\n ret = _compute_committee(indices, seed, index, count)\n committee_cache[param_hash] = ret\n return ret\n\n\n# Monkey patch hash cache\n_hash = hash\nhash_cache = {}\n\n\ndef hash(x):\n if x in hash_cache:\n return hash_cache[x]\n else:\n ret = _hash(x)\n hash_cache[x] = ret\n return ret\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\n for k, v in preset.items():\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n\"\"\")\n\n with open(outfile, 'w') 
as out:\n out.write(\"\\n\".join(code_lines))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Usage: <source phase0> <output phase0 pyspec>\")\n build_phase0_spec(sys.argv[1], sys.argv[2])\n\n", "path": "scripts/phase0/build_spec.py"}], "after_files": [{"content": "import sys\nimport function_puller\n\n\ndef build_phase0_spec(sourcefile, outfile):\n code_lines = []\n code_lines.append(\"\"\"\nfrom typing import (\n Any,\n Dict,\n List,\n NewType,\n Tuple,\n)\nfrom eth2spec.utils.minimal_ssz import *\nfrom eth2spec.utils.bls import *\n\n\"\"\")\n for i in (1, 2, 3, 4, 8, 32, 48, 96):\n code_lines.append(\"def int_to_bytes%d(x): return x.to_bytes(%d, 'little')\" % (i, i))\n\n code_lines.append(\"\"\"\n\n# stub, will get overwritten by real var\nSLOTS_PER_EPOCH = 64\n\n\nSlot = NewType('Slot', int) # uint64\nEpoch = NewType('Epoch', int) # uint64\nShard = NewType('Shard', int) # uint64\nValidatorIndex = NewType('ValidatorIndex', int) # uint64\nGwei = NewType('Gwei', int) # uint64\nBytes32 = NewType('Bytes32', bytes) # bytes32\nBLSPubkey = NewType('BLSPubkey', bytes) # bytes48\nBLSSignature = NewType('BLSSignature', bytes) # bytes96\nStore = None\n\"\"\")\n\n code_lines += function_puller.get_spec(sourcefile)\n\n code_lines.append(\"\"\"\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache = {}\n\n\ndef compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:\n param_hash = (hash_tree_root(indices), seed, index, count)\n\n if param_hash in committee_cache:\n return committee_cache[param_hash]\n else:\n ret = _compute_committee(indices, seed, index, count)\n committee_cache[param_hash] = ret\n return ret\n\n\n# Monkey patch hash cache\n_hash = hash\nhash_cache = {}\n\n\ndef hash(x):\n if x in hash_cache:\n return hash_cache[x]\n else:\n ret = _hash(x)\n hash_cache[x] = ret\n return ret\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\n for k, v in preset.items():\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n\"\"\")\n\n with open(outfile, 'w') as out:\n out.write(\"\\n\".join(code_lines))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Usage: <source phase0> <output phase0 pyspec>\")\n build_phase0_spec(sys.argv[1], sys.argv[2])\n\n", "path": "scripts/phase0/build_spec.py"}]} | 2,161 | 121 |
gh_patches_debug_23497 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-6234 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use `modified` time instead of `created` time for recording search queries
We should use `modified` time here and not the `created` time.
`modified` time gets updated automatically, so the query should also be changed.
https://github.com/readthedocs/readthedocs.org/blob/98a7ff1758d829323e5ef9949f57401f7103ec4e/readthedocs/search/tasks.py#L213-L214
https://github.com/readthedocs/readthedocs.org/blob/98a7ff1758d829323e5ef9949f57401f7103ec4e/readthedocs/search/tasks.py#L159-L163
Ref: [#6088 (comment)](https://github.com/readthedocs/readthedocs.org/pull/6088#discussion_r318738599)
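A rough sketch of the intended change around the linked lines (illustrative only, reusing the existing names from `record_search_query`):

```python
# Match recent partial queries on the auto-updating `modified` timestamp
# instead of `created`.
partial_query_qs = SearchQuery.objects.filter(
    project__slug=project_slug,
    version__slug=version_slug,
    modified__gte=before_10_sec,
).order_by('-modified')
```

The manual `obj.created = time; obj.save()` step after `SearchQuery.objects.create(...)` then becomes unnecessary, since the timestamps are maintained automatically on save.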
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/search/tasks.py`
Content:
```
1 import logging
2
3 from dateutil.parser import parse
4 from django.apps import apps
5 from django.utils import timezone
6 from django_elasticsearch_dsl.registries import registry
7
8 from readthedocs.builds.models import Version
9 from readthedocs.projects.models import Project
10 from readthedocs.search.models import SearchQuery
11 from readthedocs.worker import app
12 from .utils import _get_index, _get_document
13
14 log = logging.getLogger(__name__)
15
16
17 @app.task(queue='web')
18 def index_objects_to_es(
19 app_label, model_name, document_class, index_name=None, chunk=None, objects_id=None
20 ):
21
22 if chunk and objects_id:
23 raise ValueError('You can not pass both chunk and objects_id.')
24
25 if not (chunk or objects_id):
26 raise ValueError('You must pass a chunk or objects_id.')
27
28 model = apps.get_model(app_label, model_name)
29 document = _get_document(model=model, document_class=document_class)
30 doc_obj = document()
31
32 # WARNING: This must use the exact same queryset as from where we get the ID's
33 # There is a chance there is a race condition here as the ID's may change as the task runs,
34 # so we need to think through this a bit more and probably pass explicit ID's,
35 # but there are performance issues with that on large model sets
36 queryset = doc_obj.get_queryset()
37 if chunk:
38 # Chunk is a tuple with start and end index of queryset
39 start = chunk[0]
40 end = chunk[1]
41 queryset = queryset[start:end]
42 elif objects_id:
43 queryset = queryset.filter(id__in=objects_id)
44
45 if index_name:
46 # Hack the index name temporarily for reindexing tasks
47 old_index_name = document._doc_type.index
48 document._doc_type.index = index_name
49 log.info('Replacing index name %s with %s', old_index_name, index_name)
50
51 log.info("Indexing model: %s, '%s' objects", model.__name__, queryset.count())
52 doc_obj.update(queryset.iterator())
53
54 if index_name:
55 log.info('Undoing index replacement, settings %s with %s',
56 document._doc_type.index, old_index_name)
57 document._doc_type.index = old_index_name
58
59
60 @app.task(queue='web')
61 def delete_objects_in_es(app_label, model_name, document_class, objects_id):
62 model = apps.get_model(app_label, model_name)
63 document = _get_document(model=model, document_class=document_class)
64 doc_obj = document()
65 queryset = doc_obj.get_queryset()
66 queryset = queryset.filter(id__in=objects_id)
67 log.info("Deleting model: %s, '%s' objects", model.__name__, queryset.count())
68 try:
69 # This is a common case that we should be handling a better way
70 doc_obj.update(queryset.iterator(), action='delete')
71 except Exception:
72 log.warning('Unable to delete a subset of files. Continuing.', exc_info=True)
73
74
75 @app.task(queue='web')
76 def create_new_es_index(app_label, model_name, index_name, new_index_name):
77 model = apps.get_model(app_label, model_name)
78 indices = registry.get_indices(models=[model])
79 old_index = _get_index(indices=indices, index_name=index_name)
80 new_index = old_index.clone(name=new_index_name)
81 new_index.create()
82
83
84 @app.task(queue='web')
85 def switch_es_index(app_label, model_name, index_name, new_index_name):
86 model = apps.get_model(app_label, model_name)
87 indices = registry.get_indices(models=[model])
88 old_index = _get_index(indices=indices, index_name=index_name)
89 new_index = old_index.clone(name=new_index_name)
90 old_index_actual_name = None
91
92 if old_index.exists():
93 # Alias can not be used to delete an index.
94 # https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-delete-index.html
95 # So get the index actual name to delete it
96 old_index_info = old_index.get()
97 # The info is a dictionary and the key is the actual name of the index
98 old_index_actual_name = list(old_index_info.keys())[0]
99
100 # Put alias into the new index name and delete the old index if its exist
101 new_index.put_alias(name=index_name)
102 if old_index_actual_name:
103 old_index.connection.indices.delete(index=old_index_actual_name)
104
105
106 @app.task(queue='web')
107 def index_missing_objects(app_label, model_name, document_class, index_generation_time):
108 """
109 Task to insure that none of the object is missed from indexing.
110
111 The object ids are sent to `index_objects_to_es` task for indexing.
112 While the task is running, new objects can be created/deleted in database
113 and they will not be in the tasks for indexing into ES.
114 This task will index all the objects that got into DB after the `latest_indexed` timestamp
115 to ensure that everything is in ES index.
116 """
117 model = apps.get_model(app_label, model_name)
118 document = _get_document(model=model, document_class=document_class)
119 query_string = '{}__lte'.format(document.modified_model_field)
120 queryset = document().get_queryset().exclude(**{query_string: index_generation_time})
121 document().update(queryset.iterator())
122
123 log.info("Indexed %s missing objects from model: %s'", queryset.count(), model.__name__)
124
125 # TODO: Figure out how to remove the objects from ES index that has been deleted
126
127
128 @app.task(queue='web')
129 def delete_old_search_queries_from_db():
130 """
131 Delete old SearchQuery objects.
132
133 This is run by celery beat every day.
134 """
135 last_3_months = timezone.now().date() - timezone.timedelta(days=90)
136 search_queries_qs = SearchQuery.objects.filter(
137 created__date__lte=last_3_months,
138 )
139
140 if search_queries_qs.exists():
141 log.info('Deleting search queries for last 3 months. Total: %s', search_queries_qs.count())
142 search_queries_qs.delete()
143
144
145 @app.task(queue='web')
146 def record_search_query(project_slug, version_slug, query, total_results, time_string):
147 """Record/update search query in database."""
148 if not project_slug or not version_slug or not query:
149 log.debug(
150 'Not recording the search query. Passed arguments: '
151 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % (
152 project_slug, version_slug, query, total_results, time_string
153 )
154 )
155 return
156
157 time = parse(time_string)
158 before_10_sec = time - timezone.timedelta(seconds=10)
159 partial_query_qs = SearchQuery.objects.filter(
160 project__slug=project_slug,
161 version__slug=version_slug,
162 created__gte=before_10_sec,
163 ).order_by('-created')
164
165 # check if partial query exists,
166 # if yes, then just update the object.
167 for partial_query in partial_query_qs.iterator():
168 if query.startswith(partial_query.query):
169 partial_query.created = time
170 partial_query.query = query
171 partial_query.save()
172 return
173
174 # don't record query with zero results.
175 if not total_results:
176 log.debug(
177 'Not recording search query because of zero results. Passed arguments: '
178 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % (
179 project_slug, version_slug, query, total_results, time
180 )
181 )
182 return
183
184 project = Project.objects.filter(slug=project_slug).first()
185 if not project:
186 log.debug(
187 'Not recording the search query because project does not exist. '
188 'project_slug: %s' % (
189 project_slug
190 )
191 )
192 return
193
194 version_qs = Version.objects.filter(project=project, slug=version_slug)
195
196 if not version_qs.exists():
197 log.debug(
198 'Not recording the search query because version does not exist. '
199 'project_slug: %s, version_slug: %s' % (
200 project_slug, version_slug
201 )
202 )
203 return
204
205 version = version_qs.first()
206
207 # make a new SearchQuery object.
208 obj = SearchQuery.objects.create(
209 project=project,
210 version=version,
211 query=query,
212 )
213 obj.created = time
214 obj.save()
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/search/tasks.py b/readthedocs/search/tasks.py
--- a/readthedocs/search/tasks.py
+++ b/readthedocs/search/tasks.py
@@ -159,14 +159,13 @@
partial_query_qs = SearchQuery.objects.filter(
project__slug=project_slug,
version__slug=version_slug,
- created__gte=before_10_sec,
- ).order_by('-created')
+ modified__gte=before_10_sec,
+ ).order_by('-modified')
# check if partial query exists,
# if yes, then just update the object.
for partial_query in partial_query_qs.iterator():
if query.startswith(partial_query.query):
- partial_query.created = time
partial_query.query = query
partial_query.save()
return
@@ -205,10 +204,8 @@
version = version_qs.first()
# make a new SearchQuery object.
- obj = SearchQuery.objects.create(
+ SearchQuery.objects.create(
project=project,
version=version,
query=query,
)
- obj.created = time
- obj.save()
| {"golden_diff": "diff --git a/readthedocs/search/tasks.py b/readthedocs/search/tasks.py\n--- a/readthedocs/search/tasks.py\n+++ b/readthedocs/search/tasks.py\n@@ -159,14 +159,13 @@\n partial_query_qs = SearchQuery.objects.filter(\n project__slug=project_slug,\n version__slug=version_slug,\n- created__gte=before_10_sec,\n- ).order_by('-created')\n+ modified__gte=before_10_sec,\n+ ).order_by('-modified')\n \n # check if partial query exists,\n # if yes, then just update the object.\n for partial_query in partial_query_qs.iterator():\n if query.startswith(partial_query.query):\n- partial_query.created = time\n partial_query.query = query\n partial_query.save()\n return\n@@ -205,10 +204,8 @@\n version = version_qs.first()\n \n # make a new SearchQuery object.\n- obj = SearchQuery.objects.create(\n+ SearchQuery.objects.create(\n project=project,\n version=version,\n query=query,\n )\n- obj.created = time\n- obj.save()\n", "issue": "Use `modified` time instead of `created` time for recording search queries\nWe should use `modified` time here and not the `created` time.\r\n`modified` time gets updated automatically, so the query should also be changed.\r\n\r\nhttps://github.com/readthedocs/readthedocs.org/blob/98a7ff1758d829323e5ef9949f57401f7103ec4e/readthedocs/search/tasks.py#L213-L214\r\n\r\nhttps://github.com/readthedocs/readthedocs.org/blob/98a7ff1758d829323e5ef9949f57401f7103ec4e/readthedocs/search/tasks.py#L159-L163\r\n\r\nRef: [#6088 (comment)](https://github.com/readthedocs/readthedocs.org/pull/6088#discussion_r318738599)\n", "before_files": [{"content": "import logging\n\nfrom dateutil.parser import parse\nfrom django.apps import apps\nfrom django.utils import timezone\nfrom django_elasticsearch_dsl.registries import registry\n\nfrom readthedocs.builds.models import Version\nfrom readthedocs.projects.models import Project\nfrom readthedocs.search.models import SearchQuery\nfrom readthedocs.worker import app\nfrom .utils import _get_index, _get_document\n\nlog = logging.getLogger(__name__)\n\n\[email protected](queue='web')\ndef index_objects_to_es(\n app_label, model_name, document_class, index_name=None, chunk=None, objects_id=None\n):\n\n if chunk and objects_id:\n raise ValueError('You can not pass both chunk and objects_id.')\n\n if not (chunk or objects_id):\n raise ValueError('You must pass a chunk or objects_id.')\n\n model = apps.get_model(app_label, model_name)\n document = _get_document(model=model, document_class=document_class)\n doc_obj = document()\n\n # WARNING: This must use the exact same queryset as from where we get the ID's\n # There is a chance there is a race condition here as the ID's may change as the task runs,\n # so we need to think through this a bit more and probably pass explicit ID's,\n # but there are performance issues with that on large model sets\n queryset = doc_obj.get_queryset()\n if chunk:\n # Chunk is a tuple with start and end index of queryset\n start = chunk[0]\n end = chunk[1]\n queryset = queryset[start:end]\n elif objects_id:\n queryset = queryset.filter(id__in=objects_id)\n\n if index_name:\n # Hack the index name temporarily for reindexing tasks\n old_index_name = document._doc_type.index\n document._doc_type.index = index_name\n log.info('Replacing index name %s with %s', old_index_name, index_name)\n\n log.info(\"Indexing model: %s, '%s' objects\", model.__name__, queryset.count())\n doc_obj.update(queryset.iterator())\n\n if index_name:\n log.info('Undoing index replacement, settings %s with %s',\n document._doc_type.index, 
old_index_name)\n document._doc_type.index = old_index_name\n\n\[email protected](queue='web')\ndef delete_objects_in_es(app_label, model_name, document_class, objects_id):\n model = apps.get_model(app_label, model_name)\n document = _get_document(model=model, document_class=document_class)\n doc_obj = document()\n queryset = doc_obj.get_queryset()\n queryset = queryset.filter(id__in=objects_id)\n log.info(\"Deleting model: %s, '%s' objects\", model.__name__, queryset.count())\n try:\n # This is a common case that we should be handling a better way\n doc_obj.update(queryset.iterator(), action='delete')\n except Exception:\n log.warning('Unable to delete a subset of files. Continuing.', exc_info=True)\n\n\[email protected](queue='web')\ndef create_new_es_index(app_label, model_name, index_name, new_index_name):\n model = apps.get_model(app_label, model_name)\n indices = registry.get_indices(models=[model])\n old_index = _get_index(indices=indices, index_name=index_name)\n new_index = old_index.clone(name=new_index_name)\n new_index.create()\n\n\[email protected](queue='web')\ndef switch_es_index(app_label, model_name, index_name, new_index_name):\n model = apps.get_model(app_label, model_name)\n indices = registry.get_indices(models=[model])\n old_index = _get_index(indices=indices, index_name=index_name)\n new_index = old_index.clone(name=new_index_name)\n old_index_actual_name = None\n\n if old_index.exists():\n # Alias can not be used to delete an index.\n # https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-delete-index.html\n # So get the index actual name to delete it\n old_index_info = old_index.get()\n # The info is a dictionary and the key is the actual name of the index\n old_index_actual_name = list(old_index_info.keys())[0]\n\n # Put alias into the new index name and delete the old index if its exist\n new_index.put_alias(name=index_name)\n if old_index_actual_name:\n old_index.connection.indices.delete(index=old_index_actual_name)\n\n\[email protected](queue='web')\ndef index_missing_objects(app_label, model_name, document_class, index_generation_time):\n \"\"\"\n Task to insure that none of the object is missed from indexing.\n\n The object ids are sent to `index_objects_to_es` task for indexing.\n While the task is running, new objects can be created/deleted in database\n and they will not be in the tasks for indexing into ES.\n This task will index all the objects that got into DB after the `latest_indexed` timestamp\n to ensure that everything is in ES index.\n \"\"\"\n model = apps.get_model(app_label, model_name)\n document = _get_document(model=model, document_class=document_class)\n query_string = '{}__lte'.format(document.modified_model_field)\n queryset = document().get_queryset().exclude(**{query_string: index_generation_time})\n document().update(queryset.iterator())\n\n log.info(\"Indexed %s missing objects from model: %s'\", queryset.count(), model.__name__)\n\n # TODO: Figure out how to remove the objects from ES index that has been deleted\n\n\[email protected](queue='web')\ndef delete_old_search_queries_from_db():\n \"\"\"\n Delete old SearchQuery objects.\n\n This is run by celery beat every day.\n \"\"\"\n last_3_months = timezone.now().date() - timezone.timedelta(days=90)\n search_queries_qs = SearchQuery.objects.filter(\n created__date__lte=last_3_months,\n )\n\n if search_queries_qs.exists():\n log.info('Deleting search queries for last 3 months. 
Total: %s', search_queries_qs.count())\n search_queries_qs.delete()\n\n\[email protected](queue='web')\ndef record_search_query(project_slug, version_slug, query, total_results, time_string):\n \"\"\"Record/update search query in database.\"\"\"\n if not project_slug or not version_slug or not query:\n log.debug(\n 'Not recording the search query. Passed arguments: '\n 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % (\n project_slug, version_slug, query, total_results, time_string\n )\n )\n return\n\n time = parse(time_string)\n before_10_sec = time - timezone.timedelta(seconds=10)\n partial_query_qs = SearchQuery.objects.filter(\n project__slug=project_slug,\n version__slug=version_slug,\n created__gte=before_10_sec,\n ).order_by('-created')\n\n # check if partial query exists,\n # if yes, then just update the object.\n for partial_query in partial_query_qs.iterator():\n if query.startswith(partial_query.query):\n partial_query.created = time\n partial_query.query = query\n partial_query.save()\n return\n\n # don't record query with zero results.\n if not total_results:\n log.debug(\n 'Not recording search query because of zero results. Passed arguments: '\n 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % (\n project_slug, version_slug, query, total_results, time\n )\n )\n return\n\n project = Project.objects.filter(slug=project_slug).first()\n if not project:\n log.debug(\n 'Not recording the search query because project does not exist. '\n 'project_slug: %s' % (\n project_slug\n )\n )\n return\n\n version_qs = Version.objects.filter(project=project, slug=version_slug)\n\n if not version_qs.exists():\n log.debug(\n 'Not recording the search query because version does not exist. '\n 'project_slug: %s, version_slug: %s' % (\n project_slug, version_slug\n )\n )\n return\n\n version = version_qs.first()\n\n # make a new SearchQuery object.\n obj = SearchQuery.objects.create(\n project=project,\n version=version,\n query=query,\n )\n obj.created = time\n obj.save()\n", "path": "readthedocs/search/tasks.py"}], "after_files": [{"content": "import logging\n\nfrom dateutil.parser import parse\nfrom django.apps import apps\nfrom django.utils import timezone\nfrom django_elasticsearch_dsl.registries import registry\n\nfrom readthedocs.builds.models import Version\nfrom readthedocs.projects.models import Project\nfrom readthedocs.search.models import SearchQuery\nfrom readthedocs.worker import app\nfrom .utils import _get_index, _get_document\n\nlog = logging.getLogger(__name__)\n\n\[email protected](queue='web')\ndef index_objects_to_es(\n app_label, model_name, document_class, index_name=None, chunk=None, objects_id=None\n):\n\n if chunk and objects_id:\n raise ValueError('You can not pass both chunk and objects_id.')\n\n if not (chunk or objects_id):\n raise ValueError('You must pass a chunk or objects_id.')\n\n model = apps.get_model(app_label, model_name)\n document = _get_document(model=model, document_class=document_class)\n doc_obj = document()\n\n # WARNING: This must use the exact same queryset as from where we get the ID's\n # There is a chance there is a race condition here as the ID's may change as the task runs,\n # so we need to think through this a bit more and probably pass explicit ID's,\n # but there are performance issues with that on large model sets\n queryset = doc_obj.get_queryset()\n if chunk:\n # Chunk is a tuple with start and end index of queryset\n start = chunk[0]\n end = chunk[1]\n queryset = 
queryset[start:end]\n elif objects_id:\n queryset = queryset.filter(id__in=objects_id)\n\n if index_name:\n # Hack the index name temporarily for reindexing tasks\n old_index_name = document._doc_type.index\n document._doc_type.index = index_name\n log.info('Replacing index name %s with %s', old_index_name, index_name)\n\n log.info(\"Indexing model: %s, '%s' objects\", model.__name__, queryset.count())\n doc_obj.update(queryset.iterator())\n\n if index_name:\n log.info('Undoing index replacement, settings %s with %s',\n document._doc_type.index, old_index_name)\n document._doc_type.index = old_index_name\n\n\[email protected](queue='web')\ndef delete_objects_in_es(app_label, model_name, document_class, objects_id):\n model = apps.get_model(app_label, model_name)\n document = _get_document(model=model, document_class=document_class)\n doc_obj = document()\n queryset = doc_obj.get_queryset()\n queryset = queryset.filter(id__in=objects_id)\n log.info(\"Deleting model: %s, '%s' objects\", model.__name__, queryset.count())\n try:\n # This is a common case that we should be handling a better way\n doc_obj.update(queryset.iterator(), action='delete')\n except Exception:\n log.warning('Unable to delete a subset of files. Continuing.', exc_info=True)\n\n\[email protected](queue='web')\ndef create_new_es_index(app_label, model_name, index_name, new_index_name):\n model = apps.get_model(app_label, model_name)\n indices = registry.get_indices(models=[model])\n old_index = _get_index(indices=indices, index_name=index_name)\n new_index = old_index.clone(name=new_index_name)\n new_index.create()\n\n\[email protected](queue='web')\ndef switch_es_index(app_label, model_name, index_name, new_index_name):\n model = apps.get_model(app_label, model_name)\n indices = registry.get_indices(models=[model])\n old_index = _get_index(indices=indices, index_name=index_name)\n new_index = old_index.clone(name=new_index_name)\n old_index_actual_name = None\n\n if old_index.exists():\n # Alias can not be used to delete an index.\n # https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-delete-index.html\n # So get the index actual name to delete it\n old_index_info = old_index.get()\n # The info is a dictionary and the key is the actual name of the index\n old_index_actual_name = list(old_index_info.keys())[0]\n\n # Put alias into the new index name and delete the old index if its exist\n new_index.put_alias(name=index_name)\n if old_index_actual_name:\n old_index.connection.indices.delete(index=old_index_actual_name)\n\n\[email protected](queue='web')\ndef index_missing_objects(app_label, model_name, document_class, index_generation_time):\n \"\"\"\n Task to insure that none of the object is missed from indexing.\n\n The object ids are sent to `index_objects_to_es` task for indexing.\n While the task is running, new objects can be created/deleted in database\n and they will not be in the tasks for indexing into ES.\n This task will index all the objects that got into DB after the `latest_indexed` timestamp\n to ensure that everything is in ES index.\n \"\"\"\n model = apps.get_model(app_label, model_name)\n document = _get_document(model=model, document_class=document_class)\n query_string = '{}__lte'.format(document.modified_model_field)\n queryset = document().get_queryset().exclude(**{query_string: index_generation_time})\n document().update(queryset.iterator())\n\n log.info(\"Indexed %s missing objects from model: %s'\", queryset.count(), model.__name__)\n\n # TODO: Figure out how to remove the objects 
from ES index that has been deleted\n\n\[email protected](queue='web')\ndef delete_old_search_queries_from_db():\n \"\"\"\n Delete old SearchQuery objects.\n\n This is run by celery beat every day.\n \"\"\"\n last_3_months = timezone.now().date() - timezone.timedelta(days=90)\n search_queries_qs = SearchQuery.objects.filter(\n created__date__lte=last_3_months,\n )\n\n if search_queries_qs.exists():\n log.info('Deleting search queries for last 3 months. Total: %s', search_queries_qs.count())\n search_queries_qs.delete()\n\n\[email protected](queue='web')\ndef record_search_query(project_slug, version_slug, query, total_results, time_string):\n \"\"\"Record/update search query in database.\"\"\"\n if not project_slug or not version_slug or not query:\n log.debug(\n 'Not recording the search query. Passed arguments: '\n 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % (\n project_slug, version_slug, query, total_results, time_string\n )\n )\n return\n\n time = parse(time_string)\n before_10_sec = time - timezone.timedelta(seconds=10)\n partial_query_qs = SearchQuery.objects.filter(\n project__slug=project_slug,\n version__slug=version_slug,\n modified__gte=before_10_sec,\n ).order_by('-modified')\n\n # check if partial query exists,\n # if yes, then just update the object.\n for partial_query in partial_query_qs.iterator():\n if query.startswith(partial_query.query):\n partial_query.query = query\n partial_query.save()\n return\n\n # don't record query with zero results.\n if not total_results:\n log.debug(\n 'Not recording search query because of zero results. Passed arguments: '\n 'project_slug: %s, version_slug: %s, query: %s, total_results: %s, time: %s' % (\n project_slug, version_slug, query, total_results, time\n )\n )\n return\n\n project = Project.objects.filter(slug=project_slug).first()\n if not project:\n log.debug(\n 'Not recording the search query because project does not exist. '\n 'project_slug: %s' % (\n project_slug\n )\n )\n return\n\n version_qs = Version.objects.filter(project=project, slug=version_slug)\n\n if not version_qs.exists():\n log.debug(\n 'Not recording the search query because version does not exist. '\n 'project_slug: %s, version_slug: %s' % (\n project_slug, version_slug\n )\n )\n return\n\n version = version_qs.first()\n\n # make a new SearchQuery object.\n SearchQuery.objects.create(\n project=project,\n version=version,\n query=query,\n )\n", "path": "readthedocs/search/tasks.py"}]} | 2,839 | 256 |
gh_patches_debug_13930 | rasdani/github-patches | git_diff | microsoft__ptvsd-1425 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
python without debugging won't start if there is a breakpoint
@tmdag commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540)
## Environment data
- VSCode Version: 1.33.1
- OS Version: Linux 5.0.7-200.fc29.x86_64
- Extension version (available under the Extensions sidebar): Python 2019.4.11987
- Anaconda Extension Pack 1.0.1
## Steps to reproduce:
Open Python code
create a breakpoint
run python without debugging (ctrl+F5)
Terminal output:
cd /home/user/dev/Pytool ; env PYTHONIOENCODING=UTF-8 PYTHONUNBUFFERED=1 /usr/bin/python3 /home/user/.vscode/extensions/ms-python.python-2019.4.11987/pythonFiles/ptvsd_launcher.py --default --nodebug --client --host localhost --port 36019 /home/user/dev/Pytool/mypytool.py
Terminated
Does this issue occur when all extensions are disabled?: Yes/No
Not sure - Python extension is required
## Enabled Extensions:
Bookmarks 10.4.3
C/C++ 0.221
Gist 3.0.3
Git History 0.4.6
GitLens - 9.6.3
markdownlint 0.26.0
Syncing 2.1.6
OpenCL 0.5.2
VEX 0.4.0
TAML 0.4.0
---
@jxramos commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540#issuecomment-488807421)
I'm seeing this too on macOS; it immediately exits with `Terminated: 15`. This behavior persists even if the breakpoint is disabled/unchecked.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ptvsd/runner.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import pydevd
6 import threading
7
8 from ptvsd.daemon import DaemonBase
9 from ptvsd.session import DebugSession
10 from ptvsd.wrapper import VSCLifecycleMsgProcessor
11 from pydevd import init_stdout_redirect, init_stderr_redirect
12
13
14 HOSTNAME = 'localhost'
15
16
17 def run(address, filename, is_module, *args, **kwargs):
18 # TODO: docstring
19 # TODO: client/server -> address
20 daemon = Daemon()
21 if not daemon.wait_for_launch(address):
22 return
23
24 debugger = pydevd.PyDB()
25 # We do not want some internal methods to get executed in non-debug mode.
26 debugger.init_matplotlib_support = lambda *arg: None
27 debugger.run(
28 file=filename,
29 globals=None,
30 locals=None,
31 is_module=is_module,
32 set_trace=False)
33
34
35 class Daemon(DaemonBase):
36 """The process-level manager for the VSC protocol debug adapter."""
37
38 LAUNCH_TIMEOUT = 10000 # seconds
39
40 class SESSION(DebugSession):
41 class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):
42 def on_invalid_request(self, request, args):
43 self.send_response(request, success=True)
44
45 def wait_for_launch(self, addr, timeout=LAUNCH_TIMEOUT):
46 # TODO: docstring
47 launched = threading.Event()
48 _, start_session = self.start_client(addr)
49 start_session(
50 notify_launch=launched.set,
51 )
52 return launched.wait(timeout)
53
54 def _start(self):
55 import weakref
56 weak_self = weakref.ref(self) # Avoid cyclic ref
57
58 def on_stdout(msg):
59 self = weak_self()
60 if self is not None:
61 self._send_output('stdout', msg)
62
63 def on_stderr(msg):
64 self = weak_self()
65 if self is not None:
66 self._send_output('stderr', msg)
67
68 init_stdout_redirect(on_stdout)
69 init_stderr_redirect(on_stderr)
70 return NoSocket()
71
72 def _close(self):
73 super(Daemon, self)._close()
74
75 def _send_output(self, category, output):
76 if self.session is None:
77 return
78 self.session._msgprocessor.send_event('output',
79 category=category,
80 output=output)
81
82
83 class NoSocket(object):
84 """A object with a noop socket lifecycle."""
85
86 def shutdown(self, *args, **kwargs):
87 pass
88
89 def close(self):
90 pass
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/ptvsd/runner.py b/src/ptvsd/runner.py
--- a/src/ptvsd/runner.py
+++ b/src/ptvsd/runner.py
@@ -39,6 +39,19 @@
class SESSION(DebugSession):
class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):
+
+ def on_setBreakpoints(self, request, args):
+ # Note: breakpoints is required (vscode will terminate
+ # the debugger if that's not the case).
+ # See: https://github.com/microsoft/ptvsd/issues/1408
+ self.send_response(
+ request,
+ success=True,
+ breakpoints=(
+ [{'verified': False}] * len(args.get('breakpoints', ()))
+ )
+ )
+
def on_invalid_request(self, request, args):
self.send_response(request, success=True)
| {"golden_diff": "diff --git a/src/ptvsd/runner.py b/src/ptvsd/runner.py\n--- a/src/ptvsd/runner.py\n+++ b/src/ptvsd/runner.py\n@@ -39,6 +39,19 @@\n \n class SESSION(DebugSession):\n class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):\n+\n+ def on_setBreakpoints(self, request, args):\n+ # Note: breakpoints is required (vscode will terminate\n+ # the debugger if that's not the case).\n+ # See: https://github.com/microsoft/ptvsd/issues/1408\n+ self.send_response(\n+ request,\n+ success=True,\n+ breakpoints=(\n+ [{'verified': False}] * len(args.get('breakpoints', ()))\n+ )\n+ )\n+\n def on_invalid_request(self, request, args):\n self.send_response(request, success=True)\n", "issue": "python without debugging won't start if there is a breakpoint\n@tmdag commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540)\n\n## Environment data\r\n- VSCode Version: 1.33.1\r\n- OS Version:Linux 5.0.7-200.fc29.x86_64\r\n- Extension version (available under the Extensions sidebar): Python 2019.4.11987\r\n- Anaconda Extension Pack 1.0.1\r\n\r\n## Steps to reproduce:\r\nOpen Python code\r\ncreate a breakpoint\r\nrun python without debugging (ctrl+F5)\r\nTerminal output:\r\n\r\ncd /home/user/dev/Pytool ; env PYTHONIOENCODING=UTF-8 PYTHONUNBUFFERED=1 /usr/bin/python3 /home/user/.vscode/extensions/ms-python.python-2019.4.11987/pythonFiles/ptvsd_launcher.py --default --nodebug --client --host localhost --port 36019 /home/user/dev/Pytool/mypytool.py\r\nTerminated\r\nDoes this issue occur when all extensions are disabled?: Yes/No\r\nNot sure - Python extension is required\r\n\r\n## Enabled Extensions:\r\nBookmarks 10.4.3\r\nC/C++ 0.221\r\nGist 3.0.3\r\nGit History 0.4.6\r\nGitLens - 9.6.3\r\nmarkdownlint 0.26.0\r\n\r\nSyncing 2.1.6\r\nOpenCL 0.5.2\r\nVEX 0.4.0\r\nTAML 0.4.0\r\n\r\n\n\n---\n\n@jxramos commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540#issuecomment-488807421)\n\nI'm seeing this too on a MacOS, it immediately exits with `Terminated: 15`. This behavior persists even if the breakpoint is disabled/unchecked.\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport pydevd\nimport threading\n\nfrom ptvsd.daemon import DaemonBase\nfrom ptvsd.session import DebugSession\nfrom ptvsd.wrapper import VSCLifecycleMsgProcessor\nfrom pydevd import init_stdout_redirect, init_stderr_redirect\n\n\nHOSTNAME = 'localhost'\n\n\ndef run(address, filename, is_module, *args, **kwargs):\n # TODO: docstring\n # TODO: client/server -> address\n daemon = Daemon()\n if not daemon.wait_for_launch(address):\n return\n\n debugger = pydevd.PyDB()\n # We do not want some internal methods to get executed in non-debug mode.\n debugger.init_matplotlib_support = lambda *arg: None\n debugger.run(\n file=filename,\n globals=None,\n locals=None,\n is_module=is_module,\n set_trace=False)\n\n\nclass Daemon(DaemonBase):\n \"\"\"The process-level manager for the VSC protocol debug adapter.\"\"\"\n\n LAUNCH_TIMEOUT = 10000 # seconds\n\n class SESSION(DebugSession):\n class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):\n def on_invalid_request(self, request, args):\n self.send_response(request, success=True)\n\n def wait_for_launch(self, addr, timeout=LAUNCH_TIMEOUT):\n # TODO: docstring\n launched = threading.Event()\n _, start_session = self.start_client(addr)\n start_session(\n notify_launch=launched.set,\n )\n return launched.wait(timeout)\n\n def _start(self):\n import weakref\n weak_self = weakref.ref(self) # Avoid cyclic ref\n\n def on_stdout(msg):\n self = weak_self()\n if self is not None:\n self._send_output('stdout', msg)\n\n def on_stderr(msg):\n self = weak_self()\n if self is not None:\n self._send_output('stderr', msg)\n\n init_stdout_redirect(on_stdout)\n init_stderr_redirect(on_stderr)\n return NoSocket()\n\n def _close(self):\n super(Daemon, self)._close()\n\n def _send_output(self, category, output):\n if self.session is None:\n return\n self.session._msgprocessor.send_event('output',\n category=category,\n output=output)\n\n\nclass NoSocket(object):\n \"\"\"A object with a noop socket lifecycle.\"\"\"\n\n def shutdown(self, *args, **kwargs):\n pass\n\n def close(self):\n pass\n", "path": "src/ptvsd/runner.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport pydevd\nimport threading\n\nfrom ptvsd.daemon import DaemonBase\nfrom ptvsd.session import DebugSession\nfrom ptvsd.wrapper import VSCLifecycleMsgProcessor\nfrom pydevd import init_stdout_redirect, init_stderr_redirect\n\n\nHOSTNAME = 'localhost'\n\n\ndef run(address, filename, is_module, *args, **kwargs):\n # TODO: docstring\n # TODO: client/server -> address\n daemon = Daemon()\n if not daemon.wait_for_launch(address):\n return\n\n debugger = pydevd.PyDB()\n # We do not want some internal methods to get executed in non-debug mode.\n debugger.init_matplotlib_support = lambda *arg: None\n debugger.run(\n file=filename,\n globals=None,\n locals=None,\n is_module=is_module,\n set_trace=False)\n\n\nclass Daemon(DaemonBase):\n \"\"\"The process-level manager for the VSC protocol debug adapter.\"\"\"\n\n LAUNCH_TIMEOUT = 10000 # seconds\n\n class SESSION(DebugSession):\n class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):\n\n def on_setBreakpoints(self, request, args):\n # Note: breakpoints is required (vscode will terminate\n # the debugger if that's not the case).\n # See: https://github.com/microsoft/ptvsd/issues/1408\n self.send_response(\n request,\n success=True,\n breakpoints=(\n [{'verified': False}] * len(args.get('breakpoints', ()))\n )\n )\n\n def on_invalid_request(self, request, args):\n self.send_response(request, success=True)\n\n def wait_for_launch(self, addr, timeout=LAUNCH_TIMEOUT):\n # TODO: docstring\n launched = threading.Event()\n _, start_session = self.start_client(addr)\n start_session(\n notify_launch=launched.set,\n )\n return launched.wait(timeout)\n\n def _start(self):\n import weakref\n weak_self = weakref.ref(self) # Avoid cyclic ref\n\n def on_stdout(msg):\n self = weak_self()\n if self is not None:\n self._send_output('stdout', msg)\n\n def on_stderr(msg):\n self = weak_self()\n if self is not None:\n self._send_output('stderr', msg)\n\n init_stdout_redirect(on_stdout)\n init_stderr_redirect(on_stderr)\n return NoSocket()\n\n def _close(self):\n super(Daemon, self)._close()\n\n def _send_output(self, category, output):\n if self.session is None:\n return\n self.session._msgprocessor.send_event('output',\n category=category,\n output=output)\n\n\nclass NoSocket(object):\n \"\"\"A object with a noop socket lifecycle.\"\"\"\n\n def shutdown(self, *args, **kwargs):\n pass\n\n def close(self):\n pass\n", "path": "src/ptvsd/runner.py"}]} | 1,440 | 201 |
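A note on the ptvsd record above: the golden diff fixes the reported crash by making the no-debug message processor acknowledge `setBreakpoints` requests instead of ignoring them, since VS Code terminates the adapter when that request goes unanswered. The sketch below is only a restatement of the added handler in self-contained form; `VSCLifecycleMsgProcessor.send_response` is stubbed out here, and the `verified: False` response shape comes straight from the patch.

```python
# Self-contained sketch of the behaviour the patch adds: answer a DAP
# "setBreakpoints" request with one unverified breakpoint per requested
# breakpoint, so the client does not terminate the --nodebug session.
class NoDebugMessageProcessorSketch:
    def send_response(self, request, success, **body):
        # Stand-in for VSCLifecycleMsgProcessor.send_response from the record.
        print('response to', request, {'success': success, **body})

    def on_setBreakpoints(self, request, args):
        requested = args.get('breakpoints', ())
        self.send_response(
            request,
            success=True,
            breakpoints=[{'verified': False}] * len(requested),
        )


NoDebugMessageProcessorSketch().on_setBreakpoints(
    'setBreakpoints', {'breakpoints': [{'line': 3}]})
```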
gh_patches_debug_41696 | rasdani/github-patches | git_diff | carpentries__amy-475 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Admin commands broken
Please check all the admin commands (use `make` on its own to get a list):
```
$ make airports
/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/crispy_forms/utils.py:25: RemovedInDjango19Warning: memoize wrapper is deprecated and will be removed in Django 1.9. Use django.utils.lru_cache instead.
default_field_template = memoize(default_field_template, {}, 1)
Traceback (most recent call last):
File "manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py", line 338, in execute_from_command_line
utility.execute()
File "/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py", line 330, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py", line 190, in fetch_command
klass = load_command_class(app_name, subcommand)
File "/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py", line 40, in load_command_class
module = import_module('%s.management.commands.%s' % (app_name, name))
File "/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/importlib/__init__.py", line 90, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1582, in _gcd_import
File "<frozen importlib._bootstrap>", line 1563, in _find_and_load
File "<frozen importlib._bootstrap>", line 1530, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 584, in _check_name_wrapper
File "<frozen importlib._bootstrap>", line 1022, in load_module
File "<frozen importlib._bootstrap>", line 1003, in load_module
File "<frozen importlib._bootstrap>", line 560, in module_for_loader_wrapper
File "<frozen importlib._bootstrap>", line 868, in _load_module
File "<frozen importlib._bootstrap>", line 313, in _call_with_frames_removed
File "/Users/gvwilson/s/amy/workshops/management/commands/export_airports.py", line 3, in <module>
from workshops.views import _export_instructors
ImportError: cannot import name _export_instructors
make: *** [airports] Error 1
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `workshops/management/commands/export_airports.py`
Content:
```
1 import yaml
2 from django.core.management.base import BaseCommand, CommandError
3 from workshops.views import _export_instructors
4
5 class Command(BaseCommand):
6 args = 'no arguments'
7 help = 'Display YAML for airports.'
8
9 def handle(self, *args, **options):
10 print(yaml.dump(_export_instructors()).rstrip())
11
```
Path: `workshops/management/commands/export_badges.py`
Content:
```
1 import yaml
2 from django.core.management.base import BaseCommand, CommandError
3 from workshops.views import _export_badges
4
5 class Command(BaseCommand):
6 args = 'no arguments'
7 help = 'Display YAML for badges.'
8
9 def handle(self, *args, **options):
10 print(yaml.dump(_export_badges()).rstrip())
11
```
Path: `api/views.py`
Content:
```
1 from django.db.models import Q
2 from rest_framework.generics import ListAPIView
3 from rest_framework.permissions import IsAuthenticatedOrReadOnly
4 from rest_framework.response import Response
5 from rest_framework.reverse import reverse
6 from rest_framework.views import APIView
7
8 from workshops.models import Badge, Airport, Event
9
10 from .serializers import (
11 ExportBadgesSerializer,
12 ExportInstructorLocationsSerializer,
13 EventSerializer,
14 )
15
16
17 class ApiRoot(APIView):
18 def get(self, request, format=None):
19 return Response({
20 'export-badges': reverse('api:export-badges', request=request,
21 format=format),
22 'export-instructors': reverse('api:export-instructors',
23 request=request, format=format),
24 'events-published': reverse('api:events-published',
25 request=request, format=format),
26 })
27
28
29 class ExportBadgesView(APIView):
30 """List all badges and people who have them."""
31 permission_classes = (IsAuthenticatedOrReadOnly, )
32
33 def get(self, request, format=None):
34 badges = Badge.objects.prefetch_related('person_set')
35 serializer = ExportBadgesSerializer(badges, many=True)
36 return Response(serializer.data)
37
38
39 class ExportInstructorLocationsView(APIView):
40 """List all airports and instructors located near them."""
41 permission_classes = (IsAuthenticatedOrReadOnly, )
42
43 def get(self, request, format=None):
44 # TODO: return only people marked as instructors?
45 airports = Airport.objects.exclude(person=None) \
46 .prefetch_related('person_set')
47 serializer = ExportInstructorLocationsSerializer(airports, many=True)
48 return Response(serializer.data)
49
50
51 class ListEvents(APIView):
52 # I wanted to use ListAPIView, but it had problems with the way we test
53 # this code... Basically ListAPIView uses pagination, and pagination
54 # requires existing Request object - something we're faking in part of the
55 # tests (request = None).
56 serializer_class = EventSerializer
57 permission_classes = (IsAuthenticatedOrReadOnly, )
58 queryset = None # override this in the subclass
59
60 def get(self, request, format=None):
61 objects = self.queryset.all()
62 serializer = self.serializer_class(objects, many=True)
63 return Response(serializer.data)
64
65
66 class PublishedEvents(ListEvents):
67 # only events that have both a starting date and a URL
68 queryset = Event.objects.exclude(
69 Q(start__isnull=True) | Q(url__isnull=True)
70 ).order_by('-start')
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/api/views.py b/api/views.py
--- a/api/views.py
+++ b/api/views.py
@@ -26,45 +26,31 @@
})
-class ExportBadgesView(APIView):
+class ExportBadgesView(ListAPIView):
"""List all badges and people who have them."""
permission_classes = (IsAuthenticatedOrReadOnly, )
+ paginator = None # disable pagination
- def get(self, request, format=None):
- badges = Badge.objects.prefetch_related('person_set')
- serializer = ExportBadgesSerializer(badges, many=True)
- return Response(serializer.data)
+ queryset = Badge.objects.prefetch_related('person_set')
+ serializer_class = ExportBadgesSerializer
-class ExportInstructorLocationsView(APIView):
+class ExportInstructorLocationsView(ListAPIView):
"""List all airports and instructors located near them."""
permission_classes = (IsAuthenticatedOrReadOnly, )
+ paginator = None # disable pagination
- def get(self, request, format=None):
- # TODO: return only people marked as instructors?
- airports = Airport.objects.exclude(person=None) \
- .prefetch_related('person_set')
- serializer = ExportInstructorLocationsSerializer(airports, many=True)
- return Response(serializer.data)
+ queryset = Airport.objects.exclude(person=None) \
+ .prefetch_related('person_set')
+ serializer_class = ExportInstructorLocationsSerializer
-class ListEvents(APIView):
- # I wanted to use ListAPIView, but it had problems with the way we test
- # this code... Basically ListAPIView uses pagination, and pagination
- # requires existing Request object - something we're faking in part of the
- # tests (request = None).
- serializer_class = EventSerializer
+class PublishedEvents(ListAPIView):
+ # only events that have both a starting date and a URL
permission_classes = (IsAuthenticatedOrReadOnly, )
- queryset = None # override this in the subclass
+ paginator = None # disable pagination
- def get(self, request, format=None):
- objects = self.queryset.all()
- serializer = self.serializer_class(objects, many=True)
- return Response(serializer.data)
-
-
-class PublishedEvents(ListEvents):
- # only events that have both a starting date and a URL
+ serializer_class = EventSerializer
queryset = Event.objects.exclude(
Q(start__isnull=True) | Q(url__isnull=True)
).order_by('-start')
diff --git a/workshops/management/commands/export_airports.py b/workshops/management/commands/export_airports.py
--- a/workshops/management/commands/export_airports.py
+++ b/workshops/management/commands/export_airports.py
@@ -1,10 +1,14 @@
-import yaml
-from django.core.management.base import BaseCommand, CommandError
-from workshops.views import _export_instructors
+from django.core.management.base import BaseCommand
+from django.core.urlresolvers import reverse
+from rest_framework.test import APIClient
+
class Command(BaseCommand):
args = 'no arguments'
help = 'Display YAML for airports.'
def handle(self, *args, **options):
- print(yaml.dump(_export_instructors()).rstrip())
+ client = APIClient()
+ response = client.get(reverse('api:export-instructors'),
+ {'format': 'yaml'})
+ print(response.content.decode('utf-8'))
diff --git a/workshops/management/commands/export_badges.py b/workshops/management/commands/export_badges.py
--- a/workshops/management/commands/export_badges.py
+++ b/workshops/management/commands/export_badges.py
@@ -1,10 +1,14 @@
-import yaml
-from django.core.management.base import BaseCommand, CommandError
-from workshops.views import _export_badges
+from django.core.management.base import BaseCommand
+from django.core.urlresolvers import reverse
+from rest_framework.test import APIClient
+
class Command(BaseCommand):
args = 'no arguments'
help = 'Display YAML for badges.'
def handle(self, *args, **options):
- print(yaml.dump(_export_badges()).rstrip())
+ client = APIClient()
+ response = client.get(reverse('api:export-badges'),
+ {'format': 'yaml'})
+ print(response.content.decode('utf-8'))
| {"golden_diff": "diff --git a/api/views.py b/api/views.py\n--- a/api/views.py\n+++ b/api/views.py\n@@ -26,45 +26,31 @@\n })\n \n \n-class ExportBadgesView(APIView):\n+class ExportBadgesView(ListAPIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n+ paginator = None # disable pagination\n \n- def get(self, request, format=None):\n- badges = Badge.objects.prefetch_related('person_set')\n- serializer = ExportBadgesSerializer(badges, many=True)\n- return Response(serializer.data)\n+ queryset = Badge.objects.prefetch_related('person_set')\n+ serializer_class = ExportBadgesSerializer\n \n \n-class ExportInstructorLocationsView(APIView):\n+class ExportInstructorLocationsView(ListAPIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n+ paginator = None # disable pagination\n \n- def get(self, request, format=None):\n- # TODO: return only people marked as instructors?\n- airports = Airport.objects.exclude(person=None) \\\n- .prefetch_related('person_set')\n- serializer = ExportInstructorLocationsSerializer(airports, many=True)\n- return Response(serializer.data)\n+ queryset = Airport.objects.exclude(person=None) \\\n+ .prefetch_related('person_set')\n+ serializer_class = ExportInstructorLocationsSerializer\n \n \n-class ListEvents(APIView):\n- # I wanted to use ListAPIView, but it had problems with the way we test\n- # this code... Basically ListAPIView uses pagination, and pagination\n- # requires existing Request object - something we're faking in part of the\n- # tests (request = None).\n- serializer_class = EventSerializer\n+class PublishedEvents(ListAPIView):\n+ # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n- queryset = None # override this in the subclass\n+ paginator = None # disable pagination\n \n- def get(self, request, format=None):\n- objects = self.queryset.all()\n- serializer = self.serializer_class(objects, many=True)\n- return Response(serializer.data)\n-\n-\n-class PublishedEvents(ListEvents):\n- # only events that have both a starting date and a URL\n+ serializer_class = EventSerializer\n queryset = Event.objects.exclude(\n Q(start__isnull=True) | Q(url__isnull=True)\n ).order_by('-start')\ndiff --git a/workshops/management/commands/export_airports.py b/workshops/management/commands/export_airports.py\n--- a/workshops/management/commands/export_airports.py\n+++ b/workshops/management/commands/export_airports.py\n@@ -1,10 +1,14 @@\n-import yaml\n-from django.core.management.base import BaseCommand, CommandError\n-from workshops.views import _export_instructors\n+from django.core.management.base import BaseCommand\n+from django.core.urlresolvers import reverse\n+from rest_framework.test import APIClient\n+\n \n class Command(BaseCommand):\n args = 'no arguments'\n help = 'Display YAML for airports.'\n \n def handle(self, *args, **options):\n- print(yaml.dump(_export_instructors()).rstrip())\n+ client = APIClient()\n+ response = client.get(reverse('api:export-instructors'),\n+ {'format': 'yaml'})\n+ print(response.content.decode('utf-8'))\ndiff --git a/workshops/management/commands/export_badges.py b/workshops/management/commands/export_badges.py\n--- a/workshops/management/commands/export_badges.py\n+++ b/workshops/management/commands/export_badges.py\n@@ -1,10 +1,14 @@\n-import yaml\n-from django.core.management.base import BaseCommand, CommandError\n-from workshops.views 
import _export_badges\n+from django.core.management.base import BaseCommand\n+from django.core.urlresolvers import reverse\n+from rest_framework.test import APIClient\n+\n \n class Command(BaseCommand):\n args = 'no arguments'\n help = 'Display YAML for badges.'\n \n def handle(self, *args, **options):\n- print(yaml.dump(_export_badges()).rstrip())\n+ client = APIClient()\n+ response = client.get(reverse('api:export-badges'),\n+ {'format': 'yaml'})\n+ print(response.content.decode('utf-8'))\n", "issue": "Admin commands broken\nPlease check all the admin commands (use `make` on its own to get a list):\n\n```\n$ make airports\n\n/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/crispy_forms/utils.py:25: RemovedInDjango19Warning: memoize wrapper is deprecated and will be removed in Django 1.9. Use django.utils.lru_cache instead.\n default_field_template = memoize(default_field_template, {}, 1)\n\nTraceback (most recent call last):\n File \"manage.py\", line 10, in <module>\n execute_from_command_line(sys.argv)\n File \"/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py\", line 338, in execute_from_command_line\n utility.execute()\n File \"/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py\", line 330, in execute\n self.fetch_command(subcommand).run_from_argv(self.argv)\n File \"/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py\", line 190, in fetch_command\n klass = load_command_class(app_name, subcommand)\n File \"/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/site-packages/django/core/management/__init__.py\", line 40, in load_command_class\n module = import_module('%s.management.commands.%s' % (app_name, name))\n File \"/Users/gvwilson/anaconda/envs/py3k/lib/python3.3/importlib/__init__.py\", line 90, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n File \"<frozen importlib._bootstrap>\", line 1582, in _gcd_import\n File \"<frozen importlib._bootstrap>\", line 1563, in _find_and_load\n File \"<frozen importlib._bootstrap>\", line 1530, in _find_and_load_unlocked\n File \"<frozen importlib._bootstrap>\", line 584, in _check_name_wrapper\n File \"<frozen importlib._bootstrap>\", line 1022, in load_module\n File \"<frozen importlib._bootstrap>\", line 1003, in load_module\n File \"<frozen importlib._bootstrap>\", line 560, in module_for_loader_wrapper\n File \"<frozen importlib._bootstrap>\", line 868, in _load_module\n File \"<frozen importlib._bootstrap>\", line 313, in _call_with_frames_removed\n File \"/Users/gvwilson/s/amy/workshops/management/commands/export_airports.py\", line 3, in <module>\n from workshops.views import _export_instructors\nImportError: cannot import name _export_instructors\nmake: *** [airports] Error 1\n```\n\n", "before_files": [{"content": "import yaml\nfrom django.core.management.base import BaseCommand, CommandError\nfrom workshops.views import _export_instructors\n\nclass Command(BaseCommand):\n args = 'no arguments'\n help = 'Display YAML for airports.'\n\n def handle(self, *args, **options):\n print(yaml.dump(_export_instructors()).rstrip())\n", "path": "workshops/management/commands/export_airports.py"}, {"content": "import yaml\nfrom django.core.management.base import BaseCommand, CommandError\nfrom workshops.views import _export_badges\n\nclass Command(BaseCommand):\n args = 'no arguments'\n help = 'Display YAML for badges.'\n\n def handle(self, *args, **options):\n 
print(yaml.dump(_export_badges()).rstrip())\n", "path": "workshops/management/commands/export_badges.py"}, {"content": "from django.db.models import Q\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom workshops.models import Badge, Airport, Event\n\nfrom .serializers import (\n ExportBadgesSerializer,\n ExportInstructorLocationsSerializer,\n EventSerializer,\n)\n\n\nclass ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n 'export-badges': reverse('api:export-badges', request=request,\n format=format),\n 'export-instructors': reverse('api:export-instructors',\n request=request, format=format),\n 'events-published': reverse('api:events-published',\n request=request, format=format),\n })\n\n\nclass ExportBadgesView(APIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n\n def get(self, request, format=None):\n badges = Badge.objects.prefetch_related('person_set')\n serializer = ExportBadgesSerializer(badges, many=True)\n return Response(serializer.data)\n\n\nclass ExportInstructorLocationsView(APIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n\n def get(self, request, format=None):\n # TODO: return only people marked as instructors?\n airports = Airport.objects.exclude(person=None) \\\n .prefetch_related('person_set')\n serializer = ExportInstructorLocationsSerializer(airports, many=True)\n return Response(serializer.data)\n\n\nclass ListEvents(APIView):\n # I wanted to use ListAPIView, but it had problems with the way we test\n # this code... 
Basically ListAPIView uses pagination, and pagination\n # requires existing Request object - something we're faking in part of the\n # tests (request = None).\n serializer_class = EventSerializer\n permission_classes = (IsAuthenticatedOrReadOnly, )\n queryset = None # override this in the subclass\n\n def get(self, request, format=None):\n objects = self.queryset.all()\n serializer = self.serializer_class(objects, many=True)\n return Response(serializer.data)\n\n\nclass PublishedEvents(ListEvents):\n # only events that have both a starting date and a URL\n queryset = Event.objects.exclude(\n Q(start__isnull=True) | Q(url__isnull=True)\n ).order_by('-start')\n", "path": "api/views.py"}], "after_files": [{"content": "from django.core.management.base import BaseCommand\nfrom django.core.urlresolvers import reverse\nfrom rest_framework.test import APIClient\n\n\nclass Command(BaseCommand):\n args = 'no arguments'\n help = 'Display YAML for airports.'\n\n def handle(self, *args, **options):\n client = APIClient()\n response = client.get(reverse('api:export-instructors'),\n {'format': 'yaml'})\n print(response.content.decode('utf-8'))\n", "path": "workshops/management/commands/export_airports.py"}, {"content": "from django.core.management.base import BaseCommand\nfrom django.core.urlresolvers import reverse\nfrom rest_framework.test import APIClient\n\n\nclass Command(BaseCommand):\n args = 'no arguments'\n help = 'Display YAML for badges.'\n\n def handle(self, *args, **options):\n client = APIClient()\n response = client.get(reverse('api:export-badges'),\n {'format': 'yaml'})\n print(response.content.decode('utf-8'))\n", "path": "workshops/management/commands/export_badges.py"}, {"content": "from django.db.models import Q\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom workshops.models import Badge, Airport, Event\n\nfrom .serializers import (\n ExportBadgesSerializer,\n ExportInstructorLocationsSerializer,\n EventSerializer,\n)\n\n\nclass ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n 'export-badges': reverse('api:export-badges', request=request,\n format=format),\n 'export-instructors': reverse('api:export-instructors',\n request=request, format=format),\n 'events-published': reverse('api:events-published',\n request=request, format=format),\n })\n\n\nclass ExportBadgesView(ListAPIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Badge.objects.prefetch_related('person_set')\n serializer_class = ExportBadgesSerializer\n\n\nclass ExportInstructorLocationsView(ListAPIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Airport.objects.exclude(person=None) \\\n .prefetch_related('person_set')\n serializer_class = ExportInstructorLocationsSerializer\n\n\nclass PublishedEvents(ListAPIView):\n # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = EventSerializer\n queryset = Event.objects.exclude(\n Q(start__isnull=True) | Q(url__isnull=True)\n ).order_by('-start')\n", "path": "api/views.py"}]} | 1,798 | 
954 |
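For the AMY record above, the useful pattern in the golden diff is that the broken management commands stop importing private helpers from `workshops.views` and instead exercise the new DRF endpoints through `rest_framework.test.APIClient`. Below is a minimal sketch of that pattern; the URL name `api:export-badges` and the `yaml` format parameter are taken from the diff, the YAML renderer is assumed to be configured as in that project, and on current Django `reverse` is imported from `django.urls` rather than the `django.core.urlresolvers` module the patch itself uses.

```python
# Sketch of the approach the patched commands take: resolve the API route by
# name, ask DRF to render it as YAML, and print the payload.
from django.urls import reverse  # the original patch uses django.core.urlresolvers
from rest_framework.test import APIClient


def dump_badges_yaml():
    client = APIClient()
    response = client.get(reverse('api:export-badges'), {'format': 'yaml'})
    print(response.content.decode('utf-8'))
```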
gh_patches_debug_13993 | rasdani/github-patches | git_diff | google__flax-1324 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: module 'flax.linen' has no attribute 'merge_param'
[This guide](https://flax.readthedocs.io/en/latest/design_notes/arguments.html) suggests using `nn.merge_param` to combine arguments, but `merge_param` is only available through `nn.module.merge_param`. I believe it needs to be added to the import line [here](https://github.com/google/flax/blob/4ae9143f7ef46ffab6d9123ba1b2e4f3303e68d1/flax/linen/__init__.py#L28). I can open a PR if this is the case.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flax/linen/__init__.py`
Content:
```
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """The Flax Module system."""
16
17
18 # pylint: disable=g-multiple-import
19 # re-export commonly used modules and functions
20 from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,
21 log_softmax, relu, sigmoid, soft_sign, softmax,
22 softplus, swish, silu, tanh)
23 from .attention import (MultiHeadDotProductAttention, SelfAttention,
24 dot_product_attention, make_attention_mask,
25 make_causal_mask, combine_masks)
26 from ..core import broadcast, DenyList
27 from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed
28 from .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply
29 from .normalization import BatchNorm, GroupNorm, LayerNorm
30 from .pooling import avg_pool, max_pool
31 from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell
32 from .stochastic import Dropout
33 from .transforms import jit, named_call, remat, scan, vmap
34 from .initializers import zeros, ones
35
36 # pylint: enable=g-multiple-import
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py
--- a/flax/linen/__init__.py
+++ b/flax/linen/__init__.py
@@ -25,7 +25,8 @@
make_causal_mask, combine_masks)
from ..core import broadcast, DenyList
from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed
-from .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply
+from .module import (Module, compact, enable_named_call, disable_named_call,
+ Variable, init, init_with_output, apply, merge_param)
from .normalization import BatchNorm, GroupNorm, LayerNorm
from .pooling import avg_pool, max_pool
from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell
| {"golden_diff": "diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py\n--- a/flax/linen/__init__.py\n+++ b/flax/linen/__init__.py\n@@ -25,7 +25,8 @@\n make_causal_mask, combine_masks)\n from ..core import broadcast, DenyList\n from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed\n-from .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply\n+from .module import (Module, compact, enable_named_call, disable_named_call,\n+ Variable, init, init_with_output, apply, merge_param)\n from .normalization import BatchNorm, GroupNorm, LayerNorm\n from .pooling import avg_pool, max_pool\n from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell\n", "issue": "AttributeError: module 'flax.linen' has no attribute 'merge_param'\n[This guide](https://flax.readthedocs.io/en/latest/design_notes/arguments.html) suggests using `nn.merge_param` to combine arguments, but `merge_param` is only available through `nn.module.merge_param`. I believe it needs to be added to the import line [here](https://github.com/google/flax/blob/4ae9143f7ef46ffab6d9123ba1b2e4f3303e68d1/flax/linen/__init__.py#L28). I can open a PR if this is the case.\r\n\n", "before_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Flax Module system.\"\"\"\n\n\n# pylint: disable=g-multiple-import\n# re-export commonly used modules and functions\nfrom .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,\n log_softmax, relu, sigmoid, soft_sign, softmax,\n softplus, swish, silu, tanh)\nfrom .attention import (MultiHeadDotProductAttention, SelfAttention,\n dot_product_attention, make_attention_mask,\n make_causal_mask, combine_masks)\nfrom ..core import broadcast, DenyList\nfrom .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed\nfrom .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply\nfrom .normalization import BatchNorm, GroupNorm, LayerNorm\nfrom .pooling import avg_pool, max_pool\nfrom .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell\nfrom .stochastic import Dropout\nfrom .transforms import jit, named_call, remat, scan, vmap\nfrom .initializers import zeros, ones\n\n# pylint: enable=g-multiple-import\n", "path": "flax/linen/__init__.py"}], "after_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Flax Module 
system.\"\"\"\n\n\n# pylint: disable=g-multiple-import\n# re-export commonly used modules and functions\nfrom .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,\n log_softmax, relu, sigmoid, soft_sign, softmax,\n softplus, swish, silu, tanh)\nfrom .attention import (MultiHeadDotProductAttention, SelfAttention,\n dot_product_attention, make_attention_mask,\n make_causal_mask, combine_masks)\nfrom ..core import broadcast, DenyList\nfrom .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed\nfrom .module import (Module, compact, enable_named_call, disable_named_call,\n Variable, init, init_with_output, apply, merge_param)\nfrom .normalization import BatchNorm, GroupNorm, LayerNorm\nfrom .pooling import avg_pool, max_pool\nfrom .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell\nfrom .stochastic import Dropout\nfrom .transforms import jit, named_call, remat, scan, vmap\nfrom .initializers import zeros, ones\n\n# pylint: enable=g-multiple-import\n", "path": "flax/linen/__init__.py"}]} | 860 | 201 |
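For the Flax record above, the fix is purely a re-export: `merge_param` already exists in `flax.linen.module`, it just was not importable as `nn.merge_param`. As a sketch of why that export matters, the pattern from the design note linked in the issue combines a constructor attribute with a call-time argument roughly as below; the module name, the `deterministic` attribute, and the dropout rate are illustrative rather than taken from the issue.

```python
# Illustrative module (not from the issue) using the nn.merge_param pattern:
# a flag may be fixed at construction time or supplied per call, and
# merge_param resolves the two (erroring if both or neither are provided).
from typing import Optional

import flax.linen as nn


class Block(nn.Module):
    deterministic: Optional[bool] = None

    @nn.compact
    def __call__(self, x, deterministic: Optional[bool] = None):
        deterministic = nn.merge_param(
            'deterministic', self.deterministic, deterministic)
        return nn.Dropout(rate=0.5, deterministic=deterministic)(x)
```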
gh_patches_debug_18718 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1747 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Investigate Health check restarting sandboxes
### Issue Description
We noticed a few times in both staging and stable environments that the sandboxes appeared to restart even when no one was on them. We weren't able to track down useful information in the logs beyond a reference to a health check being performed. Then, when the EPP connection was added, we saw this same restarting happening often; in fact, just trying to set nameservers alone could result in multiple resets. However, it was very sporadic and without a discernible pattern beyond "when EPP calls are happening". It was determined that the system was failing the health check, which does result in a restart. The solution was to increase the timeout on the health check, but that seems like a patch over the problem.
AC:
- [ ] investigate the health check that is currently happening and determine whether the restarts could be caused by some piece of our architecture (such as a port that is in use and blocking the health check request)
- [ ] make follow on tickets as needed or document findings here before closing ticket
### Additional Context (optional)
Cloudfoundry has a customer support slack that should be used to get more info on the healthcheck.
To force the health check to occur with some frequency, lower the healthcheck timeout to 1 second.
### Issue Links
May be partially mitigated by #1151
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/djangooidc/views.py`
Content:
```
1 # coding: utf-8
2
3 import logging
4
5 from django.conf import settings
6 from django.contrib.auth import logout as auth_logout
7 from django.contrib.auth import authenticate, login
8 from django.http import HttpResponseRedirect
9 from django.shortcuts import redirect, render
10 from urllib.parse import parse_qs, urlencode
11
12 from djangooidc.oidc import Client
13 from djangooidc import exceptions as o_e
14 from registrar.models import User
15
16 logger = logging.getLogger(__name__)
17
18 try:
19 # Initialize provider using pyOICD
20 OP = getattr(settings, "OIDC_ACTIVE_PROVIDER")
21 CLIENT = Client(OP)
22 logger.debug("client initialized %s" % CLIENT)
23 except Exception as err:
24 CLIENT = None # type: ignore
25 logger.warning(err)
26 logger.warning("Unable to configure OpenID Connect provider. Users cannot log in.")
27
28
29 def error_page(request, error):
30 """Display a sensible message and log the error."""
31 logger.error(error)
32 if isinstance(error, o_e.AuthenticationFailed):
33 return render(
34 request,
35 "401.html",
36 context={
37 "friendly_message": error.friendly_message,
38 "log_identifier": error.locator,
39 },
40 status=401,
41 )
42 if isinstance(error, o_e.InternalError):
43 return render(
44 request,
45 "500.html",
46 context={
47 "friendly_message": error.friendly_message,
48 "log_identifier": error.locator,
49 },
50 status=500,
51 )
52 if isinstance(error, Exception):
53 return render(request, "500.html", status=500)
54
55
56 def openid(request):
57 """Redirect the user to an authentication provider (OP)."""
58
59 # If the session reset because of a server restart, attempt to login again
60 request.session["acr_value"] = CLIENT.get_default_acr_value()
61
62 request.session["next"] = request.GET.get("next", "/")
63
64 try:
65 return CLIENT.create_authn_request(request.session)
66 except Exception as err:
67 return error_page(request, err)
68
69
70 def login_callback(request):
71 """Analyze the token returned by the authentication provider (OP)."""
72 try:
73 query = parse_qs(request.GET.urlencode())
74 userinfo = CLIENT.callback(query, request.session)
75 # test for need for identity verification and if it is satisfied
76 # if not satisfied, redirect user to login with stepped up acr_value
77 if requires_step_up_auth(userinfo):
78 # add acr_value to request.session
79 request.session["acr_value"] = CLIENT.get_step_up_acr_value()
80 return CLIENT.create_authn_request(request.session)
81 user = authenticate(request=request, **userinfo)
82 if user:
83 login(request, user)
84 logger.info("Successfully logged in user %s" % user)
85 # Double login bug (1507)?
86 return redirect(request.session.get("next", "/"))
87 else:
88 raise o_e.BannedUser()
89 except o_e.NoStateDefined as nsd_err:
90 logger.warning(f"No State Defined: {nsd_err}")
91 return redirect(request.session.get("next", "/"))
92 except Exception as err:
93 return error_page(request, err)
94
95
96 def requires_step_up_auth(userinfo):
97 """if User.needs_identity_verification and step_up_acr_value not in
98 ial returned from callback, return True"""
99 step_up_acr_value = CLIENT.get_step_up_acr_value()
100 acr_value = userinfo.get("ial", "")
101 uuid = userinfo.get("sub", "")
102 email = userinfo.get("email", "")
103 if acr_value != step_up_acr_value:
104 # The acr of this attempt is not at the highest level
105 # so check if the user needs the higher level
106 return User.needs_identity_verification(email, uuid)
107 else:
108 # This attempt already came back at the highest level
109 # so does not require step up
110 return False
111
112
113 def logout(request, next_page=None):
114 """Redirect the user to the authentication provider (OP) logout page."""
115 try:
116 user = request.user
117 request_args = {
118 "client_id": CLIENT.client_id,
119 "state": request.session["state"],
120 }
121 if (
122 "post_logout_redirect_uris" in CLIENT.registration_response.keys()
123 and len(CLIENT.registration_response["post_logout_redirect_uris"]) > 0
124 ):
125 request_args.update(
126 {"post_logout_redirect_uri": CLIENT.registration_response["post_logout_redirect_uris"][0]}
127 )
128 url = CLIENT.provider_info["end_session_endpoint"]
129 url += "?" + urlencode(request_args)
130 return HttpResponseRedirect(url)
131 except Exception as err:
132 return error_page(request, err)
133 finally:
134 # Always remove Django session stuff - even if not logged out from OP.
135 # Don't wait for the callback as it may never come.
136 auth_logout(request)
137 logger.info("Successfully logged out user %s" % user)
138 next_page = getattr(settings, "LOGOUT_REDIRECT_URL", None)
139 if next_page:
140 request.session["next"] = next_page
141
142
143 def logout_callback(request):
144 """Simple redirection view: after logout, redirect to `next`."""
145 next = request.session.get("next", "/")
146 return redirect(next)
147
```
Path: `src/registrar/config/urls.py`
Content:
```
1 """URL Configuration
2
3 For more information see:
4 https://docs.djangoproject.com/en/4.0/topics/http/urls/
5 """
6
7 from django.contrib import admin
8 from django.urls import include, path
9 from django.views.generic import RedirectView
10
11 from registrar import views
12
13 from registrar.views.admin_views import ExportData
14
15
16 from registrar.views.application import Step
17 from registrar.views.utility import always_404
18 from api.views import available, get_current_federal, get_current_full
19
20
21 APPLICATION_NAMESPACE = views.ApplicationWizard.URL_NAMESPACE
22 application_urls = [
23 path("", views.ApplicationWizard.as_view(), name=""),
24 path("finished/", views.Finished.as_view(), name="finished"),
25 ]
26
27 # dynamically generate the other application_urls
28 for step, view in [
29 # add/remove steps here
30 (Step.ORGANIZATION_TYPE, views.OrganizationType),
31 (Step.TRIBAL_GOVERNMENT, views.TribalGovernment),
32 (Step.ORGANIZATION_FEDERAL, views.OrganizationFederal),
33 (Step.ORGANIZATION_ELECTION, views.OrganizationElection),
34 (Step.ORGANIZATION_CONTACT, views.OrganizationContact),
35 (Step.ABOUT_YOUR_ORGANIZATION, views.AboutYourOrganization),
36 (Step.AUTHORIZING_OFFICIAL, views.AuthorizingOfficial),
37 (Step.CURRENT_SITES, views.CurrentSites),
38 (Step.DOTGOV_DOMAIN, views.DotgovDomain),
39 (Step.PURPOSE, views.Purpose),
40 (Step.YOUR_CONTACT, views.YourContact),
41 (Step.OTHER_CONTACTS, views.OtherContacts),
42 (Step.ANYTHING_ELSE, views.AnythingElse),
43 (Step.REQUIREMENTS, views.Requirements),
44 (Step.REVIEW, views.Review),
45 ]:
46 application_urls.append(path(f"{step}/", view.as_view(), name=step))
47
48
49 urlpatterns = [
50 path("", views.index, name="home"),
51 path(
52 "admin/logout/",
53 RedirectView.as_view(pattern_name="logout", permanent=False),
54 ),
55 path("export_data/", ExportData.as_view(), name="admin_export_data"),
56 path("admin/", admin.site.urls),
57 path(
58 "application/<id>/edit/",
59 views.ApplicationWizard.as_view(),
60 name=views.ApplicationWizard.EDIT_URL_NAME,
61 ),
62 path(
63 "application/<int:pk>",
64 views.ApplicationStatus.as_view(),
65 name="application-status",
66 ),
67 path(
68 "application/<int:pk>/withdraw",
69 views.ApplicationWithdrawConfirmation.as_view(),
70 name="application-withdraw-confirmation",
71 ),
72 path(
73 "application/<int:pk>/withdrawconfirmed",
74 views.ApplicationWithdrawn.as_view(),
75 name="application-withdrawn",
76 ),
77 path("health/", views.health),
78 path("openid/", include("djangooidc.urls")),
79 path("request/", include((application_urls, APPLICATION_NAMESPACE))),
80 path("api/v1/available/", available, name="available"),
81 path("api/v1/get-report/current-federal", get_current_federal, name="get-current-federal"),
82 path("api/v1/get-report/current-full", get_current_full, name="get-current-full"),
83 path(
84 "todo",
85 lambda r: always_404(r, "We forgot to include this link, sorry."),
86 name="todo",
87 ),
88 path("domain/<int:pk>", views.DomainView.as_view(), name="domain"),
89 path("domain/<int:pk>/users", views.DomainUsersView.as_view(), name="domain-users"),
90 path(
91 "domain/<int:pk>/dns",
92 views.DomainDNSView.as_view(),
93 name="domain-dns",
94 ),
95 path(
96 "domain/<int:pk>/dns/nameservers",
97 views.DomainNameserversView.as_view(),
98 name="domain-dns-nameservers",
99 ),
100 path(
101 "domain/<int:pk>/dns/dnssec",
102 views.DomainDNSSECView.as_view(),
103 name="domain-dns-dnssec",
104 ),
105 path(
106 "domain/<int:pk>/dns/dnssec/dsdata",
107 views.DomainDsDataView.as_view(),
108 name="domain-dns-dnssec-dsdata",
109 ),
110 path(
111 "domain/<int:pk>/your-contact-information",
112 views.DomainYourContactInformationView.as_view(),
113 name="domain-your-contact-information",
114 ),
115 path(
116 "domain/<int:pk>/org-name-address",
117 views.DomainOrgNameAddressView.as_view(),
118 name="domain-org-name-address",
119 ),
120 path(
121 "domain/<int:pk>/authorizing-official",
122 views.DomainAuthorizingOfficialView.as_view(),
123 name="domain-authorizing-official",
124 ),
125 path(
126 "domain/<int:pk>/security-email",
127 views.DomainSecurityEmailView.as_view(),
128 name="domain-security-email",
129 ),
130 path(
131 "domain/<int:pk>/users/add",
132 views.DomainAddUserView.as_view(),
133 name="domain-users-add",
134 ),
135 path(
136 "invitation/<int:pk>/delete",
137 views.DomainInvitationDeleteView.as_view(http_method_names=["post"]),
138 name="invitation-delete",
139 ),
140 path(
141 "application/<int:pk>/delete",
142 views.DomainApplicationDeleteView.as_view(http_method_names=["post"]),
143 name="application-delete",
144 ),
145 path(
146 "domain/<int:pk>/users/<int:user_pk>/delete",
147 views.DomainDeleteUserView.as_view(http_method_names=["post"]),
148 name="domain-user-delete",
149 ),
150 ]
151
152 # we normally would guard these with `if settings.DEBUG` but tests run with
153 # DEBUG = False even when these apps have been loaded because settings.DEBUG
154 # was actually True. Instead, let's add these URLs any time we are able to
155 # import the debug toolbar package.
156 try:
157 import debug_toolbar # type: ignore
158
159 urlpatterns += [path("__debug__/", include(debug_toolbar.urls))]
160 except ImportError:
161 pass
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/djangooidc/views.py b/src/djangooidc/views.py
--- a/src/djangooidc/views.py
+++ b/src/djangooidc/views.py
@@ -55,7 +55,6 @@
def openid(request):
"""Redirect the user to an authentication provider (OP)."""
-
# If the session reset because of a server restart, attempt to login again
request.session["acr_value"] = CLIENT.get_default_acr_value()
diff --git a/src/registrar/config/urls.py b/src/registrar/config/urls.py
--- a/src/registrar/config/urls.py
+++ b/src/registrar/config/urls.py
@@ -74,7 +74,7 @@
views.ApplicationWithdrawn.as_view(),
name="application-withdrawn",
),
- path("health/", views.health),
+ path("health", views.health, name="health"),
path("openid/", include("djangooidc.urls")),
path("request/", include((application_urls, APPLICATION_NAMESPACE))),
path("api/v1/available/", available, name="available"),
| {"golden_diff": "diff --git a/src/djangooidc/views.py b/src/djangooidc/views.py\n--- a/src/djangooidc/views.py\n+++ b/src/djangooidc/views.py\n@@ -55,7 +55,6 @@\n \n def openid(request):\n \"\"\"Redirect the user to an authentication provider (OP).\"\"\"\n-\n # If the session reset because of a server restart, attempt to login again\n request.session[\"acr_value\"] = CLIENT.get_default_acr_value()\n \ndiff --git a/src/registrar/config/urls.py b/src/registrar/config/urls.py\n--- a/src/registrar/config/urls.py\n+++ b/src/registrar/config/urls.py\n@@ -74,7 +74,7 @@\n views.ApplicationWithdrawn.as_view(),\n name=\"application-withdrawn\",\n ),\n- path(\"health/\", views.health),\n+ path(\"health\", views.health, name=\"health\"),\n path(\"openid/\", include(\"djangooidc.urls\")),\n path(\"request/\", include((application_urls, APPLICATION_NAMESPACE))),\n path(\"api/v1/available/\", available, name=\"available\"),\n", "issue": "Investigate Health check restarting sandboxes \n### Issue Description\n\nWe noticed a few times in both staging and stable environments that the sandboxes appeared to restart even when no one was on them. We weren't able to track down useful information in logs to help assist besides a reference to a health check being performed. Then when EPP connection was added, we saw this same restarting happening often, in fact just trying to set nameservers alone could result in multiple resets. However, this was very sporadic and without a disernable pattern besides \"when epp calls are happening\". It was determined that the system was failing the health check which does result in a restart. The solution was to increase the timeout on the health check, but that seems like a patch to the problem.\n\nAC:\n\n- [ ] investigate the health check that is currently happening and investigate if this could be resulting by some piece of our architecture (such as a port that is being used and is blocking the health check request)\n- [ ] make follow on tickets as needed or document findings here before closing ticket\n\n### Additional Context (optional)\n\nCloudfoundry has a customer support slack that should be used to get more info on the healthcheck.\n\nTo force the health check to occur with some frequency, lower the healthcheck timeout to 1 second.\n\n### Issue Links\n\nMay be partially mitigated by #1151 \n", "before_files": [{"content": "# coding: utf-8\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth import authenticate, login\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom urllib.parse import parse_qs, urlencode\n\nfrom djangooidc.oidc import Client\nfrom djangooidc import exceptions as o_e\nfrom registrar.models import User\n\nlogger = logging.getLogger(__name__)\n\ntry:\n # Initialize provider using pyOICD\n OP = getattr(settings, \"OIDC_ACTIVE_PROVIDER\")\n CLIENT = Client(OP)\n logger.debug(\"client initialized %s\" % CLIENT)\nexcept Exception as err:\n CLIENT = None # type: ignore\n logger.warning(err)\n logger.warning(\"Unable to configure OpenID Connect provider. 
Users cannot log in.\")\n\n\ndef error_page(request, error):\n \"\"\"Display a sensible message and log the error.\"\"\"\n logger.error(error)\n if isinstance(error, o_e.AuthenticationFailed):\n return render(\n request,\n \"401.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=401,\n )\n if isinstance(error, o_e.InternalError):\n return render(\n request,\n \"500.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=500,\n )\n if isinstance(error, Exception):\n return render(request, \"500.html\", status=500)\n\n\ndef openid(request):\n \"\"\"Redirect the user to an authentication provider (OP).\"\"\"\n\n # If the session reset because of a server restart, attempt to login again\n request.session[\"acr_value\"] = CLIENT.get_default_acr_value()\n\n request.session[\"next\"] = request.GET.get(\"next\", \"/\")\n\n try:\n return CLIENT.create_authn_request(request.session)\n except Exception as err:\n return error_page(request, err)\n\n\ndef login_callback(request):\n \"\"\"Analyze the token returned by the authentication provider (OP).\"\"\"\n try:\n query = parse_qs(request.GET.urlencode())\n userinfo = CLIENT.callback(query, request.session)\n # test for need for identity verification and if it is satisfied\n # if not satisfied, redirect user to login with stepped up acr_value\n if requires_step_up_auth(userinfo):\n # add acr_value to request.session\n request.session[\"acr_value\"] = CLIENT.get_step_up_acr_value()\n return CLIENT.create_authn_request(request.session)\n user = authenticate(request=request, **userinfo)\n if user:\n login(request, user)\n logger.info(\"Successfully logged in user %s\" % user)\n # Double login bug (1507)?\n return redirect(request.session.get(\"next\", \"/\"))\n else:\n raise o_e.BannedUser()\n except o_e.NoStateDefined as nsd_err:\n logger.warning(f\"No State Defined: {nsd_err}\")\n return redirect(request.session.get(\"next\", \"/\"))\n except Exception as err:\n return error_page(request, err)\n\n\ndef requires_step_up_auth(userinfo):\n \"\"\"if User.needs_identity_verification and step_up_acr_value not in\n ial returned from callback, return True\"\"\"\n step_up_acr_value = CLIENT.get_step_up_acr_value()\n acr_value = userinfo.get(\"ial\", \"\")\n uuid = userinfo.get(\"sub\", \"\")\n email = userinfo.get(\"email\", \"\")\n if acr_value != step_up_acr_value:\n # The acr of this attempt is not at the highest level\n # so check if the user needs the higher level\n return User.needs_identity_verification(email, uuid)\n else:\n # This attempt already came back at the highest level\n # so does not require step up\n return False\n\n\ndef logout(request, next_page=None):\n \"\"\"Redirect the user to the authentication provider (OP) logout page.\"\"\"\n try:\n user = request.user\n request_args = {\n \"client_id\": CLIENT.client_id,\n \"state\": request.session[\"state\"],\n }\n if (\n \"post_logout_redirect_uris\" in CLIENT.registration_response.keys()\n and len(CLIENT.registration_response[\"post_logout_redirect_uris\"]) > 0\n ):\n request_args.update(\n {\"post_logout_redirect_uri\": CLIENT.registration_response[\"post_logout_redirect_uris\"][0]}\n )\n url = CLIENT.provider_info[\"end_session_endpoint\"]\n url += \"?\" + urlencode(request_args)\n return HttpResponseRedirect(url)\n except Exception as err:\n return error_page(request, err)\n finally:\n # Always remove Django session stuff - even if not logged out from OP.\n # 
Don't wait for the callback as it may never come.\n auth_logout(request)\n logger.info(\"Successfully logged out user %s\" % user)\n next_page = getattr(settings, \"LOGOUT_REDIRECT_URL\", None)\n if next_page:\n request.session[\"next\"] = next_page\n\n\ndef logout_callback(request):\n \"\"\"Simple redirection view: after logout, redirect to `next`.\"\"\"\n next = request.session.get(\"next\", \"/\")\n return redirect(next)\n", "path": "src/djangooidc/views.py"}, {"content": "\"\"\"URL Configuration\n\nFor more information see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.views.generic import RedirectView\n\nfrom registrar import views\n\nfrom registrar.views.admin_views import ExportData\n\n\nfrom registrar.views.application import Step\nfrom registrar.views.utility import always_404\nfrom api.views import available, get_current_federal, get_current_full\n\n\nAPPLICATION_NAMESPACE = views.ApplicationWizard.URL_NAMESPACE\napplication_urls = [\n path(\"\", views.ApplicationWizard.as_view(), name=\"\"),\n path(\"finished/\", views.Finished.as_view(), name=\"finished\"),\n]\n\n# dynamically generate the other application_urls\nfor step, view in [\n # add/remove steps here\n (Step.ORGANIZATION_TYPE, views.OrganizationType),\n (Step.TRIBAL_GOVERNMENT, views.TribalGovernment),\n (Step.ORGANIZATION_FEDERAL, views.OrganizationFederal),\n (Step.ORGANIZATION_ELECTION, views.OrganizationElection),\n (Step.ORGANIZATION_CONTACT, views.OrganizationContact),\n (Step.ABOUT_YOUR_ORGANIZATION, views.AboutYourOrganization),\n (Step.AUTHORIZING_OFFICIAL, views.AuthorizingOfficial),\n (Step.CURRENT_SITES, views.CurrentSites),\n (Step.DOTGOV_DOMAIN, views.DotgovDomain),\n (Step.PURPOSE, views.Purpose),\n (Step.YOUR_CONTACT, views.YourContact),\n (Step.OTHER_CONTACTS, views.OtherContacts),\n (Step.ANYTHING_ELSE, views.AnythingElse),\n (Step.REQUIREMENTS, views.Requirements),\n (Step.REVIEW, views.Review),\n]:\n application_urls.append(path(f\"{step}/\", view.as_view(), name=step))\n\n\nurlpatterns = [\n path(\"\", views.index, name=\"home\"),\n path(\n \"admin/logout/\",\n RedirectView.as_view(pattern_name=\"logout\", permanent=False),\n ),\n path(\"export_data/\", ExportData.as_view(), name=\"admin_export_data\"),\n path(\"admin/\", admin.site.urls),\n path(\n \"application/<id>/edit/\",\n views.ApplicationWizard.as_view(),\n name=views.ApplicationWizard.EDIT_URL_NAME,\n ),\n path(\n \"application/<int:pk>\",\n views.ApplicationStatus.as_view(),\n name=\"application-status\",\n ),\n path(\n \"application/<int:pk>/withdraw\",\n views.ApplicationWithdrawConfirmation.as_view(),\n name=\"application-withdraw-confirmation\",\n ),\n path(\n \"application/<int:pk>/withdrawconfirmed\",\n views.ApplicationWithdrawn.as_view(),\n name=\"application-withdrawn\",\n ),\n path(\"health/\", views.health),\n path(\"openid/\", include(\"djangooidc.urls\")),\n path(\"request/\", include((application_urls, APPLICATION_NAMESPACE))),\n path(\"api/v1/available/\", available, name=\"available\"),\n path(\"api/v1/get-report/current-federal\", get_current_federal, name=\"get-current-federal\"),\n path(\"api/v1/get-report/current-full\", get_current_full, name=\"get-current-full\"),\n path(\n \"todo\",\n lambda r: always_404(r, \"We forgot to include this link, sorry.\"),\n name=\"todo\",\n ),\n path(\"domain/<int:pk>\", views.DomainView.as_view(), name=\"domain\"),\n path(\"domain/<int:pk>/users\", 
views.DomainUsersView.as_view(), name=\"domain-users\"),\n path(\n \"domain/<int:pk>/dns\",\n views.DomainDNSView.as_view(),\n name=\"domain-dns\",\n ),\n path(\n \"domain/<int:pk>/dns/nameservers\",\n views.DomainNameserversView.as_view(),\n name=\"domain-dns-nameservers\",\n ),\n path(\n \"domain/<int:pk>/dns/dnssec\",\n views.DomainDNSSECView.as_view(),\n name=\"domain-dns-dnssec\",\n ),\n path(\n \"domain/<int:pk>/dns/dnssec/dsdata\",\n views.DomainDsDataView.as_view(),\n name=\"domain-dns-dnssec-dsdata\",\n ),\n path(\n \"domain/<int:pk>/your-contact-information\",\n views.DomainYourContactInformationView.as_view(),\n name=\"domain-your-contact-information\",\n ),\n path(\n \"domain/<int:pk>/org-name-address\",\n views.DomainOrgNameAddressView.as_view(),\n name=\"domain-org-name-address\",\n ),\n path(\n \"domain/<int:pk>/authorizing-official\",\n views.DomainAuthorizingOfficialView.as_view(),\n name=\"domain-authorizing-official\",\n ),\n path(\n \"domain/<int:pk>/security-email\",\n views.DomainSecurityEmailView.as_view(),\n name=\"domain-security-email\",\n ),\n path(\n \"domain/<int:pk>/users/add\",\n views.DomainAddUserView.as_view(),\n name=\"domain-users-add\",\n ),\n path(\n \"invitation/<int:pk>/delete\",\n views.DomainInvitationDeleteView.as_view(http_method_names=[\"post\"]),\n name=\"invitation-delete\",\n ),\n path(\n \"application/<int:pk>/delete\",\n views.DomainApplicationDeleteView.as_view(http_method_names=[\"post\"]),\n name=\"application-delete\",\n ),\n path(\n \"domain/<int:pk>/users/<int:user_pk>/delete\",\n views.DomainDeleteUserView.as_view(http_method_names=[\"post\"]),\n name=\"domain-user-delete\",\n ),\n]\n\n# we normally would guard these with `if settings.DEBUG` but tests run with\n# DEBUG = False even when these apps have been loaded because settings.DEBUG\n# was actually True. Instead, let's add these URLs any time we are able to\n# import the debug toolbar package.\ntry:\n import debug_toolbar # type: ignore\n\n urlpatterns += [path(\"__debug__/\", include(debug_toolbar.urls))]\nexcept ImportError:\n pass\n", "path": "src/registrar/config/urls.py"}], "after_files": [{"content": "# coding: utf-8\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth import authenticate, login\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom urllib.parse import parse_qs, urlencode\n\nfrom djangooidc.oidc import Client\nfrom djangooidc import exceptions as o_e\nfrom registrar.models import User\n\nlogger = logging.getLogger(__name__)\n\ntry:\n # Initialize provider using pyOICD\n OP = getattr(settings, \"OIDC_ACTIVE_PROVIDER\")\n CLIENT = Client(OP)\n logger.debug(\"client initialized %s\" % CLIENT)\nexcept Exception as err:\n CLIENT = None # type: ignore\n logger.warning(err)\n logger.warning(\"Unable to configure OpenID Connect provider. 
Users cannot log in.\")\n\n\ndef error_page(request, error):\n \"\"\"Display a sensible message and log the error.\"\"\"\n logger.error(error)\n if isinstance(error, o_e.AuthenticationFailed):\n return render(\n request,\n \"401.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=401,\n )\n if isinstance(error, o_e.InternalError):\n return render(\n request,\n \"500.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=500,\n )\n if isinstance(error, Exception):\n return render(request, \"500.html\", status=500)\n\n\ndef openid(request):\n \"\"\"Redirect the user to an authentication provider (OP).\"\"\"\n # If the session reset because of a server restart, attempt to login again\n request.session[\"acr_value\"] = CLIENT.get_default_acr_value()\n\n request.session[\"next\"] = request.GET.get(\"next\", \"/\")\n\n try:\n return CLIENT.create_authn_request(request.session)\n except Exception as err:\n return error_page(request, err)\n\n\ndef login_callback(request):\n \"\"\"Analyze the token returned by the authentication provider (OP).\"\"\"\n try:\n query = parse_qs(request.GET.urlencode())\n userinfo = CLIENT.callback(query, request.session)\n # test for need for identity verification and if it is satisfied\n # if not satisfied, redirect user to login with stepped up acr_value\n if requires_step_up_auth(userinfo):\n # add acr_value to request.session\n request.session[\"acr_value\"] = CLIENT.get_step_up_acr_value()\n return CLIENT.create_authn_request(request.session)\n user = authenticate(request=request, **userinfo)\n if user:\n login(request, user)\n logger.info(\"Successfully logged in user %s\" % user)\n # Double login bug (1507)?\n return redirect(request.session.get(\"next\", \"/\"))\n else:\n raise o_e.BannedUser()\n except o_e.NoStateDefined as nsd_err:\n logger.warning(f\"No State Defined: {nsd_err}\")\n return redirect(request.session.get(\"next\", \"/\"))\n except Exception as err:\n return error_page(request, err)\n\n\ndef requires_step_up_auth(userinfo):\n \"\"\"if User.needs_identity_verification and step_up_acr_value not in\n ial returned from callback, return True\"\"\"\n step_up_acr_value = CLIENT.get_step_up_acr_value()\n acr_value = userinfo.get(\"ial\", \"\")\n uuid = userinfo.get(\"sub\", \"\")\n email = userinfo.get(\"email\", \"\")\n if acr_value != step_up_acr_value:\n # The acr of this attempt is not at the highest level\n # so check if the user needs the higher level\n return User.needs_identity_verification(email, uuid)\n else:\n # This attempt already came back at the highest level\n # so does not require step up\n return False\n\n\ndef logout(request, next_page=None):\n \"\"\"Redirect the user to the authentication provider (OP) logout page.\"\"\"\n try:\n user = request.user\n request_args = {\n \"client_id\": CLIENT.client_id,\n \"state\": request.session[\"state\"],\n }\n if (\n \"post_logout_redirect_uris\" in CLIENT.registration_response.keys()\n and len(CLIENT.registration_response[\"post_logout_redirect_uris\"]) > 0\n ):\n request_args.update(\n {\"post_logout_redirect_uri\": CLIENT.registration_response[\"post_logout_redirect_uris\"][0]}\n )\n url = CLIENT.provider_info[\"end_session_endpoint\"]\n url += \"?\" + urlencode(request_args)\n return HttpResponseRedirect(url)\n except Exception as err:\n return error_page(request, err)\n finally:\n # Always remove Django session stuff - even if not logged out from OP.\n # Don't 
wait for the callback as it may never come.\n auth_logout(request)\n logger.info(\"Successfully logged out user %s\" % user)\n next_page = getattr(settings, \"LOGOUT_REDIRECT_URL\", None)\n if next_page:\n request.session[\"next\"] = next_page\n\n\ndef logout_callback(request):\n \"\"\"Simple redirection view: after logout, redirect to `next`.\"\"\"\n next = request.session.get(\"next\", \"/\")\n return redirect(next)\n", "path": "src/djangooidc/views.py"}, {"content": "\"\"\"URL Configuration\n\nFor more information see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.views.generic import RedirectView\n\nfrom registrar import views\n\nfrom registrar.views.admin_views import ExportData\n\n\nfrom registrar.views.application import Step\nfrom registrar.views.utility import always_404\nfrom api.views import available, get_current_federal, get_current_full\n\n\nAPPLICATION_NAMESPACE = views.ApplicationWizard.URL_NAMESPACE\napplication_urls = [\n path(\"\", views.ApplicationWizard.as_view(), name=\"\"),\n path(\"finished/\", views.Finished.as_view(), name=\"finished\"),\n]\n\n# dynamically generate the other application_urls\nfor step, view in [\n # add/remove steps here\n (Step.ORGANIZATION_TYPE, views.OrganizationType),\n (Step.TRIBAL_GOVERNMENT, views.TribalGovernment),\n (Step.ORGANIZATION_FEDERAL, views.OrganizationFederal),\n (Step.ORGANIZATION_ELECTION, views.OrganizationElection),\n (Step.ORGANIZATION_CONTACT, views.OrganizationContact),\n (Step.ABOUT_YOUR_ORGANIZATION, views.AboutYourOrganization),\n (Step.AUTHORIZING_OFFICIAL, views.AuthorizingOfficial),\n (Step.CURRENT_SITES, views.CurrentSites),\n (Step.DOTGOV_DOMAIN, views.DotgovDomain),\n (Step.PURPOSE, views.Purpose),\n (Step.YOUR_CONTACT, views.YourContact),\n (Step.OTHER_CONTACTS, views.OtherContacts),\n (Step.ANYTHING_ELSE, views.AnythingElse),\n (Step.REQUIREMENTS, views.Requirements),\n (Step.REVIEW, views.Review),\n]:\n application_urls.append(path(f\"{step}/\", view.as_view(), name=step))\n\n\nurlpatterns = [\n path(\"\", views.index, name=\"home\"),\n path(\n \"admin/logout/\",\n RedirectView.as_view(pattern_name=\"logout\", permanent=False),\n ),\n path(\"export_data/\", ExportData.as_view(), name=\"admin_export_data\"),\n path(\"admin/\", admin.site.urls),\n path(\n \"application/<id>/edit/\",\n views.ApplicationWizard.as_view(),\n name=views.ApplicationWizard.EDIT_URL_NAME,\n ),\n path(\n \"application/<int:pk>\",\n views.ApplicationStatus.as_view(),\n name=\"application-status\",\n ),\n path(\n \"application/<int:pk>/withdraw\",\n views.ApplicationWithdrawConfirmation.as_view(),\n name=\"application-withdraw-confirmation\",\n ),\n path(\n \"application/<int:pk>/withdrawconfirmed\",\n views.ApplicationWithdrawn.as_view(),\n name=\"application-withdrawn\",\n ),\n path(\"health\", views.health, name=\"health\"),\n path(\"openid/\", include(\"djangooidc.urls\")),\n path(\"request/\", include((application_urls, APPLICATION_NAMESPACE))),\n path(\"api/v1/available/\", available, name=\"available\"),\n path(\"api/v1/get-report/current-federal\", get_current_federal, name=\"get-current-federal\"),\n path(\"api/v1/get-report/current-full\", get_current_full, name=\"get-current-full\"),\n path(\n \"todo\",\n lambda r: always_404(r, \"We forgot to include this link, sorry.\"),\n name=\"todo\",\n ),\n path(\"domain/<int:pk>\", views.DomainView.as_view(), name=\"domain\"),\n path(\"domain/<int:pk>/users\", 
views.DomainUsersView.as_view(), name=\"domain-users\"),\n path(\n \"domain/<int:pk>/dns\",\n views.DomainDNSView.as_view(),\n name=\"domain-dns\",\n ),\n path(\n \"domain/<int:pk>/dns/nameservers\",\n views.DomainNameserversView.as_view(),\n name=\"domain-dns-nameservers\",\n ),\n path(\n \"domain/<int:pk>/dns/dnssec\",\n views.DomainDNSSECView.as_view(),\n name=\"domain-dns-dnssec\",\n ),\n path(\n \"domain/<int:pk>/dns/dnssec/dsdata\",\n views.DomainDsDataView.as_view(),\n name=\"domain-dns-dnssec-dsdata\",\n ),\n path(\n \"domain/<int:pk>/your-contact-information\",\n views.DomainYourContactInformationView.as_view(),\n name=\"domain-your-contact-information\",\n ),\n path(\n \"domain/<int:pk>/org-name-address\",\n views.DomainOrgNameAddressView.as_view(),\n name=\"domain-org-name-address\",\n ),\n path(\n \"domain/<int:pk>/authorizing-official\",\n views.DomainAuthorizingOfficialView.as_view(),\n name=\"domain-authorizing-official\",\n ),\n path(\n \"domain/<int:pk>/security-email\",\n views.DomainSecurityEmailView.as_view(),\n name=\"domain-security-email\",\n ),\n path(\n \"domain/<int:pk>/users/add\",\n views.DomainAddUserView.as_view(),\n name=\"domain-users-add\",\n ),\n path(\n \"invitation/<int:pk>/delete\",\n views.DomainInvitationDeleteView.as_view(http_method_names=[\"post\"]),\n name=\"invitation-delete\",\n ),\n path(\n \"application/<int:pk>/delete\",\n views.DomainApplicationDeleteView.as_view(http_method_names=[\"post\"]),\n name=\"application-delete\",\n ),\n path(\n \"domain/<int:pk>/users/<int:user_pk>/delete\",\n views.DomainDeleteUserView.as_view(http_method_names=[\"post\"]),\n name=\"domain-user-delete\",\n ),\n]\n\n# we normally would guard these with `if settings.DEBUG` but tests run with\n# DEBUG = False even when these apps have been loaded because settings.DEBUG\n# was actually True. Instead, let's add these URLs any time we are able to\n# import the debug toolbar package.\ntry:\n import debug_toolbar # type: ignore\n\n urlpatterns += [path(\"__debug__/\", include(debug_toolbar.urls))]\nexcept ImportError:\n pass\n", "path": "src/registrar/config/urls.py"}]} | 3,632 | 236 |
gh_patches_debug_29156 | rasdani/github-patches | git_diff | pypi__warehouse-666 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fail gracefully if Redis isn't Up
Right now if the Redis instance backing the statistics goes down, we'll start throwing errors when rendering the page, which is obviously less than desirable. Ideally we want to fail gracefully here, so that if Redis goes down we simply stop rendering statistics until it comes back. A tricky part is that we'll want to use ESI so that we don't cache the failure output in place of the statistics for a long time. This makes sense anyway, since we want the stats to be updated more quickly than the bulk of the page. Additionally, we should ensure that we log the error regardless of whether we raise an exception or not.
--- END ISSUE ---
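As a purely illustrative sketch (not the actual Warehouse fix), the graceful-degradation idea amounts to catching the Redis error around the stats lookups, logging it, and rendering the page without numbers. The `get_*_stats` method names come from the view code below, while the use of redis-py's `redis.RedisError` is an assumption about the underlying client:
```python
# Hypothetical helper: degrade to "no stats" when Redis is unreachable.
import logging

import redis

log = logging.getLogger(__name__)


def safe_download_stats(stats_svc, project_name):
    """Return download stats, or None placeholders if Redis is down."""
    try:
        return {
            "daily": stats_svc.get_daily_stats(project_name),
            "weekly": stats_svc.get_weekly_stats(project_name),
            "monthly": stats_svc.get_monthly_stats(project_name),
        }
    except redis.RedisError:
        # Log the failure but keep rendering the rest of the page.
        log.exception("Could not load download stats for %s", project_name)
        return {"daily": None, "weekly": None, "monthly": None}
```
Serving the statistics from a separate ESI fragment with a short cache lifetime keeps any such failure output from being cached alongside the main page.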
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/routes.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13
14 def includeme(config):
15 # Basic global routes
16 config.add_route("index", "/", read_only=True)
17 config.add_route("robots.txt", "/robots.txt", read_only=True)
18 config.add_route("index.sitemap.xml", "/sitemap.xml", read_only=True)
19 config.add_route(
20 "bucket.sitemap.xml",
21 "/{bucket}.sitemap.xml",
22 read_only=True,
23 )
24
25 # ESI Routes
26 config.add_route(
27 "esi.current-user-indicator",
28 "/_esi/current-user-indicator/",
29 read_only=True,
30 )
31
32 # Accounts
33 config.add_route(
34 "accounts.profile",
35 "/user/{username}/",
36 factory="warehouse.accounts.models:UserFactory",
37 traverse="/{username}",
38 read_only=True,
39 )
40 config.add_route("accounts.login", "/account/login/")
41 config.add_route("accounts.logout", "/account/logout/")
42
43 # Packaging
44 config.add_route(
45 "packaging.project",
46 "/project/{name}/",
47 factory="warehouse.packaging.models:ProjectFactory",
48 traverse="/{name}",
49 read_only=True,
50 )
51 config.add_route(
52 "packaging.release",
53 "/project/{name}/{version}/",
54 factory="warehouse.packaging.models:ProjectFactory",
55 traverse="/{name}/{version}",
56 read_only=True,
57 )
58 config.add_route("packaging.file", "/packages/{path:.*}", read_only=True)
59
60 # Legacy URLs
61 config.add_route("legacy.api.simple.index", "/simple/", read_only=True)
62 config.add_route(
63 "legacy.api.simple.detail",
64 "/simple/{name}/",
65 factory="warehouse.packaging.models:ProjectFactory",
66 traverse="/{name}/",
67 read_only=True,
68 )
69 config.add_route(
70 "legacy.api.json.project",
71 "/pypi/{name}/json",
72 factory="warehouse.packaging.models:ProjectFactory",
73 traverse="/{name}",
74 read_only=True,
75 )
76 config.add_route(
77 "legacy.api.json.release",
78 "/pypi/{name}/{version}/json",
79 factory="warehouse.packaging.models:ProjectFactory",
80 traverse="/{name}/{version}",
81 read_only=True,
82 )
83
84 # Legacy Action URLs
85 config.add_pypi_action_route("legacy.api.pypi.file_upload", "file_upload")
86 config.add_pypi_action_route("legacy.api.pypi.submit", "submit")
87 config.add_pypi_action_route(
88 "legacy.api.pypi.submit_pkg_info",
89 "submit_pkg_info",
90 )
91 config.add_pypi_action_route("legacy.api.pypi.doc_upload", "doc_upload")
92 config.add_pypi_action_route("legacy.api.pypi.doap", "doap")
93
94 # Legacy XMLRPC
95 config.add_xmlrpc_endpoint(
96 "pypi",
97 pattern="/pypi",
98 header="Content-Type:text/xml",
99 read_only=True,
100 )
101
102 # Legacy Documentation
103 config.add_route("legacy.docs", config.registry.settings["docs.url"])
104
105 # Legacy Redirects
106 config.add_redirect("/pypi/{name}/", "/project/{name}/")
107 config.add_redirect(
108 "/pypi/{name}/{version}/",
109 "/project/{name}/{version}/",
110 )
111
```
Path: `warehouse/packaging/views.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
14 from pyramid.response import FileIter, Response
15 from pyramid.view import view_config
16 from sqlalchemy.orm import joinedload
17 from sqlalchemy.orm.exc import NoResultFound
18
19 from warehouse.accounts.models import User
20 from warehouse.cache.http import cache_control
21 from warehouse.cache.origin import origin_cache
22 from warehouse.packaging.interfaces import IDownloadStatService, IFileStorage
23 from warehouse.packaging.models import Release, File, Role
24
25
26 @view_config(
27 route_name="packaging.project",
28 renderer="packaging/detail.html",
29 decorator=[
30 origin_cache(
31 1 * 24 * 60 * 60, # 1 day
32 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
33 stale_if_error=5 * 24 * 60 * 60, # 5 days
34 ),
35 ],
36 )
37 def project_detail(project, request):
38 if project.name != request.matchdict.get("name", project.name):
39 return HTTPMovedPermanently(
40 request.current_route_path(name=project.name),
41 )
42
43 try:
44 release = project.releases.order_by(
45 Release._pypi_ordering.desc()
46 ).limit(1).one()
47 except NoResultFound:
48 return HTTPNotFound()
49
50 return release_detail(release, request)
51
52
53 @view_config(
54 route_name="packaging.release",
55 renderer="packaging/detail.html",
56 decorator=[
57 origin_cache(
58 1 * 24 * 60 * 60, # 1 day
59 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
60 stale_if_error=5 * 24 * 60 * 60, # 5 days
61 ),
62 ],
63 )
64 def release_detail(release, request):
65 project = release.project
66
67 if project.name != request.matchdict.get("name", project.name):
68 return HTTPMovedPermanently(
69 request.current_route_path(name=project.name),
70 )
71
72 # Get all of the registered versions for this Project, in order of newest
73 # to oldest.
74 all_releases = (
75 project.releases
76 .with_entities(Release.version, Release.created)
77 .order_by(Release._pypi_ordering.desc())
78 .all()
79 )
80
81 # Get all of the maintainers for this project.
82 maintainers = [
83 r.user
84 for r in (
85 request.db.query(Role)
86 .join(User)
87 .filter(Role.project == project)
88 .distinct(User.username)
89 .order_by(User.username)
90 .all()
91 )
92 ]
93
94 stats_svc = request.find_service(IDownloadStatService)
95
96 return {
97 "project": project,
98 "release": release,
99 "files": release.files.all(),
100 "all_releases": all_releases,
101 "maintainers": maintainers,
102 "download_stats": {
103 "daily": stats_svc.get_daily_stats(project.name),
104 "weekly": stats_svc.get_weekly_stats(project.name),
105 "monthly": stats_svc.get_monthly_stats(project.name),
106 },
107 }
108
109
110 @view_config(
111 route_name="packaging.file",
112 decorator=[
113 cache_control(365 * 24 * 60 * 60), # 1 year
114 origin_cache(
115 365 * 24 * 60 * 60, # 1 year
116 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
117 stale_if_error=5 * 24 * 60 * 60, # 5 days
118 ),
119 ],
120 )
121 def packages(request):
122 # The amount of logic that we can do in this view is very limited, this
123 # view needs to be able to be handled by Fastly directly hitting S3 instead
124 # of actually hitting this view. This more or less means that we're limited
125 # to just serving the actual file.
126
127 # Grab the path of the file that we're attempting to serve
128 path = request.matchdict["path"]
129
130 # We need to look up the File that is associated with this path, either the
131 # package path or the pgp path. If that doesn't exist then we'll bail out
132 # early with a 404.
133 try:
134 file_ = (
135 request.db.query(File)
136 .options(joinedload(File.release)
137 .joinedload(Release.project))
138 .filter((File.path == path) | (File.pgp_path == path))
139 .one()
140 )
141 except NoResultFound:
142 return HTTPNotFound()
143
144 # If this request is for a PGP signature, and the file doesn't have a PGP
145 # signature, then we can go ahead and 404 now before hitting the file
146 # storage.
147 if path == file_.pgp_path and not file_.has_signature:
148 return HTTPNotFound()
149
150 # Try to get the file from the file file storage service, logging an error
151 # and returning a HTTPNotFound if one can't be found.
152 try:
153 f = request.find_service(IFileStorage).get(path)
154 except FileNotFoundError:
155 request.log.error("missing file data", path=path)
156 return HTTPNotFound()
157
158 # If the path we're accessing is the path for the package itself, as
159 # opposed to the path for the signature, then we can include a
160 # Content-Length header.
161 content_length = None
162 if path == file_.path:
163 content_length = file_.size
164
165 return Response(
166 # If we have a wsgi.file_wrapper, we'll want to use that so that, if
167 # possible, this will use an optimized method of sending. Otherwise
168 # we'll just use Pyramid's FileIter as a fallback.
169 app_iter=request.environ.get("wsgi.file_wrapper", FileIter)(f),
170 # We use application/octet-stream instead of something nicer because
171 # different HTTP libraries will treat different combinations of
172 # Content-Type and Content-Encoding differently. The only thing that
173 # works sanely across all things without having something in the middle
174 # decide it can decompress the result to "help" the end user is with
175 # Content-Type: applicaton/octet-stream and no Content-Encoding.
176 content_type="application/octet-stream",
177 content_encoding=None,
178 # We need to specify an ETag for this response. Since ETags compared
179 # between URLs have no meaning (and thus, is safe for two URLs to share
180 # the same ETag) we will just use the MD5 hash of the package as our
181 # ETag.
182 etag=file_.md5_digest,
183 # Similarly to the ETag header, we'll just use the date that the file
184 # was uploaded as the Last-Modified header.
185 last_modified=file_.upload_time,
186 # If we have a Content-Length, we'll go ahead and use it here to
187 # hopefully enable the server and clients alike to be smarter about how
188 # they handle downloading this response.
189 content_length=content_length,
190 )
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/packaging/views.py b/warehouse/packaging/views.py
--- a/warehouse/packaging/views.py
+++ b/warehouse/packaging/views.py
@@ -91,19 +91,38 @@
)
]
- stats_svc = request.find_service(IDownloadStatService)
-
return {
"project": project,
"release": release,
"files": release.files.all(),
"all_releases": all_releases,
"maintainers": maintainers,
- "download_stats": {
- "daily": stats_svc.get_daily_stats(project.name),
- "weekly": stats_svc.get_weekly_stats(project.name),
- "monthly": stats_svc.get_monthly_stats(project.name),
- },
+ }
+
+
+@view_config(
+ route_name="esi.project-stats",
+ renderer="packaging/includes/project-stats.html",
+ decorator=[
+ origin_cache(
+ 15 * 60, # 15 Minutes
+ stale_while_revalidate=30 * 60, # 30 minutes
+ stale_if_error=30 * 60, # 30 minutes
+ ),
+ ],
+)
+def project_stats(project, request):
+ if project.name != request.matchdict.get("name", project.name):
+ return HTTPMovedPermanently(
+ request.current_route_path(name=project.name),
+ )
+
+ stats_svc = request.find_service(IDownloadStatService)
+
+ return {
+ "daily": stats_svc.get_daily_stats(project.name),
+ "weekly": stats_svc.get_weekly_stats(project.name),
+ "monthly": stats_svc.get_monthly_stats(project.name),
}
diff --git a/warehouse/routes.py b/warehouse/routes.py
--- a/warehouse/routes.py
+++ b/warehouse/routes.py
@@ -28,6 +28,13 @@
"/_esi/current-user-indicator/",
read_only=True,
)
+ config.add_route(
+ "esi.project-stats",
+ "/_esi/project-stats/{name}/",
+ factory="warehouse.packaging.models:ProjectFactory",
+ traverse="/{name}",
+ read_only=True,
+ )
# Accounts
config.add_route(
| {"golden_diff": "diff --git a/warehouse/packaging/views.py b/warehouse/packaging/views.py\n--- a/warehouse/packaging/views.py\n+++ b/warehouse/packaging/views.py\n@@ -91,19 +91,38 @@\n )\n ]\n \n- stats_svc = request.find_service(IDownloadStatService)\n-\n return {\n \"project\": project,\n \"release\": release,\n \"files\": release.files.all(),\n \"all_releases\": all_releases,\n \"maintainers\": maintainers,\n- \"download_stats\": {\n- \"daily\": stats_svc.get_daily_stats(project.name),\n- \"weekly\": stats_svc.get_weekly_stats(project.name),\n- \"monthly\": stats_svc.get_monthly_stats(project.name),\n- },\n+ }\n+\n+\n+@view_config(\n+ route_name=\"esi.project-stats\",\n+ renderer=\"packaging/includes/project-stats.html\",\n+ decorator=[\n+ origin_cache(\n+ 15 * 60, # 15 Minutes\n+ stale_while_revalidate=30 * 60, # 30 minutes\n+ stale_if_error=30 * 60, # 30 minutes\n+ ),\n+ ],\n+)\n+def project_stats(project, request):\n+ if project.name != request.matchdict.get(\"name\", project.name):\n+ return HTTPMovedPermanently(\n+ request.current_route_path(name=project.name),\n+ )\n+\n+ stats_svc = request.find_service(IDownloadStatService)\n+\n+ return {\n+ \"daily\": stats_svc.get_daily_stats(project.name),\n+ \"weekly\": stats_svc.get_weekly_stats(project.name),\n+ \"monthly\": stats_svc.get_monthly_stats(project.name),\n }\n \n \ndiff --git a/warehouse/routes.py b/warehouse/routes.py\n--- a/warehouse/routes.py\n+++ b/warehouse/routes.py\n@@ -28,6 +28,13 @@\n \"/_esi/current-user-indicator/\",\n read_only=True,\n )\n+ config.add_route(\n+ \"esi.project-stats\",\n+ \"/_esi/project-stats/{name}/\",\n+ factory=\"warehouse.packaging.models:ProjectFactory\",\n+ traverse=\"/{name}\",\n+ read_only=True,\n+ )\n \n # Accounts\n config.add_route(\n", "issue": "Fail gracefully if Redis isn't Up\nRight now if the redis for the statistics go down, we'll start throwing errors when rendering the page, which is obviously less than desirable. Ideally we'll want to fail gracefully for this so that if redis goes down we'll just stop rendering statistics until it comes back. A tricky thing here is that we'll want to use ESI so that we don't cache for a long time the statistics with the failure output instead of statistics. This makes sense anyways since we'll want our stats to be updated quicker than the bulk of the page anyways. 
Additionally, we should ensure that we log the error regardless of whether we raise an exception or not.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef includeme(config):\n # Basic global routes\n config.add_route(\"index\", \"/\", read_only=True)\n config.add_route(\"robots.txt\", \"/robots.txt\", read_only=True)\n config.add_route(\"index.sitemap.xml\", \"/sitemap.xml\", read_only=True)\n config.add_route(\n \"bucket.sitemap.xml\",\n \"/{bucket}.sitemap.xml\",\n read_only=True,\n )\n\n # ESI Routes\n config.add_route(\n \"esi.current-user-indicator\",\n \"/_esi/current-user-indicator/\",\n read_only=True,\n )\n\n # Accounts\n config.add_route(\n \"accounts.profile\",\n \"/user/{username}/\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n read_only=True,\n )\n config.add_route(\"accounts.login\", \"/account/login/\")\n config.add_route(\"accounts.logout\", \"/account/logout/\")\n\n # Packaging\n config.add_route(\n \"packaging.project\",\n \"/project/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n )\n config.add_route(\n \"packaging.release\",\n \"/project/{name}/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n read_only=True,\n )\n config.add_route(\"packaging.file\", \"/packages/{path:.*}\", read_only=True)\n\n # Legacy URLs\n config.add_route(\"legacy.api.simple.index\", \"/simple/\", read_only=True)\n config.add_route(\n \"legacy.api.simple.detail\",\n \"/simple/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/\",\n read_only=True,\n )\n config.add_route(\n \"legacy.api.json.project\",\n \"/pypi/{name}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n )\n config.add_route(\n \"legacy.api.json.release\",\n \"/pypi/{name}/{version}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n read_only=True,\n )\n\n # Legacy Action URLs\n config.add_pypi_action_route(\"legacy.api.pypi.file_upload\", \"file_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.submit\", \"submit\")\n config.add_pypi_action_route(\n \"legacy.api.pypi.submit_pkg_info\",\n \"submit_pkg_info\",\n )\n config.add_pypi_action_route(\"legacy.api.pypi.doc_upload\", \"doc_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.doap\", \"doap\")\n\n # Legacy XMLRPC\n config.add_xmlrpc_endpoint(\n \"pypi\",\n pattern=\"/pypi\",\n header=\"Content-Type:text/xml\",\n read_only=True,\n )\n\n # Legacy Documentation\n config.add_route(\"legacy.docs\", config.registry.settings[\"docs.url\"])\n\n # Legacy Redirects\n config.add_redirect(\"/pypi/{name}/\", \"/project/{name}/\")\n config.add_redirect(\n \"/pypi/{name}/{version}/\",\n \"/project/{name}/{version}/\",\n )\n", "path": "warehouse/routes.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.response import FileIter, Response\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.interfaces import IDownloadStatService, IFileStorage\nfrom warehouse.packaging.models import Release, File, Role\n\n\n@view_config(\n route_name=\"packaging.project\",\n renderer=\"packaging/detail.html\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef project_detail(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n try:\n release = project.releases.order_by(\n Release._pypi_ordering.desc()\n ).limit(1).one()\n except NoResultFound:\n return HTTPNotFound()\n\n return release_detail(release, request)\n\n\n@view_config(\n route_name=\"packaging.release\",\n renderer=\"packaging/detail.html\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef release_detail(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n # Get all of the registered versions for this Project, in order of newest\n # to oldest.\n all_releases = (\n project.releases\n .with_entities(Release.version, Release.created)\n .order_by(Release._pypi_ordering.desc())\n .all()\n )\n\n # Get all of the maintainers for this project.\n maintainers = [\n r.user\n for r in (\n request.db.query(Role)\n .join(User)\n .filter(Role.project == project)\n .distinct(User.username)\n .order_by(User.username)\n .all()\n )\n ]\n\n stats_svc = request.find_service(IDownloadStatService)\n\n return {\n \"project\": project,\n \"release\": release,\n \"files\": release.files.all(),\n \"all_releases\": all_releases,\n \"maintainers\": maintainers,\n \"download_stats\": {\n \"daily\": stats_svc.get_daily_stats(project.name),\n \"weekly\": stats_svc.get_weekly_stats(project.name),\n \"monthly\": stats_svc.get_monthly_stats(project.name),\n },\n }\n\n\n@view_config(\n route_name=\"packaging.file\",\n decorator=[\n cache_control(365 * 24 * 60 * 60), # 1 year\n origin_cache(\n 365 * 24 * 60 * 60, # 1 year\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef packages(request):\n # The amount of logic that we can do in this view is very limited, this\n # view needs to be able to be handled by Fastly directly hitting S3 instead\n # of actually hitting this view. 
This more or less means that we're limited\n # to just serving the actual file.\n\n # Grab the path of the file that we're attempting to serve\n path = request.matchdict[\"path\"]\n\n # We need to look up the File that is associated with this path, either the\n # package path or the pgp path. If that doesn't exist then we'll bail out\n # early with a 404.\n try:\n file_ = (\n request.db.query(File)\n .options(joinedload(File.release)\n .joinedload(Release.project))\n .filter((File.path == path) | (File.pgp_path == path))\n .one()\n )\n except NoResultFound:\n return HTTPNotFound()\n\n # If this request is for a PGP signature, and the file doesn't have a PGP\n # signature, then we can go ahead and 404 now before hitting the file\n # storage.\n if path == file_.pgp_path and not file_.has_signature:\n return HTTPNotFound()\n\n # Try to get the file from the file file storage service, logging an error\n # and returning a HTTPNotFound if one can't be found.\n try:\n f = request.find_service(IFileStorage).get(path)\n except FileNotFoundError:\n request.log.error(\"missing file data\", path=path)\n return HTTPNotFound()\n\n # If the path we're accessing is the path for the package itself, as\n # opposed to the path for the signature, then we can include a\n # Content-Length header.\n content_length = None\n if path == file_.path:\n content_length = file_.size\n\n return Response(\n # If we have a wsgi.file_wrapper, we'll want to use that so that, if\n # possible, this will use an optimized method of sending. Otherwise\n # we'll just use Pyramid's FileIter as a fallback.\n app_iter=request.environ.get(\"wsgi.file_wrapper\", FileIter)(f),\n # We use application/octet-stream instead of something nicer because\n # different HTTP libraries will treat different combinations of\n # Content-Type and Content-Encoding differently. The only thing that\n # works sanely across all things without having something in the middle\n # decide it can decompress the result to \"help\" the end user is with\n # Content-Type: applicaton/octet-stream and no Content-Encoding.\n content_type=\"application/octet-stream\",\n content_encoding=None,\n # We need to specify an ETag for this response. 
Since ETags compared\n # between URLs have no meaning (and thus, is safe for two URLs to share\n # the same ETag) we will just use the MD5 hash of the package as our\n # ETag.\n etag=file_.md5_digest,\n # Similarly to the ETag header, we'll just use the date that the file\n # was uploaded as the Last-Modified header.\n last_modified=file_.upload_time,\n # If we have a Content-Length, we'll go ahead and use it here to\n # hopefully enable the server and clients alike to be smarter about how\n # they handle downloading this response.\n content_length=content_length,\n )\n", "path": "warehouse/packaging/views.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef includeme(config):\n # Basic global routes\n config.add_route(\"index\", \"/\", read_only=True)\n config.add_route(\"robots.txt\", \"/robots.txt\", read_only=True)\n config.add_route(\"index.sitemap.xml\", \"/sitemap.xml\", read_only=True)\n config.add_route(\n \"bucket.sitemap.xml\",\n \"/{bucket}.sitemap.xml\",\n read_only=True,\n )\n\n # ESI Routes\n config.add_route(\n \"esi.current-user-indicator\",\n \"/_esi/current-user-indicator/\",\n read_only=True,\n )\n config.add_route(\n \"esi.project-stats\",\n \"/_esi/project-stats/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n )\n\n # Accounts\n config.add_route(\n \"accounts.profile\",\n \"/user/{username}/\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n read_only=True,\n )\n config.add_route(\"accounts.login\", \"/account/login/\")\n config.add_route(\"accounts.logout\", \"/account/logout/\")\n\n # Packaging\n config.add_route(\n \"packaging.project\",\n \"/project/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n )\n config.add_route(\n \"packaging.release\",\n \"/project/{name}/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n read_only=True,\n )\n config.add_route(\"packaging.file\", \"/packages/{path:.*}\", read_only=True)\n\n # Legacy URLs\n config.add_route(\"legacy.api.simple.index\", \"/simple/\", read_only=True)\n config.add_route(\n \"legacy.api.simple.detail\",\n \"/simple/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/\",\n read_only=True,\n )\n config.add_route(\n \"legacy.api.json.project\",\n \"/pypi/{name}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n )\n config.add_route(\n \"legacy.api.json.release\",\n \"/pypi/{name}/{version}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n read_only=True,\n )\n\n # Legacy Action URLs\n config.add_pypi_action_route(\"legacy.api.pypi.file_upload\", \"file_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.submit\", \"submit\")\n config.add_pypi_action_route(\n \"legacy.api.pypi.submit_pkg_info\",\n 
\"submit_pkg_info\",\n )\n config.add_pypi_action_route(\"legacy.api.pypi.doc_upload\", \"doc_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.doap\", \"doap\")\n\n # Legacy XMLRPC\n config.add_xmlrpc_endpoint(\n \"pypi\",\n pattern=\"/pypi\",\n header=\"Content-Type:text/xml\",\n read_only=True,\n )\n\n # Legacy Documentation\n config.add_route(\"legacy.docs\", config.registry.settings[\"docs.url\"])\n\n # Legacy Redirects\n config.add_redirect(\"/pypi/{name}/\", \"/project/{name}/\")\n config.add_redirect(\n \"/pypi/{name}/{version}/\",\n \"/project/{name}/{version}/\",\n )\n", "path": "warehouse/routes.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.response import FileIter, Response\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.interfaces import IDownloadStatService, IFileStorage\nfrom warehouse.packaging.models import Release, File, Role\n\n\n@view_config(\n route_name=\"packaging.project\",\n renderer=\"packaging/detail.html\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef project_detail(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n try:\n release = project.releases.order_by(\n Release._pypi_ordering.desc()\n ).limit(1).one()\n except NoResultFound:\n return HTTPNotFound()\n\n return release_detail(release, request)\n\n\n@view_config(\n route_name=\"packaging.release\",\n renderer=\"packaging/detail.html\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef release_detail(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n # Get all of the registered versions for this Project, in order of newest\n # to oldest.\n all_releases = (\n project.releases\n .with_entities(Release.version, Release.created)\n .order_by(Release._pypi_ordering.desc())\n .all()\n )\n\n # Get all of the maintainers for this project.\n maintainers = [\n r.user\n for r in (\n request.db.query(Role)\n .join(User)\n .filter(Role.project == project)\n .distinct(User.username)\n .order_by(User.username)\n .all()\n )\n ]\n\n return {\n \"project\": project,\n \"release\": release,\n \"files\": release.files.all(),\n \"all_releases\": all_releases,\n \"maintainers\": maintainers,\n }\n\n\n@view_config(\n route_name=\"esi.project-stats\",\n 
renderer=\"packaging/includes/project-stats.html\",\n decorator=[\n origin_cache(\n 15 * 60, # 15 Minutes\n stale_while_revalidate=30 * 60, # 30 minutes\n stale_if_error=30 * 60, # 30 minutes\n ),\n ],\n)\ndef project_stats(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n stats_svc = request.find_service(IDownloadStatService)\n\n return {\n \"daily\": stats_svc.get_daily_stats(project.name),\n \"weekly\": stats_svc.get_weekly_stats(project.name),\n \"monthly\": stats_svc.get_monthly_stats(project.name),\n }\n\n\n@view_config(\n route_name=\"packaging.file\",\n decorator=[\n cache_control(365 * 24 * 60 * 60), # 1 year\n origin_cache(\n 365 * 24 * 60 * 60, # 1 year\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef packages(request):\n # The amount of logic that we can do in this view is very limited, this\n # view needs to be able to be handled by Fastly directly hitting S3 instead\n # of actually hitting this view. This more or less means that we're limited\n # to just serving the actual file.\n\n # Grab the path of the file that we're attempting to serve\n path = request.matchdict[\"path\"]\n\n # We need to look up the File that is associated with this path, either the\n # package path or the pgp path. If that doesn't exist then we'll bail out\n # early with a 404.\n try:\n file_ = (\n request.db.query(File)\n .options(joinedload(File.release)\n .joinedload(Release.project))\n .filter((File.path == path) | (File.pgp_path == path))\n .one()\n )\n except NoResultFound:\n return HTTPNotFound()\n\n # If this request is for a PGP signature, and the file doesn't have a PGP\n # signature, then we can go ahead and 404 now before hitting the file\n # storage.\n if path == file_.pgp_path and not file_.has_signature:\n return HTTPNotFound()\n\n # Try to get the file from the file file storage service, logging an error\n # and returning a HTTPNotFound if one can't be found.\n try:\n f = request.find_service(IFileStorage).get(path)\n except FileNotFoundError:\n request.log.error(\"missing file data\", path=path)\n return HTTPNotFound()\n\n # If the path we're accessing is the path for the package itself, as\n # opposed to the path for the signature, then we can include a\n # Content-Length header.\n content_length = None\n if path == file_.path:\n content_length = file_.size\n\n return Response(\n # If we have a wsgi.file_wrapper, we'll want to use that so that, if\n # possible, this will use an optimized method of sending. Otherwise\n # we'll just use Pyramid's FileIter as a fallback.\n app_iter=request.environ.get(\"wsgi.file_wrapper\", FileIter)(f),\n # We use application/octet-stream instead of something nicer because\n # different HTTP libraries will treat different combinations of\n # Content-Type and Content-Encoding differently. The only thing that\n # works sanely across all things without having something in the middle\n # decide it can decompress the result to \"help\" the end user is with\n # Content-Type: applicaton/octet-stream and no Content-Encoding.\n content_type=\"application/octet-stream\",\n content_encoding=None,\n # We need to specify an ETag for this response. 
Since ETags compared\n # between URLs have no meaning (and thus, is safe for two URLs to share\n # the same ETag) we will just use the MD5 hash of the package as our\n # ETag.\n etag=file_.md5_digest,\n # Similarly to the ETag header, we'll just use the date that the file\n # was uploaded as the Last-Modified header.\n last_modified=file_.upload_time,\n # If we have a Content-Length, we'll go ahead and use it here to\n # hopefully enable the server and clients alike to be smarter about how\n # they handle downloading this response.\n content_length=content_length,\n )\n", "path": "warehouse/packaging/views.py"}]} | 3,581 | 499 |
gh_patches_debug_28915 | rasdani/github-patches | git_diff | ckan__ckan-6797 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support for pdb and debuggers
### Proposed fixes:
It is now possible to debug CKAN with pdb/ipdb/the PyCharm debugger and others, both outside Docker and inside Docker.
I just exposed a `werkzeug` option to the CKAN CLI, called `passthrough_errors`. Enabling that, together with `--disable-reloader` (which should be the default in my opinion, like it was in the past), allows running pdb without making other changes to the source code.
`threads` should not be enabled and `processes` should be set to 1. These are the defaults already.
> passthrough_errors (bool) – set this to True to disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.)
-- https://werkzeug.palletsprojects.com/en/2.0.x/serving/
Example:
```
$ cd contrib/docker
$ docker-compose up --build -d
$ # wait...
$ docker-compose exec ckan bash
root@f6a71d0b7686:/# python3 -m pdb /usr/lib/ckan/venv/bin/ckan -c /etc/ckan/production.ini run --host 0.0.0.0 -E --disable-reloader
> /usr/lib/ckan/venv/bin/ckan(3)<module>()
-> import re
(Pdb) b ckan/views/api.py:215
Breakpoint 1 at /usr/lib/ckan/venv/src/ckan/ckan/views/api.py:215
(Pdb) c
2021-11-01 17:00:50,832 INFO [ckan.cli] Using configuration file /etc/ckan/production.ini
2021-11-01 17:00:50,832 INFO [ckan.config.environment] Loading static files from public
2021-11-01 17:00:50,954 INFO [ckan.config.environment] Loading templates from /usr/lib/ckan/venv/src/ckan/ckan/templates
2021-11-01 17:00:51,552 INFO [ckan.config.environment] Loading templates from /usr/lib/ckan/venv/src/ckan/ckan/templates
2021-11-01 17:00:52,173 INFO [ckan.cli.server] Running CKAN on http://0.0.0.0:5000
2021-11-01 17:00:52,174 WARNI [werkzeug] * Running on all addresses.
WARNING: This is a development server. Do not use it in a production deployment.
```
...then `http://localhost:5000/api/3/action/package_search` can be opened in the web browser to trigger the breakpoint:
```
> /usr/lib/ckan/venv/src/ckan/ckan/views/api.py(215)action()
-> try:
(Pdb)
```
### Features:
- [ ] includes tests covering changes
- [x] includes updated documentation
- [X] includes user-visible changes
- [ ] includes API changes
- [ ] includes bugfix for possible backport
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/cli/server.py`
Content:
```
1 # encoding: utf-8
2
3 import logging
4
5 import click
6 from werkzeug.serving import run_simple
7
8 import ckan.plugins.toolkit as tk
9 from ckan.common import config
10
11 log = logging.getLogger(__name__)
12
13 DEFAULT_HOST = u"localhost"
14 DEFAULT_PORT = 5000
15
16
17 @click.command(u"run", short_help=u"Start development server")
18 @click.option(u"-H", u"--host", help=u"Host name")
19 @click.option(u"-p", u"--port", help=u"Port number")
20 @click.option(u"-r", u"--disable-reloader", is_flag=True,
21 help=u"Disable reloader")
22 @click.option(
23 u"-t", u"--threaded", is_flag=True,
24 help=u"Handle each request in a separate thread"
25 )
26 @click.option(
27 u"--processes", type=int, default=0,
28 help=u"Maximum number of concurrent processes"
29 )
30 @click.option(
31 u"-e", u"--extra-files", multiple=True,
32 help=u"Additional files that should be watched for server reloading"
33 " (you can provide multiple values)")
34 @click.option(
35 u"-C", u"--ssl-cert", default=None,
36 help=u"Certificate file to use to enable SSL. Passing 'adhoc' will "
37 " automatically generate a new one (on each server reload).")
38 @click.option(
39 u"-K", u"--ssl-key", default=None,
40 help=u"Key file to use to enable SSL. Passing 'adhoc' will "
41 " automatically generate a new one (on each server reload).")
42 @click.pass_context
43 def run(ctx, host, port, disable_reloader, threaded, extra_files, processes,
44 ssl_cert, ssl_key):
45 u"""Runs the Werkzeug development server"""
46
47 # Reloading
48 use_reloader = not disable_reloader
49 config_extra_files = tk.aslist(
50 config.get(u"ckan.devserver.watch_patterns")
51 )
52 extra_files = list(extra_files) + [
53 config[u"__file__"]
54 ] + config_extra_files
55
56 # Threads and processes
57 threaded = threaded or tk.asbool(config.get(u"ckan.devserver.threaded"))
58 processes = processes or tk.asint(
59 config.get(u"ckan.devserver.multiprocess", 1)
60 )
61 if threaded and processes > 1:
62 tk.error_shout(u"Cannot have a multithreaded and multi process server")
63 raise click.Abort()
64
65 # SSL
66 cert_file = ssl_cert or config.get(u"ckan.devserver.ssl_cert")
67 key_file = ssl_key or config.get(u"ckan.devserver.ssl_key")
68
69 if cert_file and key_file:
70 if cert_file == key_file == u"adhoc":
71 ssl_context = u"adhoc"
72 else:
73 ssl_context = (ssl_cert, ssl_key)
74 else:
75 ssl_context = None
76
77 host = host or config.get(u'ckan.devserver.host', DEFAULT_HOST)
78 port = port or config.get(u'ckan.devserver.port', DEFAULT_PORT)
79 try:
80 port = int(port)
81 except ValueError:
82 tk.error_shout(u"Server port must be an integer, not {}".format(port))
83 raise click.Abort()
84
85 log.info(u"Running CKAN on {scheme}://{host}:{port}".format(
86 scheme=u"https" if ssl_context else u"http", host=host, port=port))
87
88 run_simple(
89 host,
90 port,
91 ctx.obj.app,
92 use_reloader=use_reloader,
93 use_evalex=True,
94 threaded=threaded,
95 processes=processes,
96 extra_files=extra_files,
97 ssl_context=ssl_context,
98 )
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/cli/server.py b/ckan/cli/server.py
--- a/ckan/cli/server.py
+++ b/ckan/cli/server.py
@@ -19,6 +19,8 @@
@click.option(u"-p", u"--port", help=u"Port number")
@click.option(u"-r", u"--disable-reloader", is_flag=True,
help=u"Disable reloader")
[email protected](u"-E", u"--passthrough-errors", is_flag=True,
+ help=u"Disable error caching (useful to hook debuggers)")
@click.option(
u"-t", u"--threaded", is_flag=True,
help=u"Handle each request in a separate thread"
@@ -40,10 +42,16 @@
help=u"Key file to use to enable SSL. Passing 'adhoc' will "
" automatically generate a new one (on each server reload).")
@click.pass_context
-def run(ctx, host, port, disable_reloader, threaded, extra_files, processes,
- ssl_cert, ssl_key):
+def run(ctx, host, port, disable_reloader, passthrough_errors, threaded,
+ extra_files, processes, ssl_cert, ssl_key):
u"""Runs the Werkzeug development server"""
+ # passthrough_errors overrides conflicting options
+ if passthrough_errors:
+ disable_reloader = True
+ threaded = False
+ processes = 1
+
# Reloading
use_reloader = not disable_reloader
config_extra_files = tk.aslist(
@@ -95,4 +103,5 @@
processes=processes,
extra_files=extra_files,
ssl_context=ssl_context,
+ passthrough_errors=passthrough_errors,
)
| {"golden_diff": "diff --git a/ckan/cli/server.py b/ckan/cli/server.py\n--- a/ckan/cli/server.py\n+++ b/ckan/cli/server.py\n@@ -19,6 +19,8 @@\n @click.option(u\"-p\", u\"--port\", help=u\"Port number\")\n @click.option(u\"-r\", u\"--disable-reloader\", is_flag=True,\n help=u\"Disable reloader\")\[email protected](u\"-E\", u\"--passthrough-errors\", is_flag=True,\n+ help=u\"Disable error caching (useful to hook debuggers)\")\n @click.option(\n u\"-t\", u\"--threaded\", is_flag=True,\n help=u\"Handle each request in a separate thread\"\n@@ -40,10 +42,16 @@\n help=u\"Key file to use to enable SSL. Passing 'adhoc' will \"\n \" automatically generate a new one (on each server reload).\")\n @click.pass_context\n-def run(ctx, host, port, disable_reloader, threaded, extra_files, processes,\n- ssl_cert, ssl_key):\n+def run(ctx, host, port, disable_reloader, passthrough_errors, threaded,\n+ extra_files, processes, ssl_cert, ssl_key):\n u\"\"\"Runs the Werkzeug development server\"\"\"\n \n+ # passthrough_errors overrides conflicting options\n+ if passthrough_errors:\n+ disable_reloader = True\n+ threaded = False\n+ processes = 1\n+\n # Reloading\n use_reloader = not disable_reloader\n config_extra_files = tk.aslist(\n@@ -95,4 +103,5 @@\n processes=processes,\n extra_files=extra_files,\n ssl_context=ssl_context,\n+ passthrough_errors=passthrough_errors,\n )\n", "issue": "Support for pdb and debuggers\n### Proposed fixes:\r\n\r\nIt is now possible to debug ckan with pdb/ipdb/PyCharm debugger and others, both outside Docker and inside Docker.\r\nI just exposed a `werkzeug` option to the CKAN CLI, called `passthrough_errors`. Enabling that, together with `--disable-reloader` (which should be the default in my opinion, like it was in the past), allow to run pdb without making other changes to the source code.\r\n`threads` should not be enabled and `processes` should be set to 1. These are the defaults already.\r\n\r\n> passthrough_errors (bool) \u2013 set this to True to disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.)\r\n-- https://werkzeug.palletsprojects.com/en/2.0.x/serving/\r\n\r\nExample:\r\n```\r\n$ cd contrib/docker\r\n$ docker-compose up --build -d\r\n$ # wait...\r\n$ docker-compose exec ckan bash\r\nroot@f6a71d0b7686:/# python3 -m pdb /usr/lib/ckan/venv/bin/ckan -c /etc/ckan/production.ini run --host 0.0.0.0 -E --disable-reloader\r\n> /usr/lib/ckan/venv/bin/ckan(3)<module>()\r\n-> import re\r\n(Pdb) b ckan/views/api.py:215\r\nBreakpoint 1 at /usr/lib/ckan/venv/src/ckan/ckan/views/api.py:215\r\n(Pdb) c\r\n2021-11-01 17:00:50,832 INFO [ckan.cli] Using configuration file /etc/ckan/production.ini\r\n2021-11-01 17:00:50,832 INFO [ckan.config.environment] Loading static files from public\r\n2021-11-01 17:00:50,954 INFO [ckan.config.environment] Loading templates from /usr/lib/ckan/venv/src/ckan/ckan/templates\r\n2021-11-01 17:00:51,552 INFO [ckan.config.environment] Loading templates from /usr/lib/ckan/venv/src/ckan/ckan/templates\r\n2021-11-01 17:00:52,173 INFO [ckan.cli.server] Running CKAN on http://0.0.0.0:5000\r\n2021-11-01 17:00:52,174 WARNI [werkzeug] * Running on all addresses.\r\n WARNING: This is a development server. 
Do not use it in a production deployment.\r\n```\r\n\r\n...then `http://localhost:5000/api/3/action/package_search` can be opened in the web browser to trigger the breakpoint:\r\n\r\n```\r\n> /usr/lib/ckan/venv/src/ckan/ckan/views/api.py(215)action()\r\n-> try:\r\n(Pdb)\r\n```\r\n\r\n### Features:\r\n\r\n- [ ] includes tests covering changes\r\n- [x] includes updated documentation\r\n- [X] includes user-visible changes\r\n- [ ] includes API changes\r\n- [ ] includes bugfix for possible backport\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\n\nimport click\nfrom werkzeug.serving import run_simple\n\nimport ckan.plugins.toolkit as tk\nfrom ckan.common import config\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_HOST = u\"localhost\"\nDEFAULT_PORT = 5000\n\n\[email protected](u\"run\", short_help=u\"Start development server\")\[email protected](u\"-H\", u\"--host\", help=u\"Host name\")\[email protected](u\"-p\", u\"--port\", help=u\"Port number\")\[email protected](u\"-r\", u\"--disable-reloader\", is_flag=True,\n help=u\"Disable reloader\")\[email protected](\n u\"-t\", u\"--threaded\", is_flag=True,\n help=u\"Handle each request in a separate thread\"\n)\[email protected](\n u\"--processes\", type=int, default=0,\n help=u\"Maximum number of concurrent processes\"\n)\[email protected](\n u\"-e\", u\"--extra-files\", multiple=True,\n help=u\"Additional files that should be watched for server reloading\"\n \" (you can provide multiple values)\")\[email protected](\n u\"-C\", u\"--ssl-cert\", default=None,\n help=u\"Certificate file to use to enable SSL. Passing 'adhoc' will \"\n \" automatically generate a new one (on each server reload).\")\[email protected](\n u\"-K\", u\"--ssl-key\", default=None,\n help=u\"Key file to use to enable SSL. 
Passing 'adhoc' will \"\n \" automatically generate a new one (on each server reload).\")\[email protected]_context\ndef run(ctx, host, port, disable_reloader, threaded, extra_files, processes,\n ssl_cert, ssl_key):\n u\"\"\"Runs the Werkzeug development server\"\"\"\n\n # Reloading\n use_reloader = not disable_reloader\n config_extra_files = tk.aslist(\n config.get(u\"ckan.devserver.watch_patterns\")\n )\n extra_files = list(extra_files) + [\n config[u\"__file__\"]\n ] + config_extra_files\n\n # Threads and processes\n threaded = threaded or tk.asbool(config.get(u\"ckan.devserver.threaded\"))\n processes = processes or tk.asint(\n config.get(u\"ckan.devserver.multiprocess\", 1)\n )\n if threaded and processes > 1:\n tk.error_shout(u\"Cannot have a multithreaded and multi process server\")\n raise click.Abort()\n\n # SSL\n cert_file = ssl_cert or config.get(u\"ckan.devserver.ssl_cert\")\n key_file = ssl_key or config.get(u\"ckan.devserver.ssl_key\")\n\n if cert_file and key_file:\n if cert_file == key_file == u\"adhoc\":\n ssl_context = u\"adhoc\"\n else:\n ssl_context = (ssl_cert, ssl_key)\n else:\n ssl_context = None\n\n host = host or config.get(u'ckan.devserver.host', DEFAULT_HOST)\n port = port or config.get(u'ckan.devserver.port', DEFAULT_PORT)\n try:\n port = int(port)\n except ValueError:\n tk.error_shout(u\"Server port must be an integer, not {}\".format(port))\n raise click.Abort()\n\n log.info(u\"Running CKAN on {scheme}://{host}:{port}\".format(\n scheme=u\"https\" if ssl_context else u\"http\", host=host, port=port))\n\n run_simple(\n host,\n port,\n ctx.obj.app,\n use_reloader=use_reloader,\n use_evalex=True,\n threaded=threaded,\n processes=processes,\n extra_files=extra_files,\n ssl_context=ssl_context,\n )\n", "path": "ckan/cli/server.py"}], "after_files": [{"content": "# encoding: utf-8\n\nimport logging\n\nimport click\nfrom werkzeug.serving import run_simple\n\nimport ckan.plugins.toolkit as tk\nfrom ckan.common import config\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_HOST = u\"localhost\"\nDEFAULT_PORT = 5000\n\n\[email protected](u\"run\", short_help=u\"Start development server\")\[email protected](u\"-H\", u\"--host\", help=u\"Host name\")\[email protected](u\"-p\", u\"--port\", help=u\"Port number\")\[email protected](u\"-r\", u\"--disable-reloader\", is_flag=True,\n help=u\"Disable reloader\")\[email protected](u\"-E\", u\"--passthrough-errors\", is_flag=True,\n help=u\"Disable error caching (useful to hook debuggers)\")\[email protected](\n u\"-t\", u\"--threaded\", is_flag=True,\n help=u\"Handle each request in a separate thread\"\n)\[email protected](\n u\"--processes\", type=int, default=0,\n help=u\"Maximum number of concurrent processes\"\n)\[email protected](\n u\"-e\", u\"--extra-files\", multiple=True,\n help=u\"Additional files that should be watched for server reloading\"\n \" (you can provide multiple values)\")\[email protected](\n u\"-C\", u\"--ssl-cert\", default=None,\n help=u\"Certificate file to use to enable SSL. Passing 'adhoc' will \"\n \" automatically generate a new one (on each server reload).\")\[email protected](\n u\"-K\", u\"--ssl-key\", default=None,\n help=u\"Key file to use to enable SSL. 
Passing 'adhoc' will \"\n \" automatically generate a new one (on each server reload).\")\[email protected]_context\ndef run(ctx, host, port, disable_reloader, passthrough_errors, threaded,\n extra_files, processes, ssl_cert, ssl_key):\n u\"\"\"Runs the Werkzeug development server\"\"\"\n\n # passthrough_errors overrides conflicting options\n if passthrough_errors:\n disable_reloader = True\n threaded = False\n processes = 1\n\n # Reloading\n use_reloader = not disable_reloader\n config_extra_files = tk.aslist(\n config.get(u\"ckan.devserver.watch_patterns\")\n )\n extra_files = list(extra_files) + [\n config[u\"__file__\"]\n ] + config_extra_files\n\n # Threads and processes\n threaded = threaded or tk.asbool(config.get(u\"ckan.devserver.threaded\"))\n processes = processes or tk.asint(\n config.get(u\"ckan.devserver.multiprocess\", 1)\n )\n if threaded and processes > 1:\n tk.error_shout(u\"Cannot have a multithreaded and multi process server\")\n raise click.Abort()\n\n # SSL\n cert_file = ssl_cert or config.get(u\"ckan.devserver.ssl_cert\")\n key_file = ssl_key or config.get(u\"ckan.devserver.ssl_key\")\n\n if cert_file and key_file:\n if cert_file == key_file == u\"adhoc\":\n ssl_context = u\"adhoc\"\n else:\n ssl_context = (ssl_cert, ssl_key)\n else:\n ssl_context = None\n\n host = host or config.get(u'ckan.devserver.host', DEFAULT_HOST)\n port = port or config.get(u'ckan.devserver.port', DEFAULT_PORT)\n try:\n port = int(port)\n except ValueError:\n tk.error_shout(u\"Server port must be an integer, not {}\".format(port))\n raise click.Abort()\n\n log.info(u\"Running CKAN on {scheme}://{host}:{port}\".format(\n scheme=u\"https\" if ssl_context else u\"http\", host=host, port=port))\n\n run_simple(\n host,\n port,\n ctx.obj.app,\n use_reloader=use_reloader,\n use_evalex=True,\n threaded=threaded,\n processes=processes,\n extra_files=extra_files,\n ssl_context=ssl_context,\n passthrough_errors=passthrough_errors,\n )\n", "path": "ckan/cli/server.py"}]} | 2,004 | 385 |
gh_patches_debug_6418 | rasdani/github-patches | git_diff | getsentry__sentry-python-3081 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`sentry-sdk[grpcio]` requires `protobuf`
### How do you use Sentry?
Sentry Saas (sentry.io)
### Version
2.2.0
### Steps to Reproduce
See: https://stackoverflow.com/questions/78488561
Followed Sentry documentation [gRPC](https://docs.sentry.io/platforms/python/integrations/grpc/)
```bash
python3 -m venv venv
source venv/bin/activate
python3 -m pip install --upgrade 'sentry-sdk[grpcio]'
python3 -m pip freeze
```
```
certifi==2024.2.2
grpcio==1.63.0
sentry-sdk==2.2.0
urllib3==2.2.1
```
`main.py`:
```python3
import grpc
import sentry_sdk
from sentry_sdk.integrations.grpc import GRPCIntegration
sentry_sdk.init(
dsn="https://...",
enable_tracing=True,
integrations=[
GRPCIntegration(),
],
)
with grpc.insecure_channel("example.com:12345") as channel:
print(channel)
```
### Expected Result
Happy path.
### Actual Result
```bash
python3 main.py
Traceback (most recent call last):
File "[redacted]/venv/lib/python3.10/site-packages/sentry_sdk/integrations/grpc/client.py", line 15, in <module>
from google.protobuf.message import Message
ModuleNotFoundError: No module named 'google'
```
`integrations/grpc/client.py`:
```python3
try:
import grpc
from grpc import ClientCallDetails, Call
from grpc._interceptor import _UnaryOutcome
from grpc.aio._interceptor import UnaryStreamCall
from google.protobuf.message import Message <-------------- curious
except ImportError:
raise DidNotEnable("grpcio is not installed")
```
#### Solution
```bash
python3 -m pip install protobuf
python3 -m pip freeze
```
```
certifi==2024.2.2
grpcio==1.63.0
protobuf==5.26.1
sentry-sdk==2.2.0
urllib3==2.2.1
```
Then it works.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Sentry-Python - Sentry SDK for Python
5 =====================================
6
7 **Sentry-Python is an SDK for Sentry.** Check out `GitHub
8 <https://github.com/getsentry/sentry-python>`_ to find out more.
9 """
10
11 import os
12 from setuptools import setup, find_packages
13
14 here = os.path.abspath(os.path.dirname(__file__))
15
16
17 def get_file_text(file_name):
18 with open(os.path.join(here, file_name)) as in_file:
19 return in_file.read()
20
21
22 setup(
23 name="sentry-sdk",
24 version="2.2.0",
25 author="Sentry Team and Contributors",
26 author_email="[email protected]",
27 url="https://github.com/getsentry/sentry-python",
28 project_urls={
29 "Documentation": "https://docs.sentry.io/platforms/python/",
30 "Changelog": "https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md",
31 },
32 description="Python client for Sentry (https://sentry.io)",
33 long_description=get_file_text("README.md"),
34 long_description_content_type="text/markdown",
35 packages=find_packages(exclude=("tests", "tests.*")),
36 # PEP 561
37 package_data={"sentry_sdk": ["py.typed"]},
38 zip_safe=False,
39 license="MIT",
40 python_requires=">=3.6",
41 install_requires=[
42 "urllib3>=1.26.11",
43 "certifi",
44 ],
45 extras_require={
46 "aiohttp": ["aiohttp>=3.5"],
47 "anthropic": ["anthropic>=0.16"],
48 "arq": ["arq>=0.23"],
49 "asyncpg": ["asyncpg>=0.23"],
50 "beam": ["apache-beam>=2.12"],
51 "bottle": ["bottle>=0.12.13"],
52 "celery": ["celery>=3"],
53 "celery-redbeat": ["celery-redbeat>=2"],
54 "chalice": ["chalice>=1.16.0"],
55 "clickhouse-driver": ["clickhouse-driver>=0.2.0"],
56 "django": ["django>=1.8"],
57 "falcon": ["falcon>=1.4"],
58 "fastapi": ["fastapi>=0.79.0"],
59 "flask": ["flask>=0.11", "blinker>=1.1", "markupsafe"],
60 "grpcio": ["grpcio>=1.21.1"],
61 "httpx": ["httpx>=0.16.0"],
62 "huey": ["huey>=2"],
63 "huggingface_hub": ["huggingface_hub>=0.22"],
64 "langchain": ["langchain>=0.0.210"],
65 "loguru": ["loguru>=0.5"],
66 "openai": ["openai>=1.0.0", "tiktoken>=0.3.0"],
67 "opentelemetry": ["opentelemetry-distro>=0.35b0"],
68 "opentelemetry-experimental": [
69 "opentelemetry-distro~=0.40b0",
70 "opentelemetry-instrumentation-aiohttp-client~=0.40b0",
71 "opentelemetry-instrumentation-django~=0.40b0",
72 "opentelemetry-instrumentation-fastapi~=0.40b0",
73 "opentelemetry-instrumentation-flask~=0.40b0",
74 "opentelemetry-instrumentation-requests~=0.40b0",
75 "opentelemetry-instrumentation-sqlite3~=0.40b0",
76 "opentelemetry-instrumentation-urllib~=0.40b0",
77 ],
78 "pure_eval": ["pure_eval", "executing", "asttokens"],
79 "pymongo": ["pymongo>=3.1"],
80 "pyspark": ["pyspark>=2.4.4"],
81 "quart": ["quart>=0.16.1", "blinker>=1.1"],
82 "rq": ["rq>=0.6"],
83 "sanic": ["sanic>=0.8"],
84 "sqlalchemy": ["sqlalchemy>=1.2"],
85 "starlette": ["starlette>=0.19.1"],
86 "starlite": ["starlite>=1.48"],
87 "tornado": ["tornado>=5"],
88 },
89 classifiers=[
90 "Development Status :: 5 - Production/Stable",
91 "Environment :: Web Environment",
92 "Intended Audience :: Developers",
93 "License :: OSI Approved :: BSD License",
94 "Operating System :: OS Independent",
95 "Programming Language :: Python",
96 "Programming Language :: Python :: 3",
97 "Programming Language :: Python :: 3.6",
98 "Programming Language :: Python :: 3.7",
99 "Programming Language :: Python :: 3.8",
100 "Programming Language :: Python :: 3.9",
101 "Programming Language :: Python :: 3.10",
102 "Programming Language :: Python :: 3.11",
103 "Programming Language :: Python :: 3.12",
104 "Topic :: Software Development :: Libraries :: Python Modules",
105 ],
106 options={"bdist_wheel": {"universal": "1"}},
107 )
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@
"falcon": ["falcon>=1.4"],
"fastapi": ["fastapi>=0.79.0"],
"flask": ["flask>=0.11", "blinker>=1.1", "markupsafe"],
- "grpcio": ["grpcio>=1.21.1"],
+ "grpcio": ["grpcio>=1.21.1", "protobuf>=3.8.0"],
"httpx": ["httpx>=0.16.0"],
"huey": ["huey>=2"],
"huggingface_hub": ["huggingface_hub>=0.22"],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,7 @@\n \"falcon\": [\"falcon>=1.4\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\", \"markupsafe\"],\n- \"grpcio\": [\"grpcio>=1.21.1\"],\n+ \"grpcio\": [\"grpcio>=1.21.1\", \"protobuf>=3.8.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"huey\": [\"huey>=2\"],\n \"huggingface_hub\": [\"huggingface_hub>=0.22\"],\n", "issue": "`sentry-sdk[grpcio]` requires `protobuf`\n### How do you use Sentry?\r\n\r\nSentry Saas (sentry.io)\r\n\r\n### Version\r\n\r\n2.2.0\r\n\r\n### Steps to Reproduce\r\n\r\nSee: https://stackoverflow.com/questions/78488561\r\n\r\nFollowed Sentry documentation [gRPC](https://docs.sentry.io/platforms/python/integrations/grpc/)\r\n\r\n```bash\r\npython3 -m venv venv\r\nsource venv/bin/activate\r\npython3 -m pip install --upgrade 'sentry-sdk[grpcio]'\r\n\r\npython3 -m pip freeze\r\n```\r\n```\r\ncertifi==2024.2.2\r\ngrpcio==1.63.0\r\nsentry-sdk==2.2.0\r\nurllib3==2.2.1\r\n```\r\n`main.py`:\r\n```python3\r\nimport grpc\r\n\r\nimport sentry_sdk\r\nfrom sentry_sdk.integrations.grpc import GRPCIntegration\r\n\r\nsentry_sdk.init(\r\n dsn=\"https://...\",\r\n enable_tracing=True,\r\n integrations=[\r\n GRPCIntegration(),\r\n ],\r\n)\r\n\r\nwith grpc.insecure_channel(\"example.com:12345\") as channel:\r\n print(channel)\r\n```\r\n\r\n\r\n### Expected Result\r\n\r\nHappy path.\r\n\r\n### Actual Result\r\n\r\n```bash\r\npython3 main.py\r\nTraceback (most recent call last):\r\n File \"[redacted]/venv/lib/python3.10/site-packages/sentry_sdk/integrations/grpc/client.py\", line 15, in <module>\r\n from google.protobuf.message import Message\r\nModuleNotFoundError: No module named 'google'\r\n```\r\n`integrations/grpc/client.py`:\r\n```python3\r\ntry:\r\n import grpc\r\n from grpc import ClientCallDetails, Call\r\n from grpc._interceptor import _UnaryOutcome\r\n from grpc.aio._interceptor import UnaryStreamCall\r\n from google.protobuf.message import Message <-------------- curious\r\nexcept ImportError:\r\n raise DidNotEnable(\"grpcio is not installed\")\r\n```\r\n\r\n#### Solution\r\n\r\n```bash\r\npython3 -m pip install protobuf\r\npython3 -m pip freeze\r\n```\r\n```\r\ncertifi==2024.2.2\r\ngrpcio==1.63.0\r\nprotobuf==5.26.1\r\nsentry-sdk==2.2.0\r\nurllib3==2.2.1\r\n```\r\nThen works.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"2.2.0\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n 
license=\"MIT\",\n python_requires=\">=3.6\",\n install_requires=[\n \"urllib3>=1.26.11\",\n \"certifi\",\n ],\n extras_require={\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"anthropic\": [\"anthropic>=0.16\"],\n \"arq\": [\"arq>=0.23\"],\n \"asyncpg\": [\"asyncpg>=0.23\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"celery\": [\"celery>=3\"],\n \"celery-redbeat\": [\"celery-redbeat>=2\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"clickhouse-driver\": [\"clickhouse-driver>=0.2.0\"],\n \"django\": [\"django>=1.8\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\", \"markupsafe\"],\n \"grpcio\": [\"grpcio>=1.21.1\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"huey\": [\"huey>=2\"],\n \"huggingface_hub\": [\"huggingface_hub>=0.22\"],\n \"langchain\": [\"langchain>=0.0.210\"],\n \"loguru\": [\"loguru>=0.5\"],\n \"openai\": [\"openai>=1.0.0\", \"tiktoken>=0.3.0\"],\n \"opentelemetry\": [\"opentelemetry-distro>=0.35b0\"],\n \"opentelemetry-experimental\": [\n \"opentelemetry-distro~=0.40b0\",\n \"opentelemetry-instrumentation-aiohttp-client~=0.40b0\",\n \"opentelemetry-instrumentation-django~=0.40b0\",\n \"opentelemetry-instrumentation-fastapi~=0.40b0\",\n \"opentelemetry-instrumentation-flask~=0.40b0\",\n \"opentelemetry-instrumentation-requests~=0.40b0\",\n \"opentelemetry-instrumentation-sqlite3~=0.40b0\",\n \"opentelemetry-instrumentation-urllib~=0.40b0\",\n ],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"pymongo\": [\"pymongo>=3.1\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"rq\": [\"rq>=0.6\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"starlite\": [\"starlite>=1.48\"],\n \"tornado\": [\"tornado>=5\"],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"2.2.0\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n 
description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"MIT\",\n python_requires=\">=3.6\",\n install_requires=[\n \"urllib3>=1.26.11\",\n \"certifi\",\n ],\n extras_require={\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"anthropic\": [\"anthropic>=0.16\"],\n \"arq\": [\"arq>=0.23\"],\n \"asyncpg\": [\"asyncpg>=0.23\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"celery\": [\"celery>=3\"],\n \"celery-redbeat\": [\"celery-redbeat>=2\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"clickhouse-driver\": [\"clickhouse-driver>=0.2.0\"],\n \"django\": [\"django>=1.8\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\", \"markupsafe\"],\n \"grpcio\": [\"grpcio>=1.21.1\", \"protobuf>=3.8.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"huey\": [\"huey>=2\"],\n \"huggingface_hub\": [\"huggingface_hub>=0.22\"],\n \"langchain\": [\"langchain>=0.0.210\"],\n \"loguru\": [\"loguru>=0.5\"],\n \"openai\": [\"openai>=1.0.0\", \"tiktoken>=0.3.0\"],\n \"opentelemetry\": [\"opentelemetry-distro>=0.35b0\"],\n \"opentelemetry-experimental\": [\n \"opentelemetry-distro~=0.40b0\",\n \"opentelemetry-instrumentation-aiohttp-client~=0.40b0\",\n \"opentelemetry-instrumentation-django~=0.40b0\",\n \"opentelemetry-instrumentation-fastapi~=0.40b0\",\n \"opentelemetry-instrumentation-flask~=0.40b0\",\n \"opentelemetry-instrumentation-requests~=0.40b0\",\n \"opentelemetry-instrumentation-sqlite3~=0.40b0\",\n \"opentelemetry-instrumentation-urllib~=0.40b0\",\n ],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"pymongo\": [\"pymongo>=3.1\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"rq\": [\"rq>=0.6\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"starlite\": [\"starlite>=1.48\"],\n \"tornado\": [\"tornado>=5\"],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}]} | 2,143 | 176 |
gh_patches_debug_21049 | rasdani/github-patches | git_diff | aws__aws-cli-461 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No way to specify all for --protocol option
The `aws ec2 authorize-security-group-ingress` and `aws ec2 authorize-security-group-egress` commands both accept a `--protocol` option but there is no way to supply a value that represents ALL protocols.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/ec2secgroupsimplify.py`
Content:
```
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 This customization adds the following scalar parameters to the
15 authorize operations:
16
17 * --protocol: tcp | udp | icmp or any protocol number
18 * --port: A single integer or a range (min-max). You can specify ``all``
19 to mean all ports (for example, port range 0-65535)
20 * --source-group-name
21 * --source-group-id
22 * --cidr - The CIDR range. Cannot be used when specifying a source or
23 destination security group.
24 """
25
26 from awscli.arguments import CustomArgument
27
28
29 def _add_params(argument_table, operation, **kwargs):
30 arg = ProtocolArgument('protocol',
31 help_text=PROTOCOL_DOCS)
32 argument_table['protocol'] = arg
33 arg = PortArgument('port', help_text=PORT_DOCS)
34 argument_table['port'] = arg
35 arg = CidrArgument('cidr', help_text=CIDR_DOCS)
36 argument_table['cidr'] = arg
37 arg = SourceGroupArgument('source-group',
38 help_text=SOURCEGROUP_DOCS)
39 argument_table['source-group'] = arg
40 arg = GroupOwnerArgument('group-owner',
41 help_text=GROUPOWNER_DOCS)
42 argument_table['group-owner'] = arg
43
44
45 def _check_args(parsed_args, **kwargs):
46 # This function checks the parsed args. If the user specified
47 # the --ip-permissions option with any of the scalar options we
48 # raise an error.
49 arg_dict = vars(parsed_args)
50 if arg_dict['ip_permissions']:
51 for key in ('protocol', 'port', 'cidr',
52 'source_group', 'group_owner'):
53 if arg_dict[key]:
54 msg = ('The --%s option is not compatible '
55 'with the --ip-permissions option ') % key
56 raise ValueError(msg)
57
58 def _add_docs(help_command, **kwargs):
59 doc = help_command.doc
60 doc.style.new_paragraph()
61 doc.style.start_note()
62 msg = ('To specify multiple rules in a single command '
63 'use the <code>--ip-permissions</code> option')
64 doc.include_doc_string(msg)
65 doc.style.end_note()
66
67
68 EVENTS = [
69 ('building-argument-table.ec2.authorize-security-group-ingress', _add_params),
70 ('building-argument-table.ec2.authorize-security-group-egress', _add_params),
71 ('building-argument-table.ec2.revoke-security-group-ingress', _add_params),
72 ('building-argument-table.ec2.revoke-security-group-egress', _add_params),
73 ('operation-args-parsed.ec2.authorize-security-group-ingress', _check_args),
74 ('operation-args-parsed.ec2.authorize-security-group-egress', _check_args),
75 ('operation-args-parsed.ec2.revoke-security-group-ingress', _check_args),
76 ('operation-args-parsed.ec2.revoke-security-group-egress', _check_args),
77 ('doc-description.ec2.authorize-security-group-ingress', _add_docs),
78 ('doc-description.ec2.authorize-security-group-egress', _add_docs),
79 ('doc-description.ec2.revoke-security-group-ingress', _add_docs),
80 ('doc-description.ec2.revoke-security-groupdoc-ingress', _add_docs),
81 ]
82 PROTOCOL_DOCS = ('<p>The IP protocol of this permission.</p>'
83 '<p>Valid protocol values: <code>tcp</code>, '
84 '<code>udp</code>, <code>icmp</code></p>')
85 PORT_DOCS = ('<p>For TCP or UDP: The range of ports to allow.'
86 ' A single integer or a range (min-max). You can '
87 'specify <code>all</code> to mean all ports</p>')
88 CIDR_DOCS = '<p>The CIDR IP range.</p>'
89 SOURCEGROUP_DOCS = ('<p>The name of the source security group. '
90 'Cannot be used when specifying a CIDR IP address.')
91 GROUPOWNER_DOCS = ('<p>The AWS account ID that owns the source security '
92 'group. Cannot be used when specifying a CIDR IP '
93 'address.</p>')
94
95 def register_secgroup(event_handler):
96 for event, handler in EVENTS:
97 event_handler.register(event, handler)
98
99
100 def _build_ip_permissions(params, key, value):
101 if 'ip_permissions' not in params:
102 params['ip_permissions'] = [{}]
103 if key == 'CidrIp':
104 if 'IpRanges' not in params['ip_permissions'][0]:
105 params['ip_permissions'][0]['IpRanges'] = []
106 params['ip_permissions'][0]['IpRanges'].append(value)
107 elif key in ('GroupId', 'GroupName', 'UserId'):
108 if 'UserIdGroupPairs' not in params['ip_permissions'][0]:
109 params['ip_permissions'][0]['UserIdGroupPairs'] = [{}]
110 params['ip_permissions'][0]['UserIdGroupPairs'][0][key] = value
111 else:
112 params['ip_permissions'][0][key] = value
113
114
115 class ProtocolArgument(CustomArgument):
116
117 def add_to_params(self, parameters, value):
118 if value:
119 try:
120 int_value = int(value)
121 if int_value < 0 or int_value > 255:
122 msg = ('protocol numbers must be in the range 0-255')
123 raise ValueError(msg)
124 except ValueError:
125 if value not in ('tcp', 'udp', 'icmp'):
126 msg = ('protocol parameter should be one of: '
127 'tcp|udp|icmp or any valid protocol number.')
128 raise ValueError(msg)
129 _build_ip_permissions(parameters, 'IpProtocol', value)
130
131
132 class PortArgument(CustomArgument):
133
134 def add_to_params(self, parameters, value):
135 if value:
136 try:
137 if value == '-1' or value == 'all':
138 fromstr = '-1'
139 tostr = '-1'
140 elif '-' in value:
141 fromstr, tostr = value.split('-')
142 else:
143 fromstr, tostr = (value, value)
144 _build_ip_permissions(parameters, 'FromPort', int(fromstr))
145 _build_ip_permissions(parameters, 'ToPort', int(tostr))
146 except ValueError:
147 msg = ('port parameter should be of the '
148 'form <from[-to]> (e.g. 22 or 22-25)')
149 raise ValueError(msg)
150
151
152 class CidrArgument(CustomArgument):
153
154 def add_to_params(self, parameters, value):
155 if value:
156 value = [{'CidrIp': value}]
157 _build_ip_permissions(parameters, 'IpRanges', value)
158
159
160 class SourceGroupArgument(CustomArgument):
161
162 def add_to_params(self, parameters, value):
163 if value:
164 if value.startswith('sg-'):
165 _build_ip_permissions(parameters, 'GroupId', value)
166 else:
167 _build_ip_permissions(parameters, 'GroupName', value)
168
169
170 class GroupOwnerArgument(CustomArgument):
171
172 def add_to_params(self, parameters, value):
173 if value:
174 _build_ip_permissions(parameters, 'UserId', value)
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/ec2secgroupsimplify.py b/awscli/customizations/ec2secgroupsimplify.py
--- a/awscli/customizations/ec2secgroupsimplify.py
+++ b/awscli/customizations/ec2secgroupsimplify.py
@@ -118,14 +118,17 @@
if value:
try:
int_value = int(value)
- if int_value < 0 or int_value > 255:
- msg = ('protocol numbers must be in the range 0-255')
+ if (int_value < 0 or int_value > 255) and int_value != -1:
+ msg = ('protocol numbers must be in the range 0-255 '
+ 'or -1 to specify all protocols')
raise ValueError(msg)
except ValueError:
- if value not in ('tcp', 'udp', 'icmp'):
+ if value not in ('tcp', 'udp', 'icmp', 'all'):
msg = ('protocol parameter should be one of: '
- 'tcp|udp|icmp or any valid protocol number.')
+ 'tcp|udp|icmp|all or any valid protocol number.')
raise ValueError(msg)
+ if value == 'all':
+ value = '-1'
_build_ip_permissions(parameters, 'IpProtocol', value)
| {"golden_diff": "diff --git a/awscli/customizations/ec2secgroupsimplify.py b/awscli/customizations/ec2secgroupsimplify.py\n--- a/awscli/customizations/ec2secgroupsimplify.py\n+++ b/awscli/customizations/ec2secgroupsimplify.py\n@@ -118,14 +118,17 @@\n if value:\n try:\n int_value = int(value)\n- if int_value < 0 or int_value > 255:\n- msg = ('protocol numbers must be in the range 0-255')\n+ if (int_value < 0 or int_value > 255) and int_value != -1:\n+ msg = ('protocol numbers must be in the range 0-255 '\n+ 'or -1 to specify all protocols')\n raise ValueError(msg)\n except ValueError:\n- if value not in ('tcp', 'udp', 'icmp'):\n+ if value not in ('tcp', 'udp', 'icmp', 'all'):\n msg = ('protocol parameter should be one of: '\n- 'tcp|udp|icmp or any valid protocol number.')\n+ 'tcp|udp|icmp|all or any valid protocol number.')\n raise ValueError(msg)\n+ if value == 'all':\n+ value = '-1'\n _build_ip_permissions(parameters, 'IpProtocol', value)\n", "issue": "No way to specify all for --protocol option\nThe `aws ec2 authorize-security-group-ingress` and `aws ec2 authorize-security-group-egress` commands both accept a `--protocol` option but there is no way to supply a value that represents ALL protocols.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization adds the following scalar parameters to the\nauthorize operations:\n\n* --protocol: tcp | udp | icmp or any protocol number\n* --port: A single integer or a range (min-max). You can specify ``all``\n to mean all ports (for example, port range 0-65535)\n* --source-group-name\n* --source-group-id\n* --cidr - The CIDR range. Cannot be used when specifying a source or\n destination security group.\n\"\"\"\n\nfrom awscli.arguments import CustomArgument\n\n\ndef _add_params(argument_table, operation, **kwargs):\n arg = ProtocolArgument('protocol',\n help_text=PROTOCOL_DOCS)\n argument_table['protocol'] = arg\n arg = PortArgument('port', help_text=PORT_DOCS)\n argument_table['port'] = arg\n arg = CidrArgument('cidr', help_text=CIDR_DOCS)\n argument_table['cidr'] = arg\n arg = SourceGroupArgument('source-group',\n help_text=SOURCEGROUP_DOCS)\n argument_table['source-group'] = arg\n arg = GroupOwnerArgument('group-owner',\n help_text=GROUPOWNER_DOCS)\n argument_table['group-owner'] = arg\n\n\ndef _check_args(parsed_args, **kwargs):\n # This function checks the parsed args. 
If the user specified\n # the --ip-permissions option with any of the scalar options we\n # raise an error.\n arg_dict = vars(parsed_args)\n if arg_dict['ip_permissions']:\n for key in ('protocol', 'port', 'cidr',\n 'source_group', 'group_owner'):\n if arg_dict[key]:\n msg = ('The --%s option is not compatible '\n 'with the --ip-permissions option ') % key\n raise ValueError(msg)\n\ndef _add_docs(help_command, **kwargs):\n doc = help_command.doc\n doc.style.new_paragraph()\n doc.style.start_note()\n msg = ('To specify multiple rules in a single command '\n 'use the <code>--ip-permissions</code> option')\n doc.include_doc_string(msg)\n doc.style.end_note()\n\n\nEVENTS = [\n ('building-argument-table.ec2.authorize-security-group-ingress', _add_params),\n ('building-argument-table.ec2.authorize-security-group-egress', _add_params),\n ('building-argument-table.ec2.revoke-security-group-ingress', _add_params),\n ('building-argument-table.ec2.revoke-security-group-egress', _add_params),\n ('operation-args-parsed.ec2.authorize-security-group-ingress', _check_args),\n ('operation-args-parsed.ec2.authorize-security-group-egress', _check_args),\n ('operation-args-parsed.ec2.revoke-security-group-ingress', _check_args),\n ('operation-args-parsed.ec2.revoke-security-group-egress', _check_args),\n ('doc-description.ec2.authorize-security-group-ingress', _add_docs),\n ('doc-description.ec2.authorize-security-group-egress', _add_docs),\n ('doc-description.ec2.revoke-security-group-ingress', _add_docs),\n ('doc-description.ec2.revoke-security-groupdoc-ingress', _add_docs),\n ]\nPROTOCOL_DOCS = ('<p>The IP protocol of this permission.</p>'\n '<p>Valid protocol values: <code>tcp</code>, '\n '<code>udp</code>, <code>icmp</code></p>')\nPORT_DOCS = ('<p>For TCP or UDP: The range of ports to allow.'\n ' A single integer or a range (min-max). You can '\n 'specify <code>all</code> to mean all ports</p>')\nCIDR_DOCS = '<p>The CIDR IP range.</p>'\nSOURCEGROUP_DOCS = ('<p>The name of the source security group. '\n 'Cannot be used when specifying a CIDR IP address.')\nGROUPOWNER_DOCS = ('<p>The AWS account ID that owns the source security '\n 'group. 
Cannot be used when specifying a CIDR IP '\n 'address.</p>')\n\ndef register_secgroup(event_handler):\n for event, handler in EVENTS:\n event_handler.register(event, handler)\n\n\ndef _build_ip_permissions(params, key, value):\n if 'ip_permissions' not in params:\n params['ip_permissions'] = [{}]\n if key == 'CidrIp':\n if 'IpRanges' not in params['ip_permissions'][0]:\n params['ip_permissions'][0]['IpRanges'] = []\n params['ip_permissions'][0]['IpRanges'].append(value)\n elif key in ('GroupId', 'GroupName', 'UserId'):\n if 'UserIdGroupPairs' not in params['ip_permissions'][0]:\n params['ip_permissions'][0]['UserIdGroupPairs'] = [{}]\n params['ip_permissions'][0]['UserIdGroupPairs'][0][key] = value\n else:\n params['ip_permissions'][0][key] = value\n\n\nclass ProtocolArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n try:\n int_value = int(value)\n if int_value < 0 or int_value > 255:\n msg = ('protocol numbers must be in the range 0-255')\n raise ValueError(msg)\n except ValueError:\n if value not in ('tcp', 'udp', 'icmp'):\n msg = ('protocol parameter should be one of: '\n 'tcp|udp|icmp or any valid protocol number.')\n raise ValueError(msg)\n _build_ip_permissions(parameters, 'IpProtocol', value)\n\n\nclass PortArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n try:\n if value == '-1' or value == 'all':\n fromstr = '-1'\n tostr = '-1'\n elif '-' in value:\n fromstr, tostr = value.split('-')\n else:\n fromstr, tostr = (value, value)\n _build_ip_permissions(parameters, 'FromPort', int(fromstr))\n _build_ip_permissions(parameters, 'ToPort', int(tostr))\n except ValueError:\n msg = ('port parameter should be of the '\n 'form <from[-to]> (e.g. 22 or 22-25)')\n raise ValueError(msg)\n\n\nclass CidrArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n value = [{'CidrIp': value}]\n _build_ip_permissions(parameters, 'IpRanges', value)\n\n\nclass SourceGroupArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n if value.startswith('sg-'):\n _build_ip_permissions(parameters, 'GroupId', value)\n else:\n _build_ip_permissions(parameters, 'GroupName', value)\n\n\nclass GroupOwnerArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n _build_ip_permissions(parameters, 'UserId', value)\n", "path": "awscli/customizations/ec2secgroupsimplify.py"}], "after_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization adds the following scalar parameters to the\nauthorize operations:\n\n* --protocol: tcp | udp | icmp or any protocol number\n* --port: A single integer or a range (min-max). You can specify ``all``\n to mean all ports (for example, port range 0-65535)\n* --source-group-name\n* --source-group-id\n* --cidr - The CIDR range. 
Cannot be used when specifying a source or\n destination security group.\n\"\"\"\n\nfrom awscli.arguments import CustomArgument\n\n\ndef _add_params(argument_table, operation, **kwargs):\n arg = ProtocolArgument('protocol',\n help_text=PROTOCOL_DOCS)\n argument_table['protocol'] = arg\n arg = PortArgument('port', help_text=PORT_DOCS)\n argument_table['port'] = arg\n arg = CidrArgument('cidr', help_text=CIDR_DOCS)\n argument_table['cidr'] = arg\n arg = SourceGroupArgument('source-group',\n help_text=SOURCEGROUP_DOCS)\n argument_table['source-group'] = arg\n arg = GroupOwnerArgument('group-owner',\n help_text=GROUPOWNER_DOCS)\n argument_table['group-owner'] = arg\n\n\ndef _check_args(parsed_args, **kwargs):\n # This function checks the parsed args. If the user specified\n # the --ip-permissions option with any of the scalar options we\n # raise an error.\n arg_dict = vars(parsed_args)\n if arg_dict['ip_permissions']:\n for key in ('protocol', 'port', 'cidr',\n 'source_group', 'group_owner'):\n if arg_dict[key]:\n msg = ('The --%s option is not compatible '\n 'with the --ip-permissions option ') % key\n raise ValueError(msg)\n\ndef _add_docs(help_command, **kwargs):\n doc = help_command.doc\n doc.style.new_paragraph()\n doc.style.start_note()\n msg = ('To specify multiple rules in a single command '\n 'use the <code>--ip-permissions</code> option')\n doc.include_doc_string(msg)\n doc.style.end_note()\n\n\nEVENTS = [\n ('building-argument-table.ec2.authorize-security-group-ingress', _add_params),\n ('building-argument-table.ec2.authorize-security-group-egress', _add_params),\n ('building-argument-table.ec2.revoke-security-group-ingress', _add_params),\n ('building-argument-table.ec2.revoke-security-group-egress', _add_params),\n ('operation-args-parsed.ec2.authorize-security-group-ingress', _check_args),\n ('operation-args-parsed.ec2.authorize-security-group-egress', _check_args),\n ('operation-args-parsed.ec2.revoke-security-group-ingress', _check_args),\n ('operation-args-parsed.ec2.revoke-security-group-egress', _check_args),\n ('doc-description.ec2.authorize-security-group-ingress', _add_docs),\n ('doc-description.ec2.authorize-security-group-egress', _add_docs),\n ('doc-description.ec2.revoke-security-group-ingress', _add_docs),\n ('doc-description.ec2.revoke-security-groupdoc-ingress', _add_docs),\n ]\nPROTOCOL_DOCS = ('<p>The IP protocol of this permission.</p>'\n '<p>Valid protocol values: <code>tcp</code>, '\n '<code>udp</code>, <code>icmp</code></p>')\nPORT_DOCS = ('<p>For TCP or UDP: The range of ports to allow.'\n ' A single integer or a range (min-max). You can '\n 'specify <code>all</code> to mean all ports</p>')\nCIDR_DOCS = '<p>The CIDR IP range.</p>'\nSOURCEGROUP_DOCS = ('<p>The name of the source security group. '\n 'Cannot be used when specifying a CIDR IP address.')\nGROUPOWNER_DOCS = ('<p>The AWS account ID that owns the source security '\n 'group. 
Cannot be used when specifying a CIDR IP '\n 'address.</p>')\n\ndef register_secgroup(event_handler):\n for event, handler in EVENTS:\n event_handler.register(event, handler)\n\n\ndef _build_ip_permissions(params, key, value):\n if 'ip_permissions' not in params:\n params['ip_permissions'] = [{}]\n if key == 'CidrIp':\n if 'IpRanges' not in params['ip_permissions'][0]:\n params['ip_permissions'][0]['IpRanges'] = []\n params['ip_permissions'][0]['IpRanges'].append(value)\n elif key in ('GroupId', 'GroupName', 'UserId'):\n if 'UserIdGroupPairs' not in params['ip_permissions'][0]:\n params['ip_permissions'][0]['UserIdGroupPairs'] = [{}]\n params['ip_permissions'][0]['UserIdGroupPairs'][0][key] = value\n else:\n params['ip_permissions'][0][key] = value\n\n\nclass ProtocolArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n try:\n int_value = int(value)\n if (int_value < 0 or int_value > 255) and int_value != -1:\n msg = ('protocol numbers must be in the range 0-255 '\n 'or -1 to specify all protocols')\n raise ValueError(msg)\n except ValueError:\n if value not in ('tcp', 'udp', 'icmp', 'all'):\n msg = ('protocol parameter should be one of: '\n 'tcp|udp|icmp|all or any valid protocol number.')\n raise ValueError(msg)\n if value == 'all':\n value = '-1'\n _build_ip_permissions(parameters, 'IpProtocol', value)\n\n\nclass PortArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n try:\n if value == '-1' or value == 'all':\n fromstr = '-1'\n tostr = '-1'\n elif '-' in value:\n fromstr, tostr = value.split('-')\n else:\n fromstr, tostr = (value, value)\n _build_ip_permissions(parameters, 'FromPort', int(fromstr))\n _build_ip_permissions(parameters, 'ToPort', int(tostr))\n except ValueError:\n msg = ('port parameter should be of the '\n 'form <from[-to]> (e.g. 22 or 22-25)')\n raise ValueError(msg)\n\n\nclass CidrArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n value = [{'CidrIp': value}]\n _build_ip_permissions(parameters, 'IpRanges', value)\n\n\nclass SourceGroupArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n if value.startswith('sg-'):\n _build_ip_permissions(parameters, 'GroupId', value)\n else:\n _build_ip_permissions(parameters, 'GroupName', value)\n\n\nclass GroupOwnerArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value:\n _build_ip_permissions(parameters, 'UserId', value)\n", "path": "awscli/customizations/ec2secgroupsimplify.py"}]} | 2,402 | 290 |
gh_patches_debug_15523 | rasdani/github-patches | git_diff | docker__docker-py-1629 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Question about stream flag when building image
[This section of the docs](https://docker-py.readthedocs.io/en/stable/images.html#docker.models.images.ImageCollection.build) says:
> **stream (bool)** – Deprecated for API version > 1.8 (always True). Return a blocking generator you can iterate over to retrieve build output as it happens
However, if I attempt to run something like:
```py
docker.images.build(path='docker/example/', tag='example', stream=True)
```
It blocks until the image is built, and then returns the image.
What's a "blocking generator" and how is it supposed to be used?
Thanks!
--- END ISSUE ---
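
A "blocking generator" in this context is a generator over the daemon's build output: each iteration blocks until the next chunk of output arrives. The high-level `images.build()` consumes that generator internally and only returns once the image exists, so `stream=True` has no visible effect there. A minimal sketch of streaming the output yourself through the low-level client (the `path`/`tag` values are taken from the issue; the socket URL is an assumption):

```python
import docker

# Low-level client; this is the API the docstring points to for raw output.
api = docker.APIClient(base_url="unix://var/run/docker.sock")

# build() returns a generator: each iteration blocks until the daemon
# emits the next chunk, decoded into a dict because of decode=True.
for chunk in api.build(path="docker/example/", tag="example", decode=True):
    if "stream" in chunk:
        print(chunk["stream"], end="")
```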
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/models/images.py`
Content:
```
1 import re
2
3 import six
4
5 from ..api import APIClient
6 from ..errors import BuildError
7 from ..utils.json_stream import json_stream
8 from .resource import Collection, Model
9
10
11 class Image(Model):
12 """
13 An image on the server.
14 """
15 def __repr__(self):
16 return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
17
18 @property
19 def labels(self):
20 """
21 The labels of an image as dictionary.
22 """
23 result = self.attrs['Config'].get('Labels')
24 return result or {}
25
26 @property
27 def short_id(self):
28 """
29 The ID of the image truncated to 10 characters, plus the ``sha256:``
30 prefix.
31 """
32 if self.id.startswith('sha256:'):
33 return self.id[:17]
34 return self.id[:10]
35
36 @property
37 def tags(self):
38 """
39 The image's tags.
40 """
41 tags = self.attrs.get('RepoTags')
42 if tags is None:
43 tags = []
44 return [tag for tag in tags if tag != '<none>:<none>']
45
46 def history(self):
47 """
48 Show the history of an image.
49
50 Returns:
51 (str): The history of the image.
52
53 Raises:
54 :py:class:`docker.errors.APIError`
55 If the server returns an error.
56 """
57 return self.client.api.history(self.id)
58
59 def save(self):
60 """
61 Get a tarball of an image. Similar to the ``docker save`` command.
62
63 Returns:
64 (urllib3.response.HTTPResponse object): The response from the
65 daemon.
66
67 Raises:
68 :py:class:`docker.errors.APIError`
69 If the server returns an error.
70
71 Example:
72
73 >>> image = cli.images.get("fedora:latest")
74 >>> resp = image.save()
75 >>> f = open('/tmp/fedora-latest.tar', 'w')
76 >>> for chunk in resp.stream():
77 >>> f.write(chunk)
78 >>> f.close()
79 """
80 return self.client.api.get_image(self.id)
81
82 def tag(self, repository, tag=None, **kwargs):
83 """
84 Tag this image into a repository. Similar to the ``docker tag``
85 command.
86
87 Args:
88 repository (str): The repository to set for the tag
89 tag (str): The tag name
90 force (bool): Force
91
92 Raises:
93 :py:class:`docker.errors.APIError`
94 If the server returns an error.
95
96 Returns:
97 (bool): ``True`` if successful
98 """
99 self.client.api.tag(self.id, repository, tag=tag, **kwargs)
100
101
102 class ImageCollection(Collection):
103 model = Image
104
105 def build(self, **kwargs):
106 """
107 Build an image and return it. Similar to the ``docker build``
108 command. Either ``path`` or ``fileobj`` must be set.
109
110 If you have a tar file for the Docker build context (including a
111 Dockerfile) already, pass a readable file-like object to ``fileobj``
112 and also pass ``custom_context=True``. If the stream is compressed
113 also, set ``encoding`` to the correct value (e.g ``gzip``).
114
115 If you want to get the raw output of the build, use the
116 :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
117 low-level API.
118
119 Args:
120 path (str): Path to the directory containing the Dockerfile
121 fileobj: A file object to use as the Dockerfile. (Or a file-like
122 object)
123 tag (str): A tag to add to the final image
124 quiet (bool): Whether to return the status
125 nocache (bool): Don't use the cache when set to ``True``
126 rm (bool): Remove intermediate containers. The ``docker build``
127 command now defaults to ``--rm=true``, but we have kept the old
128 default of `False` to preserve backward compatibility
129 stream (bool): *Deprecated for API version > 1.8 (always True)*.
130 Return a blocking generator you can iterate over to retrieve
131 build output as it happens
132 timeout (int): HTTP timeout
133 custom_context (bool): Optional if using ``fileobj``
134 encoding (str): The encoding for a stream. Set to ``gzip`` for
135 compressing
136 pull (bool): Downloads any updates to the FROM image in Dockerfiles
137 forcerm (bool): Always remove intermediate containers, even after
138 unsuccessful builds
139 dockerfile (str): path within the build context to the Dockerfile
140 buildargs (dict): A dictionary of build arguments
141 container_limits (dict): A dictionary of limits applied to each
142 container created by the build process. Valid keys:
143
144 - memory (int): set memory limit for build
145 - memswap (int): Total memory (memory + swap), -1 to disable
146 swap
147 - cpushares (int): CPU shares (relative weight)
148 - cpusetcpus (str): CPUs in which to allow execution, e.g.,
149 ``"0-3"``, ``"0,1"``
150 decode (bool): If set to ``True``, the returned stream will be
151 decoded into dicts on the fly. Default ``False``.
152 cache_from (list): A list of images used for build cache
153 resolution.
154 target (str): Name of the build-stage to build in a multi-stage
155 Dockerfile.
156
157 Returns:
158 (:py:class:`Image`): The built image.
159
160 Raises:
161 :py:class:`docker.errors.BuildError`
162 If there is an error during the build.
163 :py:class:`docker.errors.APIError`
164 If the server returns any other error.
165 ``TypeError``
166 If neither ``path`` nor ``fileobj`` is specified.
167 """
168 resp = self.client.api.build(**kwargs)
169 if isinstance(resp, six.string_types):
170 return self.get(resp)
171 last_event = None
172 for chunk in json_stream(resp):
173 if 'error' in chunk:
174 raise BuildError(chunk['error'])
175 if 'stream' in chunk:
176 match = re.search(
177 r'(Successfully built |sha256:)([0-9a-f]+)',
178 chunk['stream']
179 )
180 if match:
181 image_id = match.group(2)
182 return self.get(image_id)
183 last_event = chunk
184
185 raise BuildError(last_event or 'Unknown')
186
187 def get(self, name):
188 """
189 Gets an image.
190
191 Args:
192 name (str): The name of the image.
193
194 Returns:
195 (:py:class:`Image`): The image.
196
197 Raises:
198 :py:class:`docker.errors.ImageNotFound`
199 If the image does not exist.
200 :py:class:`docker.errors.APIError`
201 If the server returns an error.
202 """
203 return self.prepare_model(self.client.api.inspect_image(name))
204
205 def list(self, name=None, all=False, filters=None):
206 """
207 List images on the server.
208
209 Args:
210 name (str): Only show images belonging to the repository ``name``
211 all (bool): Show intermediate image layers. By default, these are
212 filtered out.
213 filters (dict): Filters to be processed on the image list.
214 Available filters:
215 - ``dangling`` (bool)
216 - ``label`` (str): format either ``key`` or ``key=value``
217
218 Returns:
219 (list of :py:class:`Image`): The images.
220
221 Raises:
222 :py:class:`docker.errors.APIError`
223 If the server returns an error.
224 """
225 resp = self.client.api.images(name=name, all=all, filters=filters)
226 return [self.prepare_model(r) for r in resp]
227
228 def load(self, data):
229 """
230 Load an image that was previously saved using
231 :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
232 Similar to ``docker load``.
233
234 Args:
235 data (binary): Image data to be loaded.
236
237 Raises:
238 :py:class:`docker.errors.APIError`
239 If the server returns an error.
240 """
241 return self.client.api.load_image(data)
242
243 def pull(self, name, tag=None, **kwargs):
244 """
245 Pull an image of the given name and return it. Similar to the
246 ``docker pull`` command.
247
248 If you want to get the raw pull output, use the
249 :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
250 low-level API.
251
252 Args:
253 repository (str): The repository to pull
254 tag (str): The tag to pull
255 insecure_registry (bool): Use an insecure registry
256 auth_config (dict): Override the credentials that
257 :py:meth:`~docker.client.DockerClient.login` has set for
258 this request. ``auth_config`` should contain the ``username``
259 and ``password`` keys to be valid.
260
261 Returns:
262 (:py:class:`Image`): The image that has been pulled.
263
264 Raises:
265 :py:class:`docker.errors.APIError`
266 If the server returns an error.
267
268 Example:
269
270 >>> image = client.images.pull('busybox')
271 """
272 self.client.api.pull(name, tag=tag, **kwargs)
273 return self.get('{0}:{1}'.format(name, tag) if tag else name)
274
275 def push(self, repository, tag=None, **kwargs):
276 return self.client.api.push(repository, tag=tag, **kwargs)
277 push.__doc__ = APIClient.push.__doc__
278
279 def remove(self, *args, **kwargs):
280 self.client.api.remove_image(*args, **kwargs)
281 remove.__doc__ = APIClient.remove_image.__doc__
282
283 def search(self, *args, **kwargs):
284 return self.client.api.search(*args, **kwargs)
285 search.__doc__ = APIClient.search.__doc__
286
287 def prune(self, filters=None):
288 return self.client.api.prune_images(filters=filters)
289 prune.__doc__ = APIClient.prune_images.__doc__
290
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/models/images.py b/docker/models/images.py
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -126,9 +126,6 @@
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
- stream (bool): *Deprecated for API version > 1.8 (always True)*.
- Return a blocking generator you can iterate over to retrieve
- build output as it happens
timeout (int): HTTP timeout
custom_context (bool): Optional if using ``fileobj``
encoding (str): The encoding for a stream. Set to ``gzip`` for
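
The patch is documentation-only: the deprecated `stream` entry is dropped from the docstring, since the high-level wrapper always consumes the stream itself. A usage sketch of the wrapper after the fix, reusing the build context from the issue (at this version of the library the call returns the `Image` directly, not a tuple):

```python
import docker

client = docker.from_env()

# Blocks until the build finishes, then resolves and returns the built image.
image = client.images.build(path="docker/example/", tag="example")
print(image.short_id)
```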
| {"golden_diff": "diff --git a/docker/models/images.py b/docker/models/images.py\n--- a/docker/models/images.py\n+++ b/docker/models/images.py\n@@ -126,9 +126,6 @@\n rm (bool): Remove intermediate containers. The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n- stream (bool): *Deprecated for API version > 1.8 (always True)*.\n- Return a blocking generator you can iterate over to retrieve\n- build output as it happens\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n", "issue": "Question about stream flag when building image\n[This section of the docs](https://docker-py.readthedocs.io/en/stable/images.html#docker.models.images.ImageCollection.build) says:\r\n\r\n> **stream (bool)** \u2013 Deprecated for API version > 1.8 (always True). Return a blocking generator you can iterate over to retrieve build output as it happens\r\n\r\nHowever, if I attempt to run something like:\r\n\r\n```py\r\ndocker.images.build(path='docker/example/', tag='example', stream=True)\r\n```\r\n\r\nIt is blocked until the image is built, and returns the image then.\r\n\r\nWhat's a \"blocking generator\" and how is it supposed to be used?\r\n\r\nThanks!\n", "before_files": [{"content": "import re\n\nimport six\n\nfrom ..api import APIClient\nfrom ..errors import BuildError\nfrom ..utils.json_stream import json_stream\nfrom .resource import Collection, Model\n\n\nclass Image(Model):\n \"\"\"\n An image on the server.\n \"\"\"\n def __repr__(self):\n return \"<%s: '%s'>\" % (self.__class__.__name__, \"', '\".join(self.tags))\n\n @property\n def labels(self):\n \"\"\"\n The labels of an image as dictionary.\n \"\"\"\n result = self.attrs['Config'].get('Labels')\n return result or {}\n\n @property\n def short_id(self):\n \"\"\"\n The ID of the image truncated to 10 characters, plus the ``sha256:``\n prefix.\n \"\"\"\n if self.id.startswith('sha256:'):\n return self.id[:17]\n return self.id[:10]\n\n @property\n def tags(self):\n \"\"\"\n The image's tags.\n \"\"\"\n tags = self.attrs.get('RepoTags')\n if tags is None:\n tags = []\n return [tag for tag in tags if tag != '<none>:<none>']\n\n def history(self):\n \"\"\"\n Show the history of an image.\n\n Returns:\n (str): The history of the image.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.history(self.id)\n\n def save(self):\n \"\"\"\n Get a tarball of an image. Similar to the ``docker save`` command.\n\n Returns:\n (urllib3.response.HTTPResponse object): The response from the\n daemon.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = cli.images.get(\"fedora:latest\")\n >>> resp = image.save()\n >>> f = open('/tmp/fedora-latest.tar', 'w')\n >>> for chunk in resp.stream():\n >>> f.write(chunk)\n >>> f.close()\n \"\"\"\n return self.client.api.get_image(self.id)\n\n def tag(self, repository, tag=None, **kwargs):\n \"\"\"\n Tag this image into a repository. 
Similar to the ``docker tag``\n command.\n\n Args:\n repository (str): The repository to set for the tag\n tag (str): The tag name\n force (bool): Force\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Returns:\n (bool): ``True`` if successful\n \"\"\"\n self.client.api.tag(self.id, repository, tag=tag, **kwargs)\n\n\nclass ImageCollection(Collection):\n model = Image\n\n def build(self, **kwargs):\n \"\"\"\n Build an image and return it. Similar to the ``docker build``\n command. Either ``path`` or ``fileobj`` must be set.\n\n If you have a tar file for the Docker build context (including a\n Dockerfile) already, pass a readable file-like object to ``fileobj``\n and also pass ``custom_context=True``. If the stream is compressed\n also, set ``encoding`` to the correct value (e.g ``gzip``).\n\n If you want to get the raw output of the build, use the\n :py:meth:`~docker.api.build.BuildApiMixin.build` method in the\n low-level API.\n\n Args:\n path (str): Path to the directory containing the Dockerfile\n fileobj: A file object to use as the Dockerfile. (Or a file-like\n object)\n tag (str): A tag to add to the final image\n quiet (bool): Whether to return the status\n nocache (bool): Don't use the cache when set to ``True``\n rm (bool): Remove intermediate containers. The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n stream (bool): *Deprecated for API version > 1.8 (always True)*.\n Return a blocking generator you can iterate over to retrieve\n build output as it happens\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n compressing\n pull (bool): Downloads any updates to the FROM image in Dockerfiles\n forcerm (bool): Always remove intermediate containers, even after\n unsuccessful builds\n dockerfile (str): path within the build context to the Dockerfile\n buildargs (dict): A dictionary of build arguments\n container_limits (dict): A dictionary of limits applied to each\n container created by the build process. Valid keys:\n\n - memory (int): set memory limit for build\n - memswap (int): Total memory (memory + swap), -1 to disable\n swap\n - cpushares (int): CPU shares (relative weight)\n - cpusetcpus (str): CPUs in which to allow execution, e.g.,\n ``\"0-3\"``, ``\"0,1\"``\n decode (bool): If set to ``True``, the returned stream will be\n decoded into dicts on the fly. 
Default ``False``.\n cache_from (list): A list of images used for build cache\n resolution.\n target (str): Name of the build-stage to build in a multi-stage\n Dockerfile.\n\n Returns:\n (:py:class:`Image`): The built image.\n\n Raises:\n :py:class:`docker.errors.BuildError`\n If there is an error during the build.\n :py:class:`docker.errors.APIError`\n If the server returns any other error.\n ``TypeError``\n If neither ``path`` nor ``fileobj`` is specified.\n \"\"\"\n resp = self.client.api.build(**kwargs)\n if isinstance(resp, six.string_types):\n return self.get(resp)\n last_event = None\n for chunk in json_stream(resp):\n if 'error' in chunk:\n raise BuildError(chunk['error'])\n if 'stream' in chunk:\n match = re.search(\n r'(Successfully built |sha256:)([0-9a-f]+)',\n chunk['stream']\n )\n if match:\n image_id = match.group(2)\n return self.get(image_id)\n last_event = chunk\n\n raise BuildError(last_event or 'Unknown')\n\n def get(self, name):\n \"\"\"\n Gets an image.\n\n Args:\n name (str): The name of the image.\n\n Returns:\n (:py:class:`Image`): The image.\n\n Raises:\n :py:class:`docker.errors.ImageNotFound`\n If the image does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_image(name))\n\n def list(self, name=None, all=False, filters=None):\n \"\"\"\n List images on the server.\n\n Args:\n name (str): Only show images belonging to the repository ``name``\n all (bool): Show intermediate image layers. By default, these are\n filtered out.\n filters (dict): Filters to be processed on the image list.\n Available filters:\n - ``dangling`` (bool)\n - ``label`` (str): format either ``key`` or ``key=value``\n\n Returns:\n (list of :py:class:`Image`): The images.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.images(name=name, all=all, filters=filters)\n return [self.prepare_model(r) for r in resp]\n\n def load(self, data):\n \"\"\"\n Load an image that was previously saved using\n :py:meth:`~docker.models.images.Image.save` (or ``docker save``).\n Similar to ``docker load``.\n\n Args:\n data (binary): Image data to be loaded.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.load_image(data)\n\n def pull(self, name, tag=None, **kwargs):\n \"\"\"\n Pull an image of the given name and return it. Similar to the\n ``docker pull`` command.\n\n If you want to get the raw pull output, use the\n :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the\n low-level API.\n\n Args:\n repository (str): The repository to pull\n tag (str): The tag to pull\n insecure_registry (bool): Use an insecure registry\n auth_config (dict): Override the credentials that\n :py:meth:`~docker.client.DockerClient.login` has set for\n this request. 
``auth_config`` should contain the ``username``\n and ``password`` keys to be valid.\n\n Returns:\n (:py:class:`Image`): The image that has been pulled.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = client.images.pull('busybox')\n \"\"\"\n self.client.api.pull(name, tag=tag, **kwargs)\n return self.get('{0}:{1}'.format(name, tag) if tag else name)\n\n def push(self, repository, tag=None, **kwargs):\n return self.client.api.push(repository, tag=tag, **kwargs)\n push.__doc__ = APIClient.push.__doc__\n\n def remove(self, *args, **kwargs):\n self.client.api.remove_image(*args, **kwargs)\n remove.__doc__ = APIClient.remove_image.__doc__\n\n def search(self, *args, **kwargs):\n return self.client.api.search(*args, **kwargs)\n search.__doc__ = APIClient.search.__doc__\n\n def prune(self, filters=None):\n return self.client.api.prune_images(filters=filters)\n prune.__doc__ = APIClient.prune_images.__doc__\n", "path": "docker/models/images.py"}], "after_files": [{"content": "import re\n\nimport six\n\nfrom ..api import APIClient\nfrom ..errors import BuildError\nfrom ..utils.json_stream import json_stream\nfrom .resource import Collection, Model\n\n\nclass Image(Model):\n \"\"\"\n An image on the server.\n \"\"\"\n def __repr__(self):\n return \"<%s: '%s'>\" % (self.__class__.__name__, \"', '\".join(self.tags))\n\n @property\n def labels(self):\n \"\"\"\n The labels of an image as dictionary.\n \"\"\"\n result = self.attrs['Config'].get('Labels')\n return result or {}\n\n @property\n def short_id(self):\n \"\"\"\n The ID of the image truncated to 10 characters, plus the ``sha256:``\n prefix.\n \"\"\"\n if self.id.startswith('sha256:'):\n return self.id[:17]\n return self.id[:10]\n\n @property\n def tags(self):\n \"\"\"\n The image's tags.\n \"\"\"\n tags = self.attrs.get('RepoTags')\n if tags is None:\n tags = []\n return [tag for tag in tags if tag != '<none>:<none>']\n\n def history(self):\n \"\"\"\n Show the history of an image.\n\n Returns:\n (str): The history of the image.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.history(self.id)\n\n def save(self):\n \"\"\"\n Get a tarball of an image. Similar to the ``docker save`` command.\n\n Returns:\n (urllib3.response.HTTPResponse object): The response from the\n daemon.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = cli.images.get(\"fedora:latest\")\n >>> resp = image.save()\n >>> f = open('/tmp/fedora-latest.tar', 'w')\n >>> for chunk in resp.stream():\n >>> f.write(chunk)\n >>> f.close()\n \"\"\"\n return self.client.api.get_image(self.id)\n\n def tag(self, repository, tag=None, **kwargs):\n \"\"\"\n Tag this image into a repository. Similar to the ``docker tag``\n command.\n\n Args:\n repository (str): The repository to set for the tag\n tag (str): The tag name\n force (bool): Force\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Returns:\n (bool): ``True`` if successful\n \"\"\"\n self.client.api.tag(self.id, repository, tag=tag, **kwargs)\n\n\nclass ImageCollection(Collection):\n model = Image\n\n def build(self, **kwargs):\n \"\"\"\n Build an image and return it. Similar to the ``docker build``\n command. 
Either ``path`` or ``fileobj`` must be set.\n\n If you have a tar file for the Docker build context (including a\n Dockerfile) already, pass a readable file-like object to ``fileobj``\n and also pass ``custom_context=True``. If the stream is compressed\n also, set ``encoding`` to the correct value (e.g ``gzip``).\n\n If you want to get the raw output of the build, use the\n :py:meth:`~docker.api.build.BuildApiMixin.build` method in the\n low-level API.\n\n Args:\n path (str): Path to the directory containing the Dockerfile\n fileobj: A file object to use as the Dockerfile. (Or a file-like\n object)\n tag (str): A tag to add to the final image\n quiet (bool): Whether to return the status\n nocache (bool): Don't use the cache when set to ``True``\n rm (bool): Remove intermediate containers. The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n compressing\n pull (bool): Downloads any updates to the FROM image in Dockerfiles\n forcerm (bool): Always remove intermediate containers, even after\n unsuccessful builds\n dockerfile (str): path within the build context to the Dockerfile\n buildargs (dict): A dictionary of build arguments\n container_limits (dict): A dictionary of limits applied to each\n container created by the build process. Valid keys:\n\n - memory (int): set memory limit for build\n - memswap (int): Total memory (memory + swap), -1 to disable\n swap\n - cpushares (int): CPU shares (relative weight)\n - cpusetcpus (str): CPUs in which to allow execution, e.g.,\n ``\"0-3\"``, ``\"0,1\"``\n decode (bool): If set to ``True``, the returned stream will be\n decoded into dicts on the fly. Default ``False``.\n cache_from (list): A list of images used for build cache\n resolution.\n target (str): Name of the build-stage to build in a multi-stage\n Dockerfile.\n\n Returns:\n (:py:class:`Image`): The built image.\n\n Raises:\n :py:class:`docker.errors.BuildError`\n If there is an error during the build.\n :py:class:`docker.errors.APIError`\n If the server returns any other error.\n ``TypeError``\n If neither ``path`` nor ``fileobj`` is specified.\n \"\"\"\n resp = self.client.api.build(**kwargs)\n if isinstance(resp, six.string_types):\n return self.get(resp)\n last_event = None\n for chunk in json_stream(resp):\n if 'error' in chunk:\n raise BuildError(chunk['error'])\n if 'stream' in chunk:\n match = re.search(\n r'(Successfully built |sha256:)([0-9a-f]+)',\n chunk['stream']\n )\n if match:\n image_id = match.group(2)\n return self.get(image_id)\n last_event = chunk\n\n raise BuildError(last_event or 'Unknown')\n\n def get(self, name):\n \"\"\"\n Gets an image.\n\n Args:\n name (str): The name of the image.\n\n Returns:\n (:py:class:`Image`): The image.\n\n Raises:\n :py:class:`docker.errors.ImageNotFound`\n If the image does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_image(name))\n\n def list(self, name=None, all=False, filters=None):\n \"\"\"\n List images on the server.\n\n Args:\n name (str): Only show images belonging to the repository ``name``\n all (bool): Show intermediate image layers. 
By default, these are\n filtered out.\n filters (dict): Filters to be processed on the image list.\n Available filters:\n - ``dangling`` (bool)\n - ``label`` (str): format either ``key`` or ``key=value``\n\n Returns:\n (list of :py:class:`Image`): The images.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.images(name=name, all=all, filters=filters)\n return [self.prepare_model(r) for r in resp]\n\n def load(self, data):\n \"\"\"\n Load an image that was previously saved using\n :py:meth:`~docker.models.images.Image.save` (or ``docker save``).\n Similar to ``docker load``.\n\n Args:\n data (binary): Image data to be loaded.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.load_image(data)\n\n def pull(self, name, tag=None, **kwargs):\n \"\"\"\n Pull an image of the given name and return it. Similar to the\n ``docker pull`` command.\n\n If you want to get the raw pull output, use the\n :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the\n low-level API.\n\n Args:\n repository (str): The repository to pull\n tag (str): The tag to pull\n insecure_registry (bool): Use an insecure registry\n auth_config (dict): Override the credentials that\n :py:meth:`~docker.client.DockerClient.login` has set for\n this request. ``auth_config`` should contain the ``username``\n and ``password`` keys to be valid.\n\n Returns:\n (:py:class:`Image`): The image that has been pulled.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = client.images.pull('busybox')\n \"\"\"\n self.client.api.pull(name, tag=tag, **kwargs)\n return self.get('{0}:{1}'.format(name, tag) if tag else name)\n\n def push(self, repository, tag=None, **kwargs):\n return self.client.api.push(repository, tag=tag, **kwargs)\n push.__doc__ = APIClient.push.__doc__\n\n def remove(self, *args, **kwargs):\n self.client.api.remove_image(*args, **kwargs)\n remove.__doc__ = APIClient.remove_image.__doc__\n\n def search(self, *args, **kwargs):\n return self.client.api.search(*args, **kwargs)\n search.__doc__ = APIClient.search.__doc__\n\n def prune(self, filters=None):\n return self.client.api.prune_images(filters=filters)\n prune.__doc__ = APIClient.prune_images.__doc__\n", "path": "docker/models/images.py"}]} | 3,340 | 165 |
gh_patches_debug_24353 | rasdani/github-patches | git_diff | armada-ths__ais-766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disable debug in production
https://app.asana.com/0/1204005227675382/1204038385197069/f
Currently the Django debug flag is true in the production environment, leading to security issues.
--- END ISSUE ---
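
The production settings below hard-code the flag, so the one-line fix is to flip it to `False`. A common hardening pattern, shown here only as a hedged sketch (the `DJANGO_DEBUG` variable name is an assumption, not something the repository defines), is to derive the flag from the environment so it defaults to off:

```python
import os

# Debug stays off unless explicitly opted in via the environment.
DEBUG = os.environ.get("DJANGO_DEBUG", "false").lower() == "true"
```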
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ais/common/settings.py`
Content:
```
1 """
2 This is the settings file containing settings common to both the
3 development and production environments.
4
5 For more information on this file, see
6 https://docs.djangoproject.com/en/1.8/topics/settings/
7
8 For the full list of settings and their values, see
9 https://docs.djangoproject.com/en/1.8/ref/settings/
10 """
11
12 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
13 import os
14 from os import path
15
16 BASE_DIR = path.join(path.dirname(path.abspath(__file__)), '../../')
17
18 CRISPY_TEMPLATE_PACK = 'bootstrap3'
19
20 INSTALLED_APPS = (
21 'django.contrib.admin',
22 'django.contrib.auth',
23 'django.contrib.contenttypes',
24 'django.contrib.sessions',
25 'django.contrib.messages',
26 'django.contrib.staticfiles',
27 'django.contrib.gis',
28 'corsheaders',
29 'webpack_loader',
30 'polymorphic',
31 'events',
32 'companies',
33 'fair',
34 'people',
35 'locations',
36 'recruitment',
37 'api',
38 'news',
39 'orders',
40 'unirel',
41 'crispy_forms',
42 'exhibitors',
43 'django.contrib.humanize',
44 'banquet',
45 'register',
46 'matching',
47 'student_profiles',
48 'transportation',
49 'accounting',
50 'dynamic_formsets',
51 'journal',
52 'markupfield',
53 'testpage',
54 )
55
56 MIDDLEWARE = [
57 'corsheaders.middleware.CorsMiddleware',
58 'django.contrib.sessions.middleware.SessionMiddleware',
59 'django.middleware.common.CommonMiddleware',
60 'django.middleware.csrf.CsrfViewMiddleware',
61 'django.middleware.gzip.GZipMiddleware',
62 'django.contrib.auth.middleware.AuthenticationMiddleware',
63 'django.contrib.messages.middleware.MessageMiddleware',
64 'django.middleware.clickjacking.XFrameOptionsMiddleware',
65 'django.middleware.security.SecurityMiddleware',
66 'recruitment.middleware.LoginRequiredMiddleware'
67 ]
68
69 USE_ETAGS = True
70
71 AUTHENTICATION_BACKENDS = (
72 'django.contrib.auth.backends.ModelBackend',
73 )
74
75 TEMPLATES = [
76 {
77 'BACKEND': 'django.template.backends.django.DjangoTemplates',
78 'DIRS': [path.join(BASE_DIR, "templates")],
79 'APP_DIRS': True,
80 'OPTIONS': {
81 'context_processors': [
82 'django.template.context_processors.debug',
83 'django.template.context_processors.request',
84 'django.contrib.auth.context_processors.auth',
85 'django.contrib.messages.context_processors.messages',
86 ],
87 },
88 },
89 ]
90
91 WEBPACK_LOADER = {
92 'DEFAULT': {
93 'BUNDLE_DIR_NAME': 'bundles/',
94 'STATS_FILE': path.join(BASE_DIR, 'webpack-stats.js')
95 }
96 }
97
98 WSGI_APPLICATION = 'ais.wsgi.application'
99
100 STATIC_ROOT = path.join(BASE_DIR, "static")
101 STATIC_URL = '/static/'
102 STATICFILES_DIRS = (
103 path.join(BASE_DIR, "ais_static"),
104 )
105
106 ADMIN_MEDIA_PREFIX = '/static/admin/'
107 MEDIA_ROOT = path.abspath(path.join(BASE_DIR, '..', 'media'))
108
109 MEDIA_URL = '/media/'
110
111 # Internationalization
112 # https://docs.djangoproject.com/en/1.8/topics/i18n/
113 LANGUAGE_CODE = 'en-us'
114 TIME_ZONE = 'Europe/Stockholm'
115 USE_I18N = True
116 LANGUAGE_CODE = 'en-us'
117 USE_L10N = False
118 DATE_FORMAT = "M j, Y"
119 DATETIME_FORMAT = "M j, Y, H:i"
120
```
Path: `ais/production/settings.py`
Content:
```
1 """
2 This is the settings file to be used in a production environment. It's
3 more secure, more robust and more performant than the development setup
4 and also configures AIS to talk to external services.
5 """
6
7 import os
8 from ais.common.settings import *
9
10 SECRET_KEY = os.environ.get('SECRET_KEY')
11 # This is important so other people can't set their own domains
12 # to point to AIS (which would be a security concern).
13 ALLOWED_HOSTS = ['.armada.nu', 'localhost', 'armada.nu']
14
15 DEBUG = True
16
17 # The URL scheme is slightly different in a production environment
18 # since we need to accomodate the KTH OpenID Connect integration.
19 ROOT_URLCONF = 'ais.production.urls'
20
21 # Use KTH OpenID Connect for authentication
22 INSTALLED_APPS += ('kth_login','raven.contrib.django.raven_compat',)
23
24 # Use a full-fledged database instead of SQLite.
25 DATABASES = {
26 'default': {
27 'ENGINE': 'django.contrib.gis.db.backends.postgis',
28 'NAME': os.environ.get('DB_NAME', 'ais_dev'),
29 'USER': os.environ.get('DB_USERNAME', 'ais_dev'),
30 'PASSWORD': os.environ.get('DB_PASSWORD', 'ais_dev'),
31 'HOST': os.environ.get('DB_HOST', '127.0.0.1'),
32 'PORT': os.environ.get('DB_PORT', '5432'),
33 }
34 }
35
36 # SENTRY
37 RAVEN_CONFIG = {
38 'dsn': 'https://%s:%[email protected]/%s' % (os.environ.get('SENTRY_USERNAME'), os.environ.get('SENTRY_PASSWORD'), os.environ.get('SENTRY_APPID')),
39 'processors': ('raven.processors.Processor',)
40 }
41
42 CACHES = {
43 'default': {
44 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
45 }
46 }
47
48 LOGGING = {
49 'version': 1,
50 'disable_existing_loggers': True,
51 'formatters': {
52 'verbose': {
53 'format': '%(levelname)s %(asctime)s %(module)s '
54 '%(process)d %(thread)d %(message)s'
55 },
56 },
57 'handlers': {
58 'sentry': {
59 'level': 'ERROR', # To capture more than ERROR, change to WARNING, INFO, etc.
60 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
61 'tags': {'custom-tag': 'x'},
62 },
63 'console': {
64 'level': 'DEBUG',
65 'class': 'logging.StreamHandler',
66 'formatter': 'verbose'
67 }
68 },
69 'loggers': {
70 'root': {
71 'level': 'WARNING',
72 'handlers': ['sentry'],
73 },
74 'django.db.backends': {
75 'level': 'ERROR',
76 'handlers': ['console'],
77 'propagate': False,
78 },
79 'raven': {
80 'level': 'DEBUG',
81 'handlers': ['console'],
82 'propagate': False,
83 },
84 'sentry.errors': {
85 'level': 'DEBUG',
86 'handlers': ['console'],
87 'propagate': False,
88 },
89 },
90 }
91
92 # The system sends out system-related emails to these addresses.
93 ADMINS = MANAGERS = (
94 ('System', '[email protected]'),
95 )
96
97 # This is for AUTHLIB package for interacting with KTH OpenID Connect
98 # APPLICATION_ID is given from the 'secrets.py' file.
99 # CLIENT_SECRET is given from the 'secrets.py' file.
100 AUTHLIB_OAUTH_CLIENTS = {
101 'kth': {
102 'client_id': os.environ.get('APPLICATION_ID'),
103 'client_secret': os.environ.get('CLIENT_SECRET'),
104 'api_base_url': 'https://login.ug.kth.se/adfs/oauth2/',
105 }
106 }
107 LOGOUT_REDIRECT_URL = '/'
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ais/common/settings.py b/ais/common/settings.py
--- a/ais/common/settings.py
+++ b/ais/common/settings.py
@@ -18,6 +18,7 @@
CRISPY_TEMPLATE_PACK = 'bootstrap3'
INSTALLED_APPS = (
+ 'whitenoise.runserver_nostatic',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
@@ -55,6 +56,8 @@
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
+ "django.middleware.security.SecurityMiddleware",
+ "whitenoise.middleware.WhiteNoiseMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
diff --git a/ais/production/settings.py b/ais/production/settings.py
--- a/ais/production/settings.py
+++ b/ais/production/settings.py
@@ -12,7 +12,7 @@
# to point to AIS (which would be a security concern).
ALLOWED_HOSTS = ['.armada.nu', 'localhost', 'armada.nu']
-DEBUG = True
+DEBUG = False
# The URL scheme is slightly different in a production environment
# since we need to accomodate the KTH OpenID Connect integration.
| {"golden_diff": "diff --git a/ais/common/settings.py b/ais/common/settings.py\n--- a/ais/common/settings.py\n+++ b/ais/common/settings.py\n@@ -18,6 +18,7 @@\n CRISPY_TEMPLATE_PACK = 'bootstrap3'\n \n INSTALLED_APPS = (\n+ 'whitenoise.runserver_nostatic',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n@@ -55,6 +56,8 @@\n \n MIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n+ \"django.middleware.security.SecurityMiddleware\",\n+ \"whitenoise.middleware.WhiteNoiseMiddleware\",\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\ndiff --git a/ais/production/settings.py b/ais/production/settings.py\n--- a/ais/production/settings.py\n+++ b/ais/production/settings.py\n@@ -12,7 +12,7 @@\n # to point to AIS (which would be a security concern).\n ALLOWED_HOSTS = ['.armada.nu', 'localhost', 'armada.nu']\n \n-DEBUG = True\n+DEBUG = False\n \n # The URL scheme is slightly different in a production environment\n # since we need to accomodate the KTH OpenID Connect integration.\n", "issue": "Disable debug in production\nhttps://app.asana.com/0/1204005227675382/1204038385197069/f\r\n\r\nCurrently the Django debug flag is true in the production environment, leading to security issues.\n", "before_files": [{"content": "\"\"\"\nThis is the settings file containing settings common to both the\ndevelopment and production environments.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nfrom os import path\n\nBASE_DIR = path.join(path.dirname(path.abspath(__file__)), '../../')\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 'corsheaders',\n 'webpack_loader',\n 'polymorphic',\n 'events',\n 'companies',\n 'fair',\n 'people',\n 'locations',\n 'recruitment',\n 'api',\n 'news',\n 'orders',\n 'unirel',\n 'crispy_forms',\n 'exhibitors',\n 'django.contrib.humanize',\n 'banquet',\n 'register',\n 'matching',\n 'student_profiles',\n 'transportation',\n 'accounting',\n 'dynamic_formsets',\n 'journal',\n 'markupfield',\n 'testpage',\n)\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.gzip.GZipMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'recruitment.middleware.LoginRequiredMiddleware'\n]\n\nUSE_ETAGS = True\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [path.join(BASE_DIR, \"templates\")],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 
'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'BUNDLE_DIR_NAME': 'bundles/',\n 'STATS_FILE': path.join(BASE_DIR, 'webpack-stats.js')\n }\n}\n\nWSGI_APPLICATION = 'ais.wsgi.application'\n\nSTATIC_ROOT = path.join(BASE_DIR, \"static\")\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n path.join(BASE_DIR, \"ais_static\"),\n)\n\nADMIN_MEDIA_PREFIX = '/static/admin/'\nMEDIA_ROOT = path.abspath(path.join(BASE_DIR, '..', 'media'))\n\nMEDIA_URL = '/media/'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'Europe/Stockholm'\nUSE_I18N = True\nLANGUAGE_CODE = 'en-us'\nUSE_L10N = False\nDATE_FORMAT = \"M j, Y\"\nDATETIME_FORMAT = \"M j, Y, H:i\"\n", "path": "ais/common/settings.py"}, {"content": "\"\"\"\nThis is the settings file to be used in a production environment. It's\nmore secure, more robust and more performant than the development setup\nand also configures AIS to talk to external services.\n\"\"\"\n\nimport os\nfrom ais.common.settings import *\n\nSECRET_KEY = os.environ.get('SECRET_KEY')\n# This is important so other people can't set their own domains\n# to point to AIS (which would be a security concern).\nALLOWED_HOSTS = ['.armada.nu', 'localhost', 'armada.nu']\n\nDEBUG = True\n\n# The URL scheme is slightly different in a production environment\n# since we need to accomodate the KTH OpenID Connect integration.\nROOT_URLCONF = 'ais.production.urls'\n\n# Use KTH OpenID Connect for authentication\nINSTALLED_APPS += ('kth_login','raven.contrib.django.raven_compat',)\n\n# Use a full-fledged database instead of SQLite.\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': os.environ.get('DB_NAME', 'ais_dev'),\n 'USER': os.environ.get('DB_USERNAME', 'ais_dev'),\n 'PASSWORD': os.environ.get('DB_PASSWORD', 'ais_dev'),\n 'HOST': os.environ.get('DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DB_PORT', '5432'),\n }\n}\n\n# SENTRY\nRAVEN_CONFIG = {\n 'dsn': 'https://%s:%[email protected]/%s' % (os.environ.get('SENTRY_USERNAME'), os.environ.get('SENTRY_PASSWORD'), os.environ.get('SENTRY_APPID')),\n 'processors': ('raven.processors.Processor',)\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR', # To capture more than ERROR, change to WARNING, INFO, etc.\n 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\n# The system sends out system-related emails to these addresses.\nADMINS = MANAGERS = (\n ('System', '[email protected]'),\n)\n\n# This is for AUTHLIB package for interacting with KTH OpenID Connect\n# APPLICATION_ID is given from the 'secrets.py' file.\n# CLIENT_SECRET is given from the 'secrets.py' 
file.\nAUTHLIB_OAUTH_CLIENTS = {\n 'kth': {\n 'client_id': os.environ.get('APPLICATION_ID'),\n 'client_secret': os.environ.get('CLIENT_SECRET'),\n 'api_base_url': 'https://login.ug.kth.se/adfs/oauth2/',\n }\n}\nLOGOUT_REDIRECT_URL = '/'\n", "path": "ais/production/settings.py"}], "after_files": [{"content": "\"\"\"\nThis is the settings file containing settings common to both the\ndevelopment and production environments.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nfrom os import path\n\nBASE_DIR = path.join(path.dirname(path.abspath(__file__)), '../../')\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\nINSTALLED_APPS = (\n 'whitenoise.runserver_nostatic',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 'corsheaders',\n 'webpack_loader',\n 'polymorphic',\n 'events',\n 'companies',\n 'fair',\n 'people',\n 'locations',\n 'recruitment',\n 'api',\n 'news',\n 'orders',\n 'unirel',\n 'crispy_forms',\n 'exhibitors',\n 'django.contrib.humanize',\n 'banquet',\n 'register',\n 'matching',\n 'student_profiles',\n 'transportation',\n 'accounting',\n 'dynamic_formsets',\n 'journal',\n 'markupfield',\n 'testpage',\n)\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.gzip.GZipMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'recruitment.middleware.LoginRequiredMiddleware'\n]\n\nUSE_ETAGS = True\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [path.join(BASE_DIR, \"templates\")],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'BUNDLE_DIR_NAME': 'bundles/',\n 'STATS_FILE': path.join(BASE_DIR, 'webpack-stats.js')\n }\n}\n\nWSGI_APPLICATION = 'ais.wsgi.application'\n\nSTATIC_ROOT = path.join(BASE_DIR, \"static\")\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n path.join(BASE_DIR, \"ais_static\"),\n)\n\nADMIN_MEDIA_PREFIX = '/static/admin/'\nMEDIA_ROOT = path.abspath(path.join(BASE_DIR, '..', 'media'))\n\nMEDIA_URL = '/media/'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'Europe/Stockholm'\nUSE_I18N = True\nLANGUAGE_CODE = 'en-us'\nUSE_L10N = False\nDATE_FORMAT = \"M j, Y\"\nDATETIME_FORMAT = \"M j, Y, H:i\"\n", "path": "ais/common/settings.py"}, {"content": "\"\"\"\nThis is the settings file to be used in a production environment. 
It's\nmore secure, more robust and more performant than the development setup\nand also configures AIS to talk to external services.\n\"\"\"\n\nimport os\nfrom ais.common.settings import *\n\nSECRET_KEY = os.environ.get('SECRET_KEY')\n# This is important so other people can't set their own domains\n# to point to AIS (which would be a security concern).\nALLOWED_HOSTS = ['.armada.nu', 'localhost', 'armada.nu']\n\nDEBUG = False\n\n# The URL scheme is slightly different in a production environment\n# since we need to accomodate the KTH OpenID Connect integration.\nROOT_URLCONF = 'ais.production.urls'\n\n# Use KTH OpenID Connect for authentication\nINSTALLED_APPS += ('kth_login','raven.contrib.django.raven_compat',)\n\n# Use a full-fledged database instead of SQLite.\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': os.environ.get('DB_NAME', 'ais_dev'),\n 'USER': os.environ.get('DB_USERNAME', 'ais_dev'),\n 'PASSWORD': os.environ.get('DB_PASSWORD', 'ais_dev'),\n 'HOST': os.environ.get('DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DB_PORT', '5432'),\n }\n}\n\n# SENTRY\nRAVEN_CONFIG = {\n 'dsn': 'https://%s:%[email protected]/%s' % (os.environ.get('SENTRY_USERNAME'), os.environ.get('SENTRY_PASSWORD'), os.environ.get('SENTRY_APPID')),\n 'processors': ('raven.processors.Processor',)\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR', # To capture more than ERROR, change to WARNING, INFO, etc.\n 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\n# The system sends out system-related emails to these addresses.\nADMINS = MANAGERS = (\n ('System', '[email protected]'),\n)\n\n# This is for AUTHLIB package for interacting with KTH OpenID Connect\n# APPLICATION_ID is given from the 'secrets.py' file.\n# CLIENT_SECRET is given from the 'secrets.py' file.\nAUTHLIB_OAUTH_CLIENTS = {\n 'kth': {\n 'client_id': os.environ.get('APPLICATION_ID'),\n 'client_secret': os.environ.get('CLIENT_SECRET'),\n 'api_base_url': 'https://login.ug.kth.se/adfs/oauth2/',\n }\n}\nLOGOUT_REDIRECT_URL = '/'\n", "path": "ais/production/settings.py"}]} | 2,358 | 287 |
gh_patches_debug_19296 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1673 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pass `Accept` header in `contrib.utils.download`
I'm copying a comment here that I made in the [HEPData Zulip chat](https://hepdata.zulipchat.com/#narrow/stream/226203-pyhf/topic/DOIs/near/213610306) on 16th October 2020.
Regarding the issue (HEPData/hepdata#162) to mint DOIs for all local resource files attached to a submission, if we do eventually get around to addressing it, we would probably redirect the DOI to a landing page for the resource file, rather than to the resource file itself (e.g. the pyhf tarball). This would follow the DataCite [Best Practices for DOI Landing Pages](https://support.datacite.org/docs/landing-pages), e.g. "[DOIs should resolve to a landing page, not directly to the content](https://support.datacite.org/docs/landing-pages#dois-should-resolve-to-a-landing-page-not-directly-to-the-content)", which I'm currently breaking for the two manually minted DOIs. In the issue (HEPdata/hepdata#162) I mentioned the possibility of using [DataCite Content Negotiation](https://support.datacite.org/docs/datacite-content-resolver) to redirect to the resource file itself, but the linked page now says "Custom content types are no longer supported since January 1st, 2020". I thought maybe content negotiation could be used to return the `.tar.gz` file directly, but the intended purpose is to retrieve DOI metadata in different formats, not to provide the content itself. In anticipation of possible future changes, I'd recommend that you use the URL directly rather than the DOI in pyhf download scripts and documentation (e.g. revert #1109).
--- END ISSUE ---
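
The fix in the patch below keeps the DOI URL but sends an explicit `Accept` header, so a HEPData landing page that honours content negotiation can hand back the tarball directly. In isolation the request looks like the following sketch (the URL is the one used in the module's docstring example; the output filename is arbitrary):

```python
import requests

archive_url = "https://doi.org/10.17182/hepdata.90607.v3/r3"

# Ask for the tar content type; a landing page that supports content
# negotiation can then return the archive itself rather than HTML.
response = requests.get(archive_url, headers={"Accept": "application/x-tar"})
response.raise_for_status()

with open("1Lbb-likelihoods.tar.gz", "wb") as archive:
    archive.write(response.content)
```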
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/contrib/utils.py`
Content:
```
1 """Helper utilities for common tasks."""
2
3 from urllib.parse import urlparse
4 import tarfile
5 from io import BytesIO
6 import logging
7 from pyhf import exceptions
8
9 log = logging.getLogger(__name__)
10
11 __all__ = ["download"]
12
13
14 def __dir__():
15 return __all__
16
17
18 try:
19 import requests
20
21 def download(archive_url, output_directory, force=False, compress=False):
22 """
23 Download the patchset archive from the remote URL and extract it in a
24 directory at the path given.
25
26 Example:
27
28 >>> from pyhf.contrib.utils import download
29 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods")
30 >>> import os
31 >>> sorted(os.listdir("1Lbb-likelihoods"))
32 ['BkgOnly.json', 'README.md', 'patchset.json']
33 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods.tar.gz", compress=True)
34 >>> import glob
35 >>> glob.glob("1Lbb-likelihoods.tar.gz")
36 ['1Lbb-likelihoods.tar.gz']
37
38 Args:
39 archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.
40 output_directory (:obj:`str`): Name of the directory to unpack the archive into.
41 force (:obj:`bool`): Force download from non-approved host. Default is ``False``.
42 compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.
43
44 Raises:
45 :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
46 """
47 if not force:
48 valid_hosts = ["www.hepdata.net", "doi.org"]
49 netloc = urlparse(archive_url).netloc
50 if netloc not in valid_hosts:
51 raise exceptions.InvalidArchiveHost(
52 f"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\n"
53 + "To download an archive from this host use the --force option."
54 )
55
56 with requests.get(archive_url) as response:
57 if compress:
58 with open(output_directory, "wb") as archive:
59 archive.write(response.content)
60 else:
61 with tarfile.open(
62 mode="r|gz", fileobj=BytesIO(response.content)
63 ) as archive:
64 archive.extractall(output_directory)
65
66
67 except ModuleNotFoundError:
68 log.error(
69 "\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download"
70 + "\nPlease install with: python -m pip install pyhf[contrib]\n",
71 exc_info=True,
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py
--- a/src/pyhf/contrib/utils.py
+++ b/src/pyhf/contrib/utils.py
@@ -53,7 +53,18 @@
+ "To download an archive from this host use the --force option."
)
- with requests.get(archive_url) as response:
+ # c.f. https://github.com/scikit-hep/pyhf/issues/1491
+ # > Use content negotiation at the landing page for the resource that
+ # > the DOI resolves to. DataCite content negotiation is forwarding all
+ # > requests with unknown content types to the URL registered in the
+ # > handle system.
+ # c.f. https://blog.datacite.org/changes-to-doi-content-negotiation/
+ # The HEPData landing page for the resource file can check if the Accept
+ # request HTTP header matches the content type of the resource file and
+ # return the content directly if so.
+ with requests.get(
+ archive_url, headers={"Accept": "application/x-tar"}
+ ) as response:
if compress:
with open(output_directory, "wb") as archive:
archive.write(response.content)
| {"golden_diff": "diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py\n--- a/src/pyhf/contrib/utils.py\n+++ b/src/pyhf/contrib/utils.py\n@@ -53,7 +53,18 @@\n + \"To download an archive from this host use the --force option.\"\n )\n \n- with requests.get(archive_url) as response:\n+ # c.f. https://github.com/scikit-hep/pyhf/issues/1491\n+ # > Use content negotiation at the landing page for the resource that\n+ # > the DOI resolves to. DataCite content negotiation is forwarding all\n+ # > requests with unknown content types to the URL registered in the\n+ # > handle system.\n+ # c.f. https://blog.datacite.org/changes-to-doi-content-negotiation/\n+ # The HEPData landing page for the resource file can check if the Accept\n+ # request HTTP header matches the content type of the resource file and\n+ # return the content directly if so.\n+ with requests.get(\n+ archive_url, headers={\"Accept\": \"application/x-tar\"}\n+ ) as response:\n if compress:\n with open(output_directory, \"wb\") as archive:\n archive.write(response.content)\n", "issue": "Pass `Accept` header in `contrib.utils.download`\nI'm copying a comment here that I made in the [HEPData Zulip chat](https://hepdata.zulipchat.com/#narrow/stream/226203-pyhf/topic/DOIs/near/213610306) on 16th October 2020.\r\n\r\nRegarding the issue (HEPData/hepdata#162) to mint DOIs for all local resource files attached to a submission, if we do eventually get around to addressing it, we would probably redirect the DOI to a landing page for the resource file, rather than to the resource file itself (e.g. the pyhf tarball). This would follow the DataCite [Best Practices for DOI Landing Pages](https://support.datacite.org/docs/landing-pages), e.g. \"[DOIs should resolve to a landing page, not directly to the content](https://support.datacite.org/docs/landing-pages#dois-should-resolve-to-a-landing-page-not-directly-to-the-content)\", which I'm currently breaking for the two manually minted DOIs. In the issue (HEPdata/hepdata#162) I mentioned the possibility of using [DataCite Content Negotiation](https://support.datacite.org/docs/datacite-content-resolver) to redirect to the resource file itself, but the linked page now says \"Custom content types are no longer supported since January 1st, 2020\". I thought maybe content negotiation could be used to return the `.tar.gz` file directly, but the intended purpose is to retrieve DOI metadata in different formats, not to provide the content itself. In anticipation of possible future changes, I'd recommend that you use the URL directly rather than the DOI in pyhf download scripts and documentation (e.g. 
revert #1109).\n", "before_files": [{"content": "\"\"\"Helper utilities for common tasks.\"\"\"\n\nfrom urllib.parse import urlparse\nimport tarfile\nfrom io import BytesIO\nimport logging\nfrom pyhf import exceptions\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"download\"]\n\n\ndef __dir__():\n return __all__\n\n\ntry:\n import requests\n\n def download(archive_url, output_directory, force=False, compress=False):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n >>> from pyhf.contrib.utils import download\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods\")\n >>> import os\n >>> sorted(os.listdir(\"1Lbb-likelihoods\"))\n ['BkgOnly.json', 'README.md', 'patchset.json']\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods.tar.gz\", compress=True)\n >>> import glob\n >>> glob.glob(\"1Lbb-likelihoods.tar.gz\")\n ['1Lbb-likelihoods.tar.gz']\n\n Args:\n archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.\n output_directory (:obj:`str`): Name of the directory to unpack the archive into.\n force (:obj:`bool`): Force download from non-approved host. Default is ``False``.\n compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n if not force:\n valid_hosts = [\"www.hepdata.net\", \"doi.org\"]\n netloc = urlparse(archive_url).netloc\n if netloc not in valid_hosts:\n raise exceptions.InvalidArchiveHost(\n f\"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\\n\"\n + \"To download an archive from this host use the --force option.\"\n )\n\n with requests.get(archive_url) as response:\n if compress:\n with open(output_directory, \"wb\") as archive:\n archive.write(response.content)\n else:\n with tarfile.open(\n mode=\"r|gz\", fileobj=BytesIO(response.content)\n ) as archive:\n archive.extractall(output_directory)\n\n\nexcept ModuleNotFoundError:\n log.error(\n \"\\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\",\n exc_info=True,\n )\n", "path": "src/pyhf/contrib/utils.py"}], "after_files": [{"content": "\"\"\"Helper utilities for common tasks.\"\"\"\n\nfrom urllib.parse import urlparse\nimport tarfile\nfrom io import BytesIO\nimport logging\nfrom pyhf import exceptions\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"download\"]\n\n\ndef __dir__():\n return __all__\n\n\ntry:\n import requests\n\n def download(archive_url, output_directory, force=False, compress=False):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n >>> from pyhf.contrib.utils import download\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods\")\n >>> import os\n >>> sorted(os.listdir(\"1Lbb-likelihoods\"))\n ['BkgOnly.json', 'README.md', 'patchset.json']\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods.tar.gz\", compress=True)\n >>> import glob\n >>> glob.glob(\"1Lbb-likelihoods.tar.gz\")\n ['1Lbb-likelihoods.tar.gz']\n\n Args:\n archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.\n output_directory (:obj:`str`): Name of the directory to 
unpack the archive into.\n force (:obj:`bool`): Force download from non-approved host. Default is ``False``.\n compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n if not force:\n valid_hosts = [\"www.hepdata.net\", \"doi.org\"]\n netloc = urlparse(archive_url).netloc\n if netloc not in valid_hosts:\n raise exceptions.InvalidArchiveHost(\n f\"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\\n\"\n + \"To download an archive from this host use the --force option.\"\n )\n\n # c.f. https://github.com/scikit-hep/pyhf/issues/1491\n # > Use content negotiation at the landing page for the resource that\n # > the DOI resolves to. DataCite content negotiation is forwarding all\n # > requests with unknown content types to the URL registered in the\n # > handle system.\n # c.f. https://blog.datacite.org/changes-to-doi-content-negotiation/\n # The HEPData landing page for the resource file can check if the Accept\n # request HTTP header matches the content type of the resource file and\n # return the content directly if so.\n with requests.get(\n archive_url, headers={\"Accept\": \"application/x-tar\"}\n ) as response:\n if compress:\n with open(output_directory, \"wb\") as archive:\n archive.write(response.content)\n else:\n with tarfile.open(\n mode=\"r|gz\", fileobj=BytesIO(response.content)\n ) as archive:\n archive.extractall(output_directory)\n\n\nexcept ModuleNotFoundError:\n log.error(\n \"\\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\",\n exc_info=True,\n )\n", "path": "src/pyhf/contrib/utils.py"}]} | 1,412 | 280 |
gh_patches_debug_23232 | rasdani/github-patches | git_diff | matrix-org__synapse-8567 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Increase in errors due to no active logging span
Starting on Oct 1 (which corresponds with a re-deploy of matrix.org), we've been seeing a lot more of the following error:
> There was no active span when trying to inject the span into a byte dict. Did you forget to start one or did a context slip?
https://sentry.matrix.org/sentry/synapse-matrixorg/issues/15795/
--- END ISSUE ---
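For orientation before the code below: the error means a span-injection call ran while no OpenTracing span was active. A minimal, hypothetical sketch of the remedy that the golden diff further down applies (wrapping background work in an active span), assuming synapse's `start_active_span` context manager; the wrapper name and tag value here are illustrative only.

```python
# Hypothetical sketch: run background work inside an active span so that later
# calls injecting the current span into outgoing headers have one to serialize.
from synapse.logging.opentracing import start_active_span

def run_in_span(desc, func, *args, **kwargs):
    # "desc" and the request_id tag mirror the naming used in the actual patch.
    with start_active_span(desc, tags={"request_id": desc}):
        return func(*args, **kwargs)
```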
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/metrics/background_process_metrics.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2018 New Vector Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import inspect
17 import logging
18 import threading
19 from functools import wraps
20 from typing import TYPE_CHECKING, Dict, Optional, Set
21
22 from prometheus_client.core import REGISTRY, Counter, Gauge
23
24 from twisted.internet import defer
25
26 from synapse.logging.context import LoggingContext, PreserveLoggingContext
27
28 if TYPE_CHECKING:
29 import resource
30
31
32 logger = logging.getLogger(__name__)
33
34
35 _background_process_start_count = Counter(
36 "synapse_background_process_start_count",
37 "Number of background processes started",
38 ["name"],
39 )
40
41 _background_process_in_flight_count = Gauge(
42 "synapse_background_process_in_flight_count",
43 "Number of background processes in flight",
44 labelnames=["name"],
45 )
46
47 # we set registry=None in all of these to stop them getting registered with
48 # the default registry. Instead we collect them all via the CustomCollector,
49 # which ensures that we can update them before they are collected.
50 #
51 _background_process_ru_utime = Counter(
52 "synapse_background_process_ru_utime_seconds",
53 "User CPU time used by background processes, in seconds",
54 ["name"],
55 registry=None,
56 )
57
58 _background_process_ru_stime = Counter(
59 "synapse_background_process_ru_stime_seconds",
60 "System CPU time used by background processes, in seconds",
61 ["name"],
62 registry=None,
63 )
64
65 _background_process_db_txn_count = Counter(
66 "synapse_background_process_db_txn_count",
67 "Number of database transactions done by background processes",
68 ["name"],
69 registry=None,
70 )
71
72 _background_process_db_txn_duration = Counter(
73 "synapse_background_process_db_txn_duration_seconds",
74 (
75 "Seconds spent by background processes waiting for database "
76 "transactions, excluding scheduling time"
77 ),
78 ["name"],
79 registry=None,
80 )
81
82 _background_process_db_sched_duration = Counter(
83 "synapse_background_process_db_sched_duration_seconds",
84 "Seconds spent by background processes waiting for database connections",
85 ["name"],
86 registry=None,
87 )
88
89 # map from description to a counter, so that we can name our logcontexts
90 # incrementally. (It actually duplicates _background_process_start_count, but
91 # it's much simpler to do so than to try to combine them.)
92 _background_process_counts = {} # type: Dict[str, int]
93
94 # Set of all running background processes that became active since the
95 # last time metrics were scraped (i.e. background processes that performed some
96 # work since the last scrape.)
97 #
98 # We do it like this to handle the case where we have a large number of
99 # background processes stacking up behind a lock or linearizer, where we then
100 # only need to iterate over and update metrics for the process that have
101 # actually been active and can ignore the idle ones.
102 _background_processes_active_since_last_scrape = set() # type: Set[_BackgroundProcess]
103
104 # A lock that covers the above set and dict
105 _bg_metrics_lock = threading.Lock()
106
107
108 class _Collector:
109 """A custom metrics collector for the background process metrics.
110
111 Ensures that all of the metrics are up-to-date with any in-flight processes
112 before they are returned.
113 """
114
115 def collect(self):
116 global _background_processes_active_since_last_scrape
117
118 # We swap out the _background_processes set with an empty one so that
119 # we can safely iterate over the set without holding the lock.
120 with _bg_metrics_lock:
121 _background_processes_copy = _background_processes_active_since_last_scrape
122 _background_processes_active_since_last_scrape = set()
123
124 for process in _background_processes_copy:
125 process.update_metrics()
126
127 # now we need to run collect() over each of the static Counters, and
128 # yield each metric they return.
129 for m in (
130 _background_process_ru_utime,
131 _background_process_ru_stime,
132 _background_process_db_txn_count,
133 _background_process_db_txn_duration,
134 _background_process_db_sched_duration,
135 ):
136 for r in m.collect():
137 yield r
138
139
140 REGISTRY.register(_Collector())
141
142
143 class _BackgroundProcess:
144 def __init__(self, desc, ctx):
145 self.desc = desc
146 self._context = ctx
147 self._reported_stats = None
148
149 def update_metrics(self):
150 """Updates the metrics with values from this process."""
151 new_stats = self._context.get_resource_usage()
152 if self._reported_stats is None:
153 diff = new_stats
154 else:
155 diff = new_stats - self._reported_stats
156 self._reported_stats = new_stats
157
158 _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)
159 _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)
160 _background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)
161 _background_process_db_txn_duration.labels(self.desc).inc(
162 diff.db_txn_duration_sec
163 )
164 _background_process_db_sched_duration.labels(self.desc).inc(
165 diff.db_sched_duration_sec
166 )
167
168
169 def run_as_background_process(desc: str, func, *args, **kwargs):
170 """Run the given function in its own logcontext, with resource metrics
171
172 This should be used to wrap processes which are fired off to run in the
173 background, instead of being associated with a particular request.
174
175 It returns a Deferred which completes when the function completes, but it doesn't
176 follow the synapse logcontext rules, which makes it appropriate for passing to
177 clock.looping_call and friends (or for firing-and-forgetting in the middle of a
178 normal synapse async function).
179
180 Args:
181 desc: a description for this background process type
182 func: a function, which may return a Deferred or a coroutine
183 args: positional args for func
184 kwargs: keyword args for func
185
186 Returns: Deferred which returns the result of func, but note that it does not
187 follow the synapse logcontext rules.
188 """
189
190 async def run():
191 with _bg_metrics_lock:
192 count = _background_process_counts.get(desc, 0)
193 _background_process_counts[desc] = count + 1
194
195 _background_process_start_count.labels(desc).inc()
196 _background_process_in_flight_count.labels(desc).inc()
197
198 with BackgroundProcessLoggingContext(desc) as context:
199 context.request = "%s-%i" % (desc, count)
200
201 try:
202 result = func(*args, **kwargs)
203
204 if inspect.isawaitable(result):
205 result = await result
206
207 return result
208 except Exception:
209 logger.exception(
210 "Background process '%s' threw an exception", desc,
211 )
212 finally:
213 _background_process_in_flight_count.labels(desc).dec()
214
215 with PreserveLoggingContext():
216 # Note that we return a Deferred here so that it can be used in a
217 # looping_call and other places that expect a Deferred.
218 return defer.ensureDeferred(run())
219
220
221 def wrap_as_background_process(desc):
222 """Decorator that wraps a function that gets called as a background
223 process.
224
225 Equivalent of calling the function with `run_as_background_process`
226 """
227
228 def wrap_as_background_process_inner(func):
229 @wraps(func)
230 def wrap_as_background_process_inner_2(*args, **kwargs):
231 return run_as_background_process(desc, func, *args, **kwargs)
232
233 return wrap_as_background_process_inner_2
234
235 return wrap_as_background_process_inner
236
237
238 class BackgroundProcessLoggingContext(LoggingContext):
239 """A logging context that tracks in flight metrics for background
240 processes.
241 """
242
243 __slots__ = ["_proc"]
244
245 def __init__(self, name: str):
246 super().__init__(name)
247
248 self._proc = _BackgroundProcess(name, self)
249
250 def start(self, rusage: "Optional[resource._RUsage]"):
251 """Log context has started running (again).
252 """
253
254 super().start(rusage)
255
256 # We've become active again so we make sure we're in the list of active
257 # procs. (Note that "start" here means we've become active, as opposed
258 # to starting for the first time.)
259 with _bg_metrics_lock:
260 _background_processes_active_since_last_scrape.add(self._proc)
261
262 def __exit__(self, type, value, traceback) -> None:
263 """Log context has finished.
264 """
265
266 super().__exit__(type, value, traceback)
267
268         # The background process has finished. We explicitly remove and manually
269 # update the metrics here so that if nothing is scraping metrics the set
270 # doesn't infinitely grow.
271 with _bg_metrics_lock:
272 _background_processes_active_since_last_scrape.discard(self._proc)
273
274 self._proc.update_metrics()
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -24,6 +24,7 @@
from twisted.internet import defer
from synapse.logging.context import LoggingContext, PreserveLoggingContext
+from synapse.logging.opentracing import start_active_span
if TYPE_CHECKING:
import resource
@@ -197,14 +198,14 @@
with BackgroundProcessLoggingContext(desc) as context:
context.request = "%s-%i" % (desc, count)
-
try:
- result = func(*args, **kwargs)
+ with start_active_span(desc, tags={"request_id": context.request}):
+ result = func(*args, **kwargs)
- if inspect.isawaitable(result):
- result = await result
+ if inspect.isawaitable(result):
+ result = await result
- return result
+ return result
except Exception:
logger.exception(
"Background process '%s' threw an exception", desc,
| {"golden_diff": "diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py\n--- a/synapse/metrics/background_process_metrics.py\n+++ b/synapse/metrics/background_process_metrics.py\n@@ -24,6 +24,7 @@\n from twisted.internet import defer\n \n from synapse.logging.context import LoggingContext, PreserveLoggingContext\n+from synapse.logging.opentracing import start_active_span\n \n if TYPE_CHECKING:\n import resource\n@@ -197,14 +198,14 @@\n \n with BackgroundProcessLoggingContext(desc) as context:\n context.request = \"%s-%i\" % (desc, count)\n-\n try:\n- result = func(*args, **kwargs)\n+ with start_active_span(desc, tags={\"request_id\": context.request}):\n+ result = func(*args, **kwargs)\n \n- if inspect.isawaitable(result):\n- result = await result\n+ if inspect.isawaitable(result):\n+ result = await result\n \n- return result\n+ return result\n except Exception:\n logger.exception(\n \"Background process '%s' threw an exception\", desc,\n", "issue": "Increase in errors due to no active logging span\nStarting on Oct 1 (which corresponds with a re-deploy of matrix.org) we've been seeing a lot more of the following error:\r\n\r\n> There was no active span when trying to inject the span into a byte dict. Did you forget to start one or did a context slip?\r\n\r\nhttps://sentry.matrix.org/sentry/synapse-matrixorg/issues/15795/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport logging\nimport threading\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Dict, Optional, Set\n\nfrom prometheus_client.core import REGISTRY, Counter, Gauge\n\nfrom twisted.internet import defer\n\nfrom synapse.logging.context import LoggingContext, PreserveLoggingContext\n\nif TYPE_CHECKING:\n import resource\n\n\nlogger = logging.getLogger(__name__)\n\n\n_background_process_start_count = Counter(\n \"synapse_background_process_start_count\",\n \"Number of background processes started\",\n [\"name\"],\n)\n\n_background_process_in_flight_count = Gauge(\n \"synapse_background_process_in_flight_count\",\n \"Number of background processes in flight\",\n labelnames=[\"name\"],\n)\n\n# we set registry=None in all of these to stop them getting registered with\n# the default registry. 
Instead we collect them all via the CustomCollector,\n# which ensures that we can update them before they are collected.\n#\n_background_process_ru_utime = Counter(\n \"synapse_background_process_ru_utime_seconds\",\n \"User CPU time used by background processes, in seconds\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_ru_stime = Counter(\n \"synapse_background_process_ru_stime_seconds\",\n \"System CPU time used by background processes, in seconds\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_txn_count = Counter(\n \"synapse_background_process_db_txn_count\",\n \"Number of database transactions done by background processes\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_txn_duration = Counter(\n \"synapse_background_process_db_txn_duration_seconds\",\n (\n \"Seconds spent by background processes waiting for database \"\n \"transactions, excluding scheduling time\"\n ),\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_sched_duration = Counter(\n \"synapse_background_process_db_sched_duration_seconds\",\n \"Seconds spent by background processes waiting for database connections\",\n [\"name\"],\n registry=None,\n)\n\n# map from description to a counter, so that we can name our logcontexts\n# incrementally. (It actually duplicates _background_process_start_count, but\n# it's much simpler to do so than to try to combine them.)\n_background_process_counts = {} # type: Dict[str, int]\n\n# Set of all running background processes that became active active since the\n# last time metrics were scraped (i.e. background processes that performed some\n# work since the last scrape.)\n#\n# We do it like this to handle the case where we have a large number of\n# background processes stacking up behind a lock or linearizer, where we then\n# only need to iterate over and update metrics for the process that have\n# actually been active and can ignore the idle ones.\n_background_processes_active_since_last_scrape = set() # type: Set[_BackgroundProcess]\n\n# A lock that covers the above set and dict\n_bg_metrics_lock = threading.Lock()\n\n\nclass _Collector:\n \"\"\"A custom metrics collector for the background process metrics.\n\n Ensures that all of the metrics are up-to-date with any in-flight processes\n before they are returned.\n \"\"\"\n\n def collect(self):\n global _background_processes_active_since_last_scrape\n\n # We swap out the _background_processes set with an empty one so that\n # we can safely iterate over the set without holding the lock.\n with _bg_metrics_lock:\n _background_processes_copy = _background_processes_active_since_last_scrape\n _background_processes_active_since_last_scrape = set()\n\n for process in _background_processes_copy:\n process.update_metrics()\n\n # now we need to run collect() over each of the static Counters, and\n # yield each metric they return.\n for m in (\n _background_process_ru_utime,\n _background_process_ru_stime,\n _background_process_db_txn_count,\n _background_process_db_txn_duration,\n _background_process_db_sched_duration,\n ):\n for r in m.collect():\n yield r\n\n\nREGISTRY.register(_Collector())\n\n\nclass _BackgroundProcess:\n def __init__(self, desc, ctx):\n self.desc = desc\n self._context = ctx\n self._reported_stats = None\n\n def update_metrics(self):\n \"\"\"Updates the metrics with values from this process.\"\"\"\n new_stats = self._context.get_resource_usage()\n if self._reported_stats is None:\n diff = new_stats\n else:\n diff = new_stats - self._reported_stats\n self._reported_stats = 
new_stats\n\n _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)\n _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)\n _background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)\n _background_process_db_txn_duration.labels(self.desc).inc(\n diff.db_txn_duration_sec\n )\n _background_process_db_sched_duration.labels(self.desc).inc(\n diff.db_sched_duration_sec\n )\n\n\ndef run_as_background_process(desc: str, func, *args, **kwargs):\n \"\"\"Run the given function in its own logcontext, with resource metrics\n\n This should be used to wrap processes which are fired off to run in the\n background, instead of being associated with a particular request.\n\n It returns a Deferred which completes when the function completes, but it doesn't\n follow the synapse logcontext rules, which makes it appropriate for passing to\n clock.looping_call and friends (or for firing-and-forgetting in the middle of a\n normal synapse async function).\n\n Args:\n desc: a description for this background process type\n func: a function, which may return a Deferred or a coroutine\n args: positional args for func\n kwargs: keyword args for func\n\n Returns: Deferred which returns the result of func, but note that it does not\n follow the synapse logcontext rules.\n \"\"\"\n\n async def run():\n with _bg_metrics_lock:\n count = _background_process_counts.get(desc, 0)\n _background_process_counts[desc] = count + 1\n\n _background_process_start_count.labels(desc).inc()\n _background_process_in_flight_count.labels(desc).inc()\n\n with BackgroundProcessLoggingContext(desc) as context:\n context.request = \"%s-%i\" % (desc, count)\n\n try:\n result = func(*args, **kwargs)\n\n if inspect.isawaitable(result):\n result = await result\n\n return result\n except Exception:\n logger.exception(\n \"Background process '%s' threw an exception\", desc,\n )\n finally:\n _background_process_in_flight_count.labels(desc).dec()\n\n with PreserveLoggingContext():\n # Note that we return a Deferred here so that it can be used in a\n # looping_call and other places that expect a Deferred.\n return defer.ensureDeferred(run())\n\n\ndef wrap_as_background_process(desc):\n \"\"\"Decorator that wraps a function that gets called as a background\n process.\n\n Equivalent of calling the function with `run_as_background_process`\n \"\"\"\n\n def wrap_as_background_process_inner(func):\n @wraps(func)\n def wrap_as_background_process_inner_2(*args, **kwargs):\n return run_as_background_process(desc, func, *args, **kwargs)\n\n return wrap_as_background_process_inner_2\n\n return wrap_as_background_process_inner\n\n\nclass BackgroundProcessLoggingContext(LoggingContext):\n \"\"\"A logging context that tracks in flight metrics for background\n processes.\n \"\"\"\n\n __slots__ = [\"_proc\"]\n\n def __init__(self, name: str):\n super().__init__(name)\n\n self._proc = _BackgroundProcess(name, self)\n\n def start(self, rusage: \"Optional[resource._RUsage]\"):\n \"\"\"Log context has started running (again).\n \"\"\"\n\n super().start(rusage)\n\n # We've become active again so we make sure we're in the list of active\n # procs. (Note that \"start\" here means we've become active, as opposed\n # to starting for the first time.)\n with _bg_metrics_lock:\n _background_processes_active_since_last_scrape.add(self._proc)\n\n def __exit__(self, type, value, traceback) -> None:\n \"\"\"Log context has finished.\n \"\"\"\n\n super().__exit__(type, value, traceback)\n\n # The background process has finished. 
We explictly remove and manually\n # update the metrics here so that if nothing is scraping metrics the set\n # doesn't infinitely grow.\n with _bg_metrics_lock:\n _background_processes_active_since_last_scrape.discard(self._proc)\n\n self._proc.update_metrics()\n", "path": "synapse/metrics/background_process_metrics.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport logging\nimport threading\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Dict, Optional, Set\n\nfrom prometheus_client.core import REGISTRY, Counter, Gauge\n\nfrom twisted.internet import defer\n\nfrom synapse.logging.context import LoggingContext, PreserveLoggingContext\nfrom synapse.logging.opentracing import start_active_span\n\nif TYPE_CHECKING:\n import resource\n\n\nlogger = logging.getLogger(__name__)\n\n\n_background_process_start_count = Counter(\n \"synapse_background_process_start_count\",\n \"Number of background processes started\",\n [\"name\"],\n)\n\n_background_process_in_flight_count = Gauge(\n \"synapse_background_process_in_flight_count\",\n \"Number of background processes in flight\",\n labelnames=[\"name\"],\n)\n\n# we set registry=None in all of these to stop them getting registered with\n# the default registry. Instead we collect them all via the CustomCollector,\n# which ensures that we can update them before they are collected.\n#\n_background_process_ru_utime = Counter(\n \"synapse_background_process_ru_utime_seconds\",\n \"User CPU time used by background processes, in seconds\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_ru_stime = Counter(\n \"synapse_background_process_ru_stime_seconds\",\n \"System CPU time used by background processes, in seconds\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_txn_count = Counter(\n \"synapse_background_process_db_txn_count\",\n \"Number of database transactions done by background processes\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_txn_duration = Counter(\n \"synapse_background_process_db_txn_duration_seconds\",\n (\n \"Seconds spent by background processes waiting for database \"\n \"transactions, excluding scheduling time\"\n ),\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_sched_duration = Counter(\n \"synapse_background_process_db_sched_duration_seconds\",\n \"Seconds spent by background processes waiting for database connections\",\n [\"name\"],\n registry=None,\n)\n\n# map from description to a counter, so that we can name our logcontexts\n# incrementally. (It actually duplicates _background_process_start_count, but\n# it's much simpler to do so than to try to combine them.)\n_background_process_counts = {} # type: Dict[str, int]\n\n# Set of all running background processes that became active active since the\n# last time metrics were scraped (i.e. 
background processes that performed some\n# work since the last scrape.)\n#\n# We do it like this to handle the case where we have a large number of\n# background processes stacking up behind a lock or linearizer, where we then\n# only need to iterate over and update metrics for the process that have\n# actually been active and can ignore the idle ones.\n_background_processes_active_since_last_scrape = set() # type: Set[_BackgroundProcess]\n\n# A lock that covers the above set and dict\n_bg_metrics_lock = threading.Lock()\n\n\nclass _Collector:\n \"\"\"A custom metrics collector for the background process metrics.\n\n Ensures that all of the metrics are up-to-date with any in-flight processes\n before they are returned.\n \"\"\"\n\n def collect(self):\n global _background_processes_active_since_last_scrape\n\n # We swap out the _background_processes set with an empty one so that\n # we can safely iterate over the set without holding the lock.\n with _bg_metrics_lock:\n _background_processes_copy = _background_processes_active_since_last_scrape\n _background_processes_active_since_last_scrape = set()\n\n for process in _background_processes_copy:\n process.update_metrics()\n\n # now we need to run collect() over each of the static Counters, and\n # yield each metric they return.\n for m in (\n _background_process_ru_utime,\n _background_process_ru_stime,\n _background_process_db_txn_count,\n _background_process_db_txn_duration,\n _background_process_db_sched_duration,\n ):\n for r in m.collect():\n yield r\n\n\nREGISTRY.register(_Collector())\n\n\nclass _BackgroundProcess:\n def __init__(self, desc, ctx):\n self.desc = desc\n self._context = ctx\n self._reported_stats = None\n\n def update_metrics(self):\n \"\"\"Updates the metrics with values from this process.\"\"\"\n new_stats = self._context.get_resource_usage()\n if self._reported_stats is None:\n diff = new_stats\n else:\n diff = new_stats - self._reported_stats\n self._reported_stats = new_stats\n\n _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)\n _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)\n _background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)\n _background_process_db_txn_duration.labels(self.desc).inc(\n diff.db_txn_duration_sec\n )\n _background_process_db_sched_duration.labels(self.desc).inc(\n diff.db_sched_duration_sec\n )\n\n\ndef run_as_background_process(desc: str, func, *args, **kwargs):\n \"\"\"Run the given function in its own logcontext, with resource metrics\n\n This should be used to wrap processes which are fired off to run in the\n background, instead of being associated with a particular request.\n\n It returns a Deferred which completes when the function completes, but it doesn't\n follow the synapse logcontext rules, which makes it appropriate for passing to\n clock.looping_call and friends (or for firing-and-forgetting in the middle of a\n normal synapse async function).\n\n Args:\n desc: a description for this background process type\n func: a function, which may return a Deferred or a coroutine\n args: positional args for func\n kwargs: keyword args for func\n\n Returns: Deferred which returns the result of func, but note that it does not\n follow the synapse logcontext rules.\n \"\"\"\n\n async def run():\n with _bg_metrics_lock:\n count = _background_process_counts.get(desc, 0)\n _background_process_counts[desc] = count + 1\n\n _background_process_start_count.labels(desc).inc()\n _background_process_in_flight_count.labels(desc).inc()\n\n with 
BackgroundProcessLoggingContext(desc) as context:\n context.request = \"%s-%i\" % (desc, count)\n try:\n with start_active_span(desc, tags={\"request_id\": context.request}):\n result = func(*args, **kwargs)\n\n if inspect.isawaitable(result):\n result = await result\n\n return result\n except Exception:\n logger.exception(\n \"Background process '%s' threw an exception\", desc,\n )\n finally:\n _background_process_in_flight_count.labels(desc).dec()\n\n with PreserveLoggingContext():\n # Note that we return a Deferred here so that it can be used in a\n # looping_call and other places that expect a Deferred.\n return defer.ensureDeferred(run())\n\n\ndef wrap_as_background_process(desc):\n \"\"\"Decorator that wraps a function that gets called as a background\n process.\n\n Equivalent of calling the function with `run_as_background_process`\n \"\"\"\n\n def wrap_as_background_process_inner(func):\n @wraps(func)\n def wrap_as_background_process_inner_2(*args, **kwargs):\n return run_as_background_process(desc, func, *args, **kwargs)\n\n return wrap_as_background_process_inner_2\n\n return wrap_as_background_process_inner\n\n\nclass BackgroundProcessLoggingContext(LoggingContext):\n \"\"\"A logging context that tracks in flight metrics for background\n processes.\n \"\"\"\n\n __slots__ = [\"_proc\"]\n\n def __init__(self, name: str):\n super().__init__(name)\n\n self._proc = _BackgroundProcess(name, self)\n\n def start(self, rusage: \"Optional[resource._RUsage]\"):\n \"\"\"Log context has started running (again).\n \"\"\"\n\n super().start(rusage)\n\n # We've become active again so we make sure we're in the list of active\n # procs. (Note that \"start\" here means we've become active, as opposed\n # to starting for the first time.)\n with _bg_metrics_lock:\n _background_processes_active_since_last_scrape.add(self._proc)\n\n def __exit__(self, type, value, traceback) -> None:\n \"\"\"Log context has finished.\n \"\"\"\n\n super().__exit__(type, value, traceback)\n\n # The background process has finished. We explictly remove and manually\n # update the metrics here so that if nothing is scraping metrics the set\n # doesn't infinitely grow.\n with _bg_metrics_lock:\n _background_processes_active_since_last_scrape.discard(self._proc)\n\n self._proc.update_metrics()\n", "path": "synapse/metrics/background_process_metrics.py"}]} | 3,074 | 251 |
gh_patches_debug_23886 | rasdani/github-patches | git_diff | Kinto__kinto-1927 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kinto not able to start - Regression due to jsonschema Draft7Validator
#1808 introduced a regression because `Draft7Validator` is not yet included in any released version of the jsonschema package (it was added on master but has not been released).
The solution is either to depend on the master branch of jsonschema (not recommended) or to revert this change until a new version is issued.
--- END ISSUE ---
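For orientation: the accepted fix (see the golden diff further down) is a try/except import shim so the code degrades gracefully on released jsonschema versions. A minimal standalone sketch follows; the `DraftValidator` alias mirrors the patch, while the sample schema is illustrative.

```python
# Prefer Draft7Validator when the installed jsonschema ships it; otherwise fall
# back to Draft4Validator, which released versions provide.
try:
    from jsonschema import Draft7Validator as DraftValidator
except ImportError:
    from jsonschema import Draft4Validator as DraftValidator

DraftValidator.check_schema({"type": "object", "properties": {"id": {"type": "string"}}})
```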
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/schema_validation.py`
Content:
```
1 import colander
2 from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate
3 from pyramid.settings import asbool
4
5 from kinto.core import utils
6 from kinto.core.errors import raise_invalid
7 from kinto.views import object_exists_or_404
8
9
10 class JSONSchemaMapping(colander.SchemaNode):
11 def schema_type(self, **kw):
12 return colander.Mapping(unknown="preserve")
13
14 def deserialize(self, cstruct=colander.null):
15 # Start by deserializing a simple mapping.
16 validated = super().deserialize(cstruct)
17
18 # In case it is optional in parent schema.
19 if not validated or validated in (colander.null, colander.drop):
20 return validated
21 try:
22 check_schema(validated)
23 except ValidationError as e:
24 self.raise_invalid(e.message)
25 return validated
26
27
28 def check_schema(data):
29 try:
30 Draft7Validator.check_schema(data)
31 except SchemaError as e:
32 message = e.path.pop() + e.message
33 raise ValidationError(message)
34
35
36 def validate_schema(data, schema, ignore_fields=[]):
37 required_fields = [f for f in schema.get("required", []) if f not in ignore_fields]
38 # jsonschema doesn't accept 'required': [] yet.
39 # See https://github.com/Julian/jsonschema/issues/337.
40 # In the meantime, strip out 'required' if no other fields are required.
41 if required_fields:
42 schema = {**schema, "required": required_fields}
43 else:
44 schema = {f: v for f, v in schema.items() if f != "required"}
45
46 data = {f: v for f, v in data.items() if f not in ignore_fields}
47
48 try:
49 validate(data, schema)
50 except ValidationError as e:
51 if e.path:
52 field = e.path[-1]
53 elif e.validator_value:
54 field = e.validator_value[-1]
55 else:
56 field = e.schema_path[-1]
57 e.field = field
58 raise e
59 # Raise an error here if a reference in the schema doesn't resolve.
60 # jsonschema doesn't provide schema validation checking upon creation yet,
61 # it must be validated against data.
62 # See https://github.com/Julian/jsonschema/issues/399
63 # For future support https://github.com/Julian/jsonschema/issues/346.
64 except RefResolutionError as e:
65 raise e
66
67
68 def validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):
69 """Lookup in the parent objects if a schema was defined for this resource.
70
71 If the schema validation feature is enabled, if a schema is/are defined, and if the
72 data does not validate it/them, then it raises a 400 exception.
73 """
74 settings = request.registry.settings
75 schema_validation = "experimental_collection_schema_validation"
76 # If disabled from settings, do nothing.
77 if not asbool(settings.get(schema_validation)):
78 return
79
80 bucket_id = request.matchdict["bucket_id"]
81 bucket_uri = utils.instance_uri(request, "bucket", id=bucket_id)
82 buckets = request.bound_data.setdefault("buckets", {})
83 if bucket_uri not in buckets:
84 # Unknown yet, fetch from storage.
85 bucket = object_exists_or_404(
86 request, collection_id="bucket", parent_id="", object_id=bucket_id
87 )
88 buckets[bucket_uri] = bucket
89
90 # Let's see if the bucket defines a schema for this resource.
91 metadata_field = "{}:schema".format(resource_name)
92 bucket = buckets[bucket_uri]
93 if metadata_field not in bucket:
94 return
95
96 # Validate or fail with 400.
97 schema = bucket[metadata_field]
98 try:
99 validate_schema(data, schema, ignore_fields=ignore_fields)
100 except ValidationError as e:
101 raise_invalid(request, name=e.field, description=e.message)
102 except RefResolutionError as e:
103 raise_invalid(request, name="schema", description=str(e))
104
```
Path: `setup.py`
Content:
```
1 import codecs
2 import os
3 from setuptools import setup, find_packages
4
5 # abspath here because setup.py may be __main__, in which case
6 # __file__ is not guaranteed to be absolute
7 here = os.path.abspath(os.path.dirname(__file__))
8
9
10 def read_file(filename):
11 """Open a related file and return its content."""
12 with codecs.open(os.path.join(here, filename), encoding="utf-8") as f:
13 content = f.read()
14 return content
15
16
17 README = read_file("README.rst")
18 CHANGELOG = read_file("CHANGELOG.rst")
19 CONTRIBUTORS = read_file("CONTRIBUTORS.rst")
20
21 REQUIREMENTS = [
22 "bcrypt",
23 "colander >= 1.4.0",
24 "cornice",
25 "cornice_swagger >= 0.5.1",
26 "dockerflow",
27 "jsonschema",
28 "jsonpatch",
29 "logging-color-formatter >= 1.0.1", # Message interpolations.
30 "python-dateutil",
31 "pyramid >= 1.9.1, < 2.0",
32 "pyramid_multiauth >= 0.8", # User on policy selected event.
33 "transaction",
34 # pyramid_tm changed the location of their tween in 2.x and one of
35 # our tests fails on 2.0.
36 "pyramid_tm >= 2.1",
37 "requests",
38 "waitress",
39 "ujson >= 1.35",
40 ]
41
42 POSTGRESQL_REQUIRES = ["SQLAlchemy", "psycopg2 > 2.5", "zope.sqlalchemy"]
43
44 REDIS_REQUIRES = ["kinto_redis"]
45
46 MEMCACHED_REQUIRES = ["python-memcached"]
47
48 SETUP_REQUIRES = ["pytest-runner"]
49
50 TEST_REQUIREMENTS = ["bravado_core", "pytest", "WebTest"]
51
52 DEPENDENCY_LINKS = []
53
54 MONITORING_REQUIRES = ["raven", "statsd", "newrelic", "werkzeug"]
55
56 ENTRY_POINTS = {
57 "paste.app_factory": ["main = kinto:main"],
58 "console_scripts": ["kinto = kinto.__main__:main"],
59 }
60
61
62 setup(
63 name="kinto",
64 version="11.2.0",
65 description="Kinto Web Service - Store, Sync, Share, and Self-Host.",
66 long_description="{}\n\n{}\n\n{}".format(README, CHANGELOG, CONTRIBUTORS),
67 license="Apache License (2.0)",
68 classifiers=[
69 "Programming Language :: Python",
70 "Programming Language :: Python :: 3",
71 "Programming Language :: Python :: 3.5",
72 "Programming Language :: Python :: 3.6",
73 "Programming Language :: Python :: Implementation :: CPython",
74 "Topic :: Internet :: WWW/HTTP",
75 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
76 "License :: OSI Approved :: Apache Software License",
77 ],
78 keywords="web sync json storage services",
79 author="Mozilla Services",
80 author_email="[email protected]",
81 url="https://github.com/Kinto/kinto",
82 packages=find_packages(),
83 package_data={"": ["*.rst", "*.py", "*.yaml"]},
84 include_package_data=True,
85 zip_safe=False,
86 setup_requires=SETUP_REQUIRES,
87 tests_require=TEST_REQUIREMENTS,
88 install_requires=REQUIREMENTS,
89 extras_require={
90 "redis": REDIS_REQUIRES,
91 "memcached": MEMCACHED_REQUIRES,
92 "postgresql": POSTGRESQL_REQUIRES,
93 "monitoring": MONITORING_REQUIRES,
94 },
95 test_suite="tests",
96 dependency_links=DEPENDENCY_LINKS,
97 entry_points=ENTRY_POINTS,
98 )
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/schema_validation.py b/kinto/schema_validation.py
--- a/kinto/schema_validation.py
+++ b/kinto/schema_validation.py
@@ -1,5 +1,11 @@
import colander
-from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate
+from jsonschema import ValidationError, SchemaError, RefResolutionError, validate
+
+try: # pragma: no cover
+ from jsonschema import Draft7Validator as DraftValidator
+except ImportError:
+ from jsonschema import Draft4Validator as DraftValidator
+
from pyramid.settings import asbool
from kinto.core import utils
@@ -27,7 +33,7 @@
def check_schema(data):
try:
- Draft7Validator.check_schema(data)
+ DraftValidator.check_schema(data)
except SchemaError as e:
message = e.path.pop() + e.message
raise ValidationError(message)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@
setup(
name="kinto",
- version="11.2.0",
+ version="11.2.1",
description="Kinto Web Service - Store, Sync, Share, and Self-Host.",
long_description="{}\n\n{}\n\n{}".format(README, CHANGELOG, CONTRIBUTORS),
license="Apache License (2.0)",
| {"golden_diff": "diff --git a/kinto/schema_validation.py b/kinto/schema_validation.py\n--- a/kinto/schema_validation.py\n+++ b/kinto/schema_validation.py\n@@ -1,5 +1,11 @@\n import colander\n-from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate\n+from jsonschema import ValidationError, SchemaError, RefResolutionError, validate\n+\n+try: # pragma: no cover\n+ from jsonschema import Draft7Validator as DraftValidator\n+except ImportError:\n+ from jsonschema import Draft4Validator as DraftValidator\n+\n from pyramid.settings import asbool\n \n from kinto.core import utils\n@@ -27,7 +33,7 @@\n \n def check_schema(data):\n try:\n- Draft7Validator.check_schema(data)\n+ DraftValidator.check_schema(data)\n except SchemaError as e:\n message = e.path.pop() + e.message\n raise ValidationError(message)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,7 @@\n \n setup(\n name=\"kinto\",\n- version=\"11.2.0\",\n+ version=\"11.2.1\",\n description=\"Kinto Web Service - Store, Sync, Share, and Self-Host.\",\n long_description=\"{}\\n\\n{}\\n\\n{}\".format(README, CHANGELOG, CONTRIBUTORS),\n license=\"Apache License (2.0)\",\n", "issue": "Kinto not able to start - Regression due to jsonschema Draft7Validator\n#1808 introduced a regression, because the Draft7Validator is not currently released by the jsonschema package (they introduced it in master but it's not released yet)\r\n\r\nSolution is either to depend on the master branch of jsonschema (not recommended I guess) or revert this change back until a new version is issued.\n", "before_files": [{"content": "import colander\nfrom jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate\nfrom pyramid.settings import asbool\n\nfrom kinto.core import utils\nfrom kinto.core.errors import raise_invalid\nfrom kinto.views import object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown=\"preserve\")\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super().deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n try:\n check_schema(validated)\n except ValidationError as e:\n self.raise_invalid(e.message)\n return validated\n\n\ndef check_schema(data):\n try:\n Draft7Validator.check_schema(data)\n except SchemaError as e:\n message = e.path.pop() + e.message\n raise ValidationError(message)\n\n\ndef validate_schema(data, schema, ignore_fields=[]):\n required_fields = [f for f in schema.get(\"required\", []) if f not in ignore_fields]\n # jsonschema doesn't accept 'required': [] yet.\n # See https://github.com/Julian/jsonschema/issues/337.\n # In the meantime, strip out 'required' if no other fields are required.\n if required_fields:\n schema = {**schema, \"required\": required_fields}\n else:\n schema = {f: v for f, v in schema.items() if f != \"required\"}\n\n data = {f: v for f, v in data.items() if f not in ignore_fields}\n\n try:\n validate(data, schema)\n except ValidationError as e:\n if e.path:\n field = e.path[-1]\n elif e.validator_value:\n field = e.validator_value[-1]\n else:\n field = e.schema_path[-1]\n e.field = field\n raise e\n # Raise an error here if a reference in the schema doesn't resolve.\n # jsonschema doesn't provide schema validation checking upon creation yet,\n # it must be validated 
against data.\n # See https://github.com/Julian/jsonschema/issues/399\n # For future support https://github.com/Julian/jsonschema/issues/346.\n except RefResolutionError as e:\n raise e\n\n\ndef validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):\n \"\"\"Lookup in the parent objects if a schema was defined for this resource.\n\n If the schema validation feature is enabled, if a schema is/are defined, and if the\n data does not validate it/them, then it raises a 400 exception.\n \"\"\"\n settings = request.registry.settings\n schema_validation = \"experimental_collection_schema_validation\"\n # If disabled from settings, do nothing.\n if not asbool(settings.get(schema_validation)):\n return\n\n bucket_id = request.matchdict[\"bucket_id\"]\n bucket_uri = utils.instance_uri(request, \"bucket\", id=bucket_id)\n buckets = request.bound_data.setdefault(\"buckets\", {})\n if bucket_uri not in buckets:\n # Unknown yet, fetch from storage.\n bucket = object_exists_or_404(\n request, collection_id=\"bucket\", parent_id=\"\", object_id=bucket_id\n )\n buckets[bucket_uri] = bucket\n\n # Let's see if the bucket defines a schema for this resource.\n metadata_field = \"{}:schema\".format(resource_name)\n bucket = buckets[bucket_uri]\n if metadata_field not in bucket:\n return\n\n # Validate or fail with 400.\n schema = bucket[metadata_field]\n try:\n validate_schema(data, schema, ignore_fields=ignore_fields)\n except ValidationError as e:\n raise_invalid(request, name=e.field, description=e.message)\n except RefResolutionError as e:\n raise_invalid(request, name=\"schema\", description=str(e))\n", "path": "kinto/schema_validation.py"}, {"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\n# abspath here because setup.py may be __main__, in which case\n# __file__ is not guaranteed to be absolute\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding=\"utf-8\") as f:\n content = f.read()\n return content\n\n\nREADME = read_file(\"README.rst\")\nCHANGELOG = read_file(\"CHANGELOG.rst\")\nCONTRIBUTORS = read_file(\"CONTRIBUTORS.rst\")\n\nREQUIREMENTS = [\n \"bcrypt\",\n \"colander >= 1.4.0\",\n \"cornice\",\n \"cornice_swagger >= 0.5.1\",\n \"dockerflow\",\n \"jsonschema\",\n \"jsonpatch\",\n \"logging-color-formatter >= 1.0.1\", # Message interpolations.\n \"python-dateutil\",\n \"pyramid >= 1.9.1, < 2.0\",\n \"pyramid_multiauth >= 0.8\", # User on policy selected event.\n \"transaction\",\n # pyramid_tm changed the location of their tween in 2.x and one of\n # our tests fails on 2.0.\n \"pyramid_tm >= 2.1\",\n \"requests\",\n \"waitress\",\n \"ujson >= 1.35\",\n]\n\nPOSTGRESQL_REQUIRES = [\"SQLAlchemy\", \"psycopg2 > 2.5\", \"zope.sqlalchemy\"]\n\nREDIS_REQUIRES = [\"kinto_redis\"]\n\nMEMCACHED_REQUIRES = [\"python-memcached\"]\n\nSETUP_REQUIRES = [\"pytest-runner\"]\n\nTEST_REQUIREMENTS = [\"bravado_core\", \"pytest\", \"WebTest\"]\n\nDEPENDENCY_LINKS = []\n\nMONITORING_REQUIRES = [\"raven\", \"statsd\", \"newrelic\", \"werkzeug\"]\n\nENTRY_POINTS = {\n \"paste.app_factory\": [\"main = kinto:main\"],\n \"console_scripts\": [\"kinto = kinto.__main__:main\"],\n}\n\n\nsetup(\n name=\"kinto\",\n version=\"11.2.0\",\n description=\"Kinto Web Service - Store, Sync, Share, and Self-Host.\",\n long_description=\"{}\\n\\n{}\\n\\n{}\".format(README, CHANGELOG, CONTRIBUTORS),\n license=\"Apache License (2.0)\",\n 
classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n keywords=\"web sync json storage services\",\n author=\"Mozilla Services\",\n author_email=\"[email protected]\",\n url=\"https://github.com/Kinto/kinto\",\n packages=find_packages(),\n package_data={\"\": [\"*.rst\", \"*.py\", \"*.yaml\"]},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n \"redis\": REDIS_REQUIRES,\n \"memcached\": MEMCACHED_REQUIRES,\n \"postgresql\": POSTGRESQL_REQUIRES,\n \"monitoring\": MONITORING_REQUIRES,\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS,\n)\n", "path": "setup.py"}], "after_files": [{"content": "import colander\nfrom jsonschema import ValidationError, SchemaError, RefResolutionError, validate\n\ntry: # pragma: no cover\n from jsonschema import Draft7Validator as DraftValidator\nexcept ImportError:\n from jsonschema import Draft4Validator as DraftValidator\n\nfrom pyramid.settings import asbool\n\nfrom kinto.core import utils\nfrom kinto.core.errors import raise_invalid\nfrom kinto.views import object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown=\"preserve\")\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super().deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n try:\n check_schema(validated)\n except ValidationError as e:\n self.raise_invalid(e.message)\n return validated\n\n\ndef check_schema(data):\n try:\n DraftValidator.check_schema(data)\n except SchemaError as e:\n message = e.path.pop() + e.message\n raise ValidationError(message)\n\n\ndef validate_schema(data, schema, ignore_fields=[]):\n required_fields = [f for f in schema.get(\"required\", []) if f not in ignore_fields]\n # jsonschema doesn't accept 'required': [] yet.\n # See https://github.com/Julian/jsonschema/issues/337.\n # In the meantime, strip out 'required' if no other fields are required.\n if required_fields:\n schema = {**schema, \"required\": required_fields}\n else:\n schema = {f: v for f, v in schema.items() if f != \"required\"}\n\n data = {f: v for f, v in data.items() if f not in ignore_fields}\n\n try:\n validate(data, schema)\n except ValidationError as e:\n if e.path:\n field = e.path[-1]\n elif e.validator_value:\n field = e.validator_value[-1]\n else:\n field = e.schema_path[-1]\n e.field = field\n raise e\n # Raise an error here if a reference in the schema doesn't resolve.\n # jsonschema doesn't provide schema validation checking upon creation yet,\n # it must be validated against data.\n # See https://github.com/Julian/jsonschema/issues/399\n # For future support https://github.com/Julian/jsonschema/issues/346.\n except RefResolutionError as e:\n raise e\n\n\ndef validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):\n \"\"\"Lookup in the parent objects if a schema was defined for this resource.\n\n If the schema 
validation feature is enabled, if a schema is/are defined, and if the\n data does not validate it/them, then it raises a 400 exception.\n \"\"\"\n settings = request.registry.settings\n schema_validation = \"experimental_collection_schema_validation\"\n # If disabled from settings, do nothing.\n if not asbool(settings.get(schema_validation)):\n return\n\n bucket_id = request.matchdict[\"bucket_id\"]\n bucket_uri = utils.instance_uri(request, \"bucket\", id=bucket_id)\n buckets = request.bound_data.setdefault(\"buckets\", {})\n if bucket_uri not in buckets:\n # Unknown yet, fetch from storage.\n bucket = object_exists_or_404(\n request, collection_id=\"bucket\", parent_id=\"\", object_id=bucket_id\n )\n buckets[bucket_uri] = bucket\n\n # Let's see if the bucket defines a schema for this resource.\n metadata_field = \"{}:schema\".format(resource_name)\n bucket = buckets[bucket_uri]\n if metadata_field not in bucket:\n return\n\n # Validate or fail with 400.\n schema = bucket[metadata_field]\n try:\n validate_schema(data, schema, ignore_fields=ignore_fields)\n except ValidationError as e:\n raise_invalid(request, name=e.field, description=e.message)\n except RefResolutionError as e:\n raise_invalid(request, name=\"schema\", description=str(e))\n", "path": "kinto/schema_validation.py"}, {"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\n# abspath here because setup.py may be __main__, in which case\n# __file__ is not guaranteed to be absolute\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding=\"utf-8\") as f:\n content = f.read()\n return content\n\n\nREADME = read_file(\"README.rst\")\nCHANGELOG = read_file(\"CHANGELOG.rst\")\nCONTRIBUTORS = read_file(\"CONTRIBUTORS.rst\")\n\nREQUIREMENTS = [\n \"bcrypt\",\n \"colander >= 1.4.0\",\n \"cornice\",\n \"cornice_swagger >= 0.5.1\",\n \"dockerflow\",\n \"jsonschema\",\n \"jsonpatch\",\n \"logging-color-formatter >= 1.0.1\", # Message interpolations.\n \"python-dateutil\",\n \"pyramid >= 1.9.1, < 2.0\",\n \"pyramid_multiauth >= 0.8\", # User on policy selected event.\n \"transaction\",\n # pyramid_tm changed the location of their tween in 2.x and one of\n # our tests fails on 2.0.\n \"pyramid_tm >= 2.1\",\n \"requests\",\n \"waitress\",\n \"ujson >= 1.35\",\n]\n\nPOSTGRESQL_REQUIRES = [\"SQLAlchemy\", \"psycopg2 > 2.5\", \"zope.sqlalchemy\"]\n\nREDIS_REQUIRES = [\"kinto_redis\"]\n\nMEMCACHED_REQUIRES = [\"python-memcached\"]\n\nSETUP_REQUIRES = [\"pytest-runner\"]\n\nTEST_REQUIREMENTS = [\"bravado_core\", \"pytest\", \"WebTest\"]\n\nDEPENDENCY_LINKS = []\n\nMONITORING_REQUIRES = [\"raven\", \"statsd\", \"newrelic\", \"werkzeug\"]\n\nENTRY_POINTS = {\n \"paste.app_factory\": [\"main = kinto:main\"],\n \"console_scripts\": [\"kinto = kinto.__main__:main\"],\n}\n\n\nsetup(\n name=\"kinto\",\n version=\"11.2.1\",\n description=\"Kinto Web Service - Store, Sync, Share, and Self-Host.\",\n long_description=\"{}\\n\\n{}\\n\\n{}\".format(README, CHANGELOG, CONTRIBUTORS),\n license=\"Apache License (2.0)\",\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved 
:: Apache Software License\",\n ],\n keywords=\"web sync json storage services\",\n author=\"Mozilla Services\",\n author_email=\"[email protected]\",\n url=\"https://github.com/Kinto/kinto\",\n packages=find_packages(),\n package_data={\"\": [\"*.rst\", \"*.py\", \"*.yaml\"]},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n \"redis\": REDIS_REQUIRES,\n \"memcached\": MEMCACHED_REQUIRES,\n \"postgresql\": POSTGRESQL_REQUIRES,\n \"monitoring\": MONITORING_REQUIRES,\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS,\n)\n", "path": "setup.py"}]} | 2,410 | 311 |
gh_patches_debug_50213 | rasdani/github-patches | git_diff | pex-tool__pex-1590 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.64
On the docket:
+ [x] Pex does not support mac universal2 wheels #1587
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.63"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.63"
+__version__ = "2.1.64"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.63\"\n+__version__ = \"2.1.64\"\n", "issue": "Release 2.1.64\nOn the docket:\r\n+ [x] Pex does not support mac universal2 wheels #1587 \r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.63\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.64\"\n", "path": "pex/version.py"}]} | 342 | 96 |
gh_patches_debug_19859 | rasdani/github-patches | git_diff | sublimelsp__LSP-1265 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support document symbol sorting
Currently, when we call `LSP: Document Symbols`, we get document symbols displayed in reverse order from the actual code. Like this:

This is because the LSP server returns document symbols in reverse order (in this example, the server is `rust-analyzer`).
Language Server Protocol doesn't specify the order of document symbols returned from the server, so how symbols are displayed should probably be decided and handled on the client-side.
Looking at VSCode, users have options to sort symbols in several ways (by position, by name, by category).

It would be best to provide sort options like VSCode, but for now, sorting them by something (I think by position is good) should be implemented before providing sort options.
--- END ISSUE ---
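A minimal sketch of the position-based sort described above — it assumes the parallel `self.regions` and quick-panel item lists built in `plugin/symbols.py` below and Sublime's `Region.begin()` API, and it mirrors the approach taken in this record's golden diff rather than being the only possible fix:
```
# Sort the quick panel entries and their regions together, keyed on where each
# symbol's region starts in the buffer, so the panel follows document order.
sorted_pairs = sorted(zip(self.regions, panel_items), key=lambda pair: pair[0][0].begin())
sorted_regions, sorted_panel_items = zip(*sorted_pairs)
self.regions = list(sorted_regions)
panel_items = list(sorted_panel_items)
```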
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/symbols.py`
Content:
```
1 from .core.protocol import Request, Range
2 from .core.registry import LspTextCommand
3 from .core.typing import Any, List, Optional, Tuple, Dict, Generator
4 from .core.views import location_to_encoded_filename
5 from .core.views import range_to_region
6 from .core.views import SYMBOL_KINDS
7 from .core.views import text_document_identifier
8 from contextlib import contextmanager
9 import os
10 import sublime
11 import sublime_plugin
12
13
14 def unpack_lsp_kind(kind: int) -> Tuple[int, str, str, str]:
15 if 1 <= kind <= len(SYMBOL_KINDS):
16 return SYMBOL_KINDS[kind - 1]
17 return sublime.KIND_ID_AMBIGUOUS, "?", "???", "comment"
18
19
20 def format_symbol_kind(kind: int) -> str:
21 if 1 <= kind <= len(SYMBOL_KINDS):
22 return SYMBOL_KINDS[kind - 1][2]
23 return str(kind)
24
25
26 def get_symbol_scope_from_lsp_kind(kind: int) -> str:
27 if 1 <= kind <= len(SYMBOL_KINDS):
28 return SYMBOL_KINDS[kind - 1][3]
29 return 'comment'
30
31
32 @contextmanager
33 def _additional_name(names: List[str], name: str) -> Generator[None, None, None]:
34 names.append(name)
35 yield
36 names.pop(-1)
37
38
39 class LspSelectionClearCommand(sublime_plugin.TextCommand):
40 """
41 Selections may not be modified outside the run method of a text command. Thus, to allow modification in an async
42 context we need to have dedicated commands for this.
43
44 https://github.com/sublimehq/sublime_text/issues/485#issuecomment-337480388
45 """
46
47 def run(self, _: sublime.Edit) -> None:
48 self.view.sel().clear()
49
50
51 class LspSelectionAddCommand(sublime_plugin.TextCommand):
52
53 def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:
54 for region in regions:
55 self.view.sel().add(sublime.Region(*region))
56
57
58 class LspSelectionSetCommand(sublime_plugin.TextCommand):
59
60 def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:
61 self.view.sel().clear()
62 for region in regions:
63 self.view.sel().add(sublime.Region(*region))
64
65
66 class LspDocumentSymbolsCommand(LspTextCommand):
67
68 capability = 'documentSymbolProvider'
69 REGIONS_KEY = 'lsp_document_symbols'
70
71 def __init__(self, view: sublime.View) -> None:
72 super().__init__(view)
73 self.old_regions = [] # type: List[sublime.Region]
74 self.regions = [] # type: List[Tuple[sublime.Region, Optional[sublime.Region], str]]
75 self.is_first_selection = False
76
77 def run(self, edit: sublime.Edit) -> None:
78 session = self.best_session(self.capability)
79 if session:
80 session.send_request(
81 Request.documentSymbols({"textDocument": text_document_identifier(self.view)}), self.handle_response)
82
83 def handle_response(self, response: Any) -> None:
84 window = self.view.window()
85 if window and isinstance(response, list) and len(response) > 0:
86 self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]
87 self.is_first_selection = True
88 window.show_quick_panel(
89 self.process_symbols(response),
90 self.on_symbol_selected,
91 sublime.KEEP_OPEN_ON_FOCUS_LOST,
92 0,
93 self.on_highlighted)
94 self.view.run_command("lsp_selection_clear")
95
96 def region(self, index: int) -> sublime.Region:
97 return self.regions[index][0]
98
99 def selection_region(self, index: int) -> Optional[sublime.Region]:
100 return self.regions[index][1]
101
102 def scope(self, index: int) -> str:
103 return self.regions[index][2]
104
105 def on_symbol_selected(self, index: int) -> None:
106 if index == -1:
107 if len(self.old_regions) > 0:
108 self.view.run_command("lsp_selection_add", {"regions": [(r.a, r.b) for r in self.old_regions]})
109 self.view.show_at_center(self.old_regions[0].begin())
110 else:
111 region = self.selection_region(index) or self.region(index)
112 self.view.run_command("lsp_selection_add", {"regions": [(region.a, region.a)]})
113 self.view.show_at_center(region.a)
114 self.view.erase_regions(self.REGIONS_KEY)
115 self.old_regions.clear()
116 self.regions.clear()
117
118 def on_highlighted(self, index: int) -> None:
119 if self.is_first_selection:
120 self.is_first_selection = False
121 return
122 region = self.region(index)
123 self.view.show_at_center(region.a)
124 self.view.add_regions(self.REGIONS_KEY, [region], self.scope(index), '', sublime.DRAW_NO_FILL)
125
126 def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
127 self.regions.clear()
128 if 'selectionRange' in items[0]:
129 return self.process_document_symbols(items)
130 else:
131 return self.process_symbol_informations(items)
132
133 def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
134 quick_panel_items = [] # type: List[sublime.QuickPanelItem]
135 names = [] # type: List[str]
136 for item in items:
137 self.process_document_symbol_recursive(quick_panel_items, item, names)
138 return quick_panel_items
139
140 def process_document_symbol_recursive(self, quick_panel_items: List[sublime.QuickPanelItem], item: Dict[str, Any],
141 names: List[str]) -> None:
142 lsp_kind = item["kind"]
143 self.regions.append((range_to_region(Range.from_lsp(item['range']), self.view),
144 range_to_region(Range.from_lsp(item['selectionRange']), self.view),
145 get_symbol_scope_from_lsp_kind(lsp_kind)))
146 name = item['name']
147 with _additional_name(names, name):
148 st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)
149 formatted_names = " > ".join(names)
150 st_details = item.get("detail") or ""
151 if st_details:
152 st_details = "{} | {}".format(st_details, formatted_names)
153 else:
154 st_details = formatted_names
155 quick_panel_items.append(
156 sublime.QuickPanelItem(
157 trigger=name,
158 details=st_details,
159 annotation=st_display_type,
160 kind=(st_kind, st_icon, st_display_type)))
161 children = item.get('children') or []
162 for child in children:
163 self.process_document_symbol_recursive(quick_panel_items, child, names)
164
165 def process_symbol_informations(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
166 quick_panel_items = [] # type: List[sublime.QuickPanelItem]
167 for item in items:
168 lsp_kind = item['kind']
169 self.regions.append((range_to_region(Range.from_lsp(item['location']['range']), self.view),
170 None, get_symbol_scope_from_lsp_kind(lsp_kind)))
171 container = item.get("containerName")
172 st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)
173 quick_panel_items.append(
174 sublime.QuickPanelItem(
175 trigger=item["name"],
176 details=container or "",
177 annotation=st_display_type,
178 kind=(st_kind, st_icon, st_display_type)))
179 return quick_panel_items
180
181
182 class SymbolQueryInput(sublime_plugin.TextInputHandler):
183
184 def validate(self, txt: str) -> bool:
185 return txt != ""
186
187 def placeholder(self) -> str:
188 return "Symbol"
189
190
191 class LspWorkspaceSymbolsCommand(LspTextCommand):
192
193 capability = 'workspaceSymbolProvider'
194
195 def input(self, _args: Any) -> sublime_plugin.TextInputHandler:
196 return SymbolQueryInput()
197
198 def run(self, edit: sublime.Edit, symbol_query_input: str = "") -> None:
199 if symbol_query_input:
200 session = self.best_session(self.capability)
201 if session:
202 self.view.set_status("lsp_workspace_symbols", "Searching for '{}'...".format(symbol_query_input))
203 request = Request.workspaceSymbol({"query": symbol_query_input})
204 session.send_request(request, lambda r: self._handle_response(
205 symbol_query_input, r), self._handle_error)
206
207 def _format(self, s: Dict[str, Any]) -> str:
208 file_name = os.path.basename(s['location']['uri'])
209 symbol_kind = format_symbol_kind(s["kind"])
210 name = "{} ({}) - {} -- {}".format(s['name'], symbol_kind, s.get('containerName', ""), file_name)
211 return name
212
213 def _open_file(self, symbols: List[Dict[str, Any]], index: int) -> None:
214 if index != -1:
215 symbol = symbols[index]
216 window = self.view.window()
217 if window:
218 window.open_file(location_to_encoded_filename(symbol['location']), sublime.ENCODED_POSITION)
219
220 def _handle_response(self, query: str, response: Optional[List[Dict[str, Any]]]) -> None:
221 self.view.erase_status("lsp_workspace_symbols")
222 if response:
223 matches = response
224 window = self.view.window()
225 if window:
226 window.show_quick_panel(list(map(self._format, matches)), lambda i: self._open_file(matches, i))
227 else:
228 sublime.message_dialog("No matches found for query string: '{}'".format(query))
229
230 def _handle_error(self, error: Dict[str, Any]) -> None:
231 self.view.erase_status("lsp_workspace_symbols")
232 reason = error.get("message", "none provided by server :(")
233 msg = "command 'workspace/symbol' failed. Reason: {}".format(reason)
234 sublime.error_message(msg)
235
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/symbols.py b/plugin/symbols.py
--- a/plugin/symbols.py
+++ b/plugin/symbols.py
@@ -125,10 +125,16 @@
def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
self.regions.clear()
+ panel_items = []
if 'selectionRange' in items[0]:
- return self.process_document_symbols(items)
+ panel_items = self.process_document_symbols(items)
else:
- return self.process_symbol_informations(items)
+ panel_items = self.process_symbol_informations(items)
+ # Sort both lists in sync according to the range's begin point.
+ sorted_results = zip(*sorted(zip(self.regions, panel_items), key=lambda item: item[0][0].begin()))
+ sorted_regions, sorted_panel_items = sorted_results
+ self.regions = list(sorted_regions)
+ return list(sorted_panel_items)
def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:
quick_panel_items = [] # type: List[sublime.QuickPanelItem]
| {"golden_diff": "diff --git a/plugin/symbols.py b/plugin/symbols.py\n--- a/plugin/symbols.py\n+++ b/plugin/symbols.py\n@@ -125,10 +125,16 @@\n \n def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n self.regions.clear()\n+ panel_items = []\n if 'selectionRange' in items[0]:\n- return self.process_document_symbols(items)\n+ panel_items = self.process_document_symbols(items)\n else:\n- return self.process_symbol_informations(items)\n+ panel_items = self.process_symbol_informations(items)\n+ # Sort both lists in sync according to the range's begin point.\n+ sorted_results = zip(*sorted(zip(self.regions, panel_items), key=lambda item: item[0][0].begin()))\n+ sorted_regions, sorted_panel_items = sorted_results\n+ self.regions = list(sorted_regions)\n+ return list(sorted_panel_items)\n \n def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n", "issue": "Support document symbol sorting\nCurrently, when we call `LSP: Document Symbols`, we get document symbols displayed in reverse order from the actual code. Like this:\r\n\r\n\r\n\r\nThis is because the LSP server returns document symbols in reverse order (in this example, the server is `rust-analyzer`).\r\nLanguage Server Protocol doesn't specify the order of document symbols returned from the server, so how symbols are displayed should probably be decided and handled on the client-side.\r\n\r\nLooking at VSCode, users have options to sort symbols in several ways (by position, by name, by category).\r\n\r\n\r\n\r\nIt would be best to provide sort options like VSCode, but for now, to sort them by something (I think by position is good) should be implemented before providing sort options.\n", "before_files": [{"content": "from .core.protocol import Request, Range\nfrom .core.registry import LspTextCommand\nfrom .core.typing import Any, List, Optional, Tuple, Dict, Generator\nfrom .core.views import location_to_encoded_filename\nfrom .core.views import range_to_region\nfrom .core.views import SYMBOL_KINDS\nfrom .core.views import text_document_identifier\nfrom contextlib import contextmanager\nimport os\nimport sublime\nimport sublime_plugin\n\n\ndef unpack_lsp_kind(kind: int) -> Tuple[int, str, str, str]:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1]\n return sublime.KIND_ID_AMBIGUOUS, \"?\", \"???\", \"comment\"\n\n\ndef format_symbol_kind(kind: int) -> str:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1][2]\n return str(kind)\n\n\ndef get_symbol_scope_from_lsp_kind(kind: int) -> str:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1][3]\n return 'comment'\n\n\n@contextmanager\ndef _additional_name(names: List[str], name: str) -> Generator[None, None, None]:\n names.append(name)\n yield\n names.pop(-1)\n\n\nclass LspSelectionClearCommand(sublime_plugin.TextCommand):\n \"\"\"\n Selections may not be modified outside the run method of a text command. 
Thus, to allow modification in an async\n context we need to have dedicated commands for this.\n\n https://github.com/sublimehq/sublime_text/issues/485#issuecomment-337480388\n \"\"\"\n\n def run(self, _: sublime.Edit) -> None:\n self.view.sel().clear()\n\n\nclass LspSelectionAddCommand(sublime_plugin.TextCommand):\n\n def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:\n for region in regions:\n self.view.sel().add(sublime.Region(*region))\n\n\nclass LspSelectionSetCommand(sublime_plugin.TextCommand):\n\n def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:\n self.view.sel().clear()\n for region in regions:\n self.view.sel().add(sublime.Region(*region))\n\n\nclass LspDocumentSymbolsCommand(LspTextCommand):\n\n capability = 'documentSymbolProvider'\n REGIONS_KEY = 'lsp_document_symbols'\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.old_regions = [] # type: List[sublime.Region]\n self.regions = [] # type: List[Tuple[sublime.Region, Optional[sublime.Region], str]]\n self.is_first_selection = False\n\n def run(self, edit: sublime.Edit) -> None:\n session = self.best_session(self.capability)\n if session:\n session.send_request(\n Request.documentSymbols({\"textDocument\": text_document_identifier(self.view)}), self.handle_response)\n\n def handle_response(self, response: Any) -> None:\n window = self.view.window()\n if window and isinstance(response, list) and len(response) > 0:\n self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]\n self.is_first_selection = True\n window.show_quick_panel(\n self.process_symbols(response),\n self.on_symbol_selected,\n sublime.KEEP_OPEN_ON_FOCUS_LOST,\n 0,\n self.on_highlighted)\n self.view.run_command(\"lsp_selection_clear\")\n\n def region(self, index: int) -> sublime.Region:\n return self.regions[index][0]\n\n def selection_region(self, index: int) -> Optional[sublime.Region]:\n return self.regions[index][1]\n\n def scope(self, index: int) -> str:\n return self.regions[index][2]\n\n def on_symbol_selected(self, index: int) -> None:\n if index == -1:\n if len(self.old_regions) > 0:\n self.view.run_command(\"lsp_selection_add\", {\"regions\": [(r.a, r.b) for r in self.old_regions]})\n self.view.show_at_center(self.old_regions[0].begin())\n else:\n region = self.selection_region(index) or self.region(index)\n self.view.run_command(\"lsp_selection_add\", {\"regions\": [(region.a, region.a)]})\n self.view.show_at_center(region.a)\n self.view.erase_regions(self.REGIONS_KEY)\n self.old_regions.clear()\n self.regions.clear()\n\n def on_highlighted(self, index: int) -> None:\n if self.is_first_selection:\n self.is_first_selection = False\n return\n region = self.region(index)\n self.view.show_at_center(region.a)\n self.view.add_regions(self.REGIONS_KEY, [region], self.scope(index), '', sublime.DRAW_NO_FILL)\n\n def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n self.regions.clear()\n if 'selectionRange' in items[0]:\n return self.process_document_symbols(items)\n else:\n return self.process_symbol_informations(items)\n\n def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n names = [] # type: List[str]\n for item in items:\n self.process_document_symbol_recursive(quick_panel_items, item, names)\n return quick_panel_items\n\n def process_document_symbol_recursive(self, quick_panel_items: List[sublime.QuickPanelItem], item: 
Dict[str, Any],\n names: List[str]) -> None:\n lsp_kind = item[\"kind\"]\n self.regions.append((range_to_region(Range.from_lsp(item['range']), self.view),\n range_to_region(Range.from_lsp(item['selectionRange']), self.view),\n get_symbol_scope_from_lsp_kind(lsp_kind)))\n name = item['name']\n with _additional_name(names, name):\n st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)\n formatted_names = \" > \".join(names)\n st_details = item.get(\"detail\") or \"\"\n if st_details:\n st_details = \"{} | {}\".format(st_details, formatted_names)\n else:\n st_details = formatted_names\n quick_panel_items.append(\n sublime.QuickPanelItem(\n trigger=name,\n details=st_details,\n annotation=st_display_type,\n kind=(st_kind, st_icon, st_display_type)))\n children = item.get('children') or []\n for child in children:\n self.process_document_symbol_recursive(quick_panel_items, child, names)\n\n def process_symbol_informations(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n for item in items:\n lsp_kind = item['kind']\n self.regions.append((range_to_region(Range.from_lsp(item['location']['range']), self.view),\n None, get_symbol_scope_from_lsp_kind(lsp_kind)))\n container = item.get(\"containerName\")\n st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)\n quick_panel_items.append(\n sublime.QuickPanelItem(\n trigger=item[\"name\"],\n details=container or \"\",\n annotation=st_display_type,\n kind=(st_kind, st_icon, st_display_type)))\n return quick_panel_items\n\n\nclass SymbolQueryInput(sublime_plugin.TextInputHandler):\n\n def validate(self, txt: str) -> bool:\n return txt != \"\"\n\n def placeholder(self) -> str:\n return \"Symbol\"\n\n\nclass LspWorkspaceSymbolsCommand(LspTextCommand):\n\n capability = 'workspaceSymbolProvider'\n\n def input(self, _args: Any) -> sublime_plugin.TextInputHandler:\n return SymbolQueryInput()\n\n def run(self, edit: sublime.Edit, symbol_query_input: str = \"\") -> None:\n if symbol_query_input:\n session = self.best_session(self.capability)\n if session:\n self.view.set_status(\"lsp_workspace_symbols\", \"Searching for '{}'...\".format(symbol_query_input))\n request = Request.workspaceSymbol({\"query\": symbol_query_input})\n session.send_request(request, lambda r: self._handle_response(\n symbol_query_input, r), self._handle_error)\n\n def _format(self, s: Dict[str, Any]) -> str:\n file_name = os.path.basename(s['location']['uri'])\n symbol_kind = format_symbol_kind(s[\"kind\"])\n name = \"{} ({}) - {} -- {}\".format(s['name'], symbol_kind, s.get('containerName', \"\"), file_name)\n return name\n\n def _open_file(self, symbols: List[Dict[str, Any]], index: int) -> None:\n if index != -1:\n symbol = symbols[index]\n window = self.view.window()\n if window:\n window.open_file(location_to_encoded_filename(symbol['location']), sublime.ENCODED_POSITION)\n\n def _handle_response(self, query: str, response: Optional[List[Dict[str, Any]]]) -> None:\n self.view.erase_status(\"lsp_workspace_symbols\")\n if response:\n matches = response\n window = self.view.window()\n if window:\n window.show_quick_panel(list(map(self._format, matches)), lambda i: self._open_file(matches, i))\n else:\n sublime.message_dialog(\"No matches found for query string: '{}'\".format(query))\n\n def _handle_error(self, error: Dict[str, Any]) -> None:\n self.view.erase_status(\"lsp_workspace_symbols\")\n reason = error.get(\"message\", \"none provided by server :(\")\n msg = \"command 
'workspace/symbol' failed. Reason: {}\".format(reason)\n sublime.error_message(msg)\n", "path": "plugin/symbols.py"}], "after_files": [{"content": "from .core.protocol import Request, Range\nfrom .core.registry import LspTextCommand\nfrom .core.typing import Any, List, Optional, Tuple, Dict, Generator\nfrom .core.views import location_to_encoded_filename\nfrom .core.views import range_to_region\nfrom .core.views import SYMBOL_KINDS\nfrom .core.views import text_document_identifier\nfrom contextlib import contextmanager\nimport os\nimport sublime\nimport sublime_plugin\n\n\ndef unpack_lsp_kind(kind: int) -> Tuple[int, str, str, str]:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1]\n return sublime.KIND_ID_AMBIGUOUS, \"?\", \"???\", \"comment\"\n\n\ndef format_symbol_kind(kind: int) -> str:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1][2]\n return str(kind)\n\n\ndef get_symbol_scope_from_lsp_kind(kind: int) -> str:\n if 1 <= kind <= len(SYMBOL_KINDS):\n return SYMBOL_KINDS[kind - 1][3]\n return 'comment'\n\n\n@contextmanager\ndef _additional_name(names: List[str], name: str) -> Generator[None, None, None]:\n names.append(name)\n yield\n names.pop(-1)\n\n\nclass LspSelectionClearCommand(sublime_plugin.TextCommand):\n \"\"\"\n Selections may not be modified outside the run method of a text command. Thus, to allow modification in an async\n context we need to have dedicated commands for this.\n\n https://github.com/sublimehq/sublime_text/issues/485#issuecomment-337480388\n \"\"\"\n\n def run(self, _: sublime.Edit) -> None:\n self.view.sel().clear()\n\n\nclass LspSelectionAddCommand(sublime_plugin.TextCommand):\n\n def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:\n for region in regions:\n self.view.sel().add(sublime.Region(*region))\n\n\nclass LspSelectionSetCommand(sublime_plugin.TextCommand):\n\n def run(self, _: sublime.Edit, regions: List[Tuple[int, int]]) -> None:\n self.view.sel().clear()\n for region in regions:\n self.view.sel().add(sublime.Region(*region))\n\n\nclass LspDocumentSymbolsCommand(LspTextCommand):\n\n capability = 'documentSymbolProvider'\n REGIONS_KEY = 'lsp_document_symbols'\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.old_regions = [] # type: List[sublime.Region]\n self.regions = [] # type: List[Tuple[sublime.Region, Optional[sublime.Region], str]]\n self.is_first_selection = False\n\n def run(self, edit: sublime.Edit) -> None:\n session = self.best_session(self.capability)\n if session:\n session.send_request(\n Request.documentSymbols({\"textDocument\": text_document_identifier(self.view)}), self.handle_response)\n\n def handle_response(self, response: Any) -> None:\n window = self.view.window()\n if window and isinstance(response, list) and len(response) > 0:\n self.old_regions = [sublime.Region(r.a, r.b) for r in self.view.sel()]\n self.is_first_selection = True\n window.show_quick_panel(\n self.process_symbols(response),\n self.on_symbol_selected,\n sublime.KEEP_OPEN_ON_FOCUS_LOST,\n 0,\n self.on_highlighted)\n self.view.run_command(\"lsp_selection_clear\")\n\n def region(self, index: int) -> sublime.Region:\n return self.regions[index][0]\n\n def selection_region(self, index: int) -> Optional[sublime.Region]:\n return self.regions[index][1]\n\n def scope(self, index: int) -> str:\n return self.regions[index][2]\n\n def on_symbol_selected(self, index: int) -> None:\n if index == -1:\n if len(self.old_regions) > 0:\n self.view.run_command(\"lsp_selection_add\", 
{\"regions\": [(r.a, r.b) for r in self.old_regions]})\n self.view.show_at_center(self.old_regions[0].begin())\n else:\n region = self.selection_region(index) or self.region(index)\n self.view.run_command(\"lsp_selection_add\", {\"regions\": [(region.a, region.a)]})\n self.view.show_at_center(region.a)\n self.view.erase_regions(self.REGIONS_KEY)\n self.old_regions.clear()\n self.regions.clear()\n\n def on_highlighted(self, index: int) -> None:\n if self.is_first_selection:\n self.is_first_selection = False\n return\n region = self.region(index)\n self.view.show_at_center(region.a)\n self.view.add_regions(self.REGIONS_KEY, [region], self.scope(index), '', sublime.DRAW_NO_FILL)\n\n def process_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n self.regions.clear()\n panel_items = []\n if 'selectionRange' in items[0]:\n panel_items = self.process_document_symbols(items)\n else:\n panel_items = self.process_symbol_informations(items)\n # Sort both lists in sync according to the range's begin point.\n sorted_results = zip(*sorted(zip(self.regions, panel_items), key=lambda item: item[0][0].begin()))\n sorted_regions, sorted_panel_items = sorted_results\n self.regions = list(sorted_regions)\n return list(sorted_panel_items)\n\n def process_document_symbols(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n names = [] # type: List[str]\n for item in items:\n self.process_document_symbol_recursive(quick_panel_items, item, names)\n return quick_panel_items\n\n def process_document_symbol_recursive(self, quick_panel_items: List[sublime.QuickPanelItem], item: Dict[str, Any],\n names: List[str]) -> None:\n lsp_kind = item[\"kind\"]\n self.regions.append((range_to_region(Range.from_lsp(item['range']), self.view),\n range_to_region(Range.from_lsp(item['selectionRange']), self.view),\n get_symbol_scope_from_lsp_kind(lsp_kind)))\n name = item['name']\n with _additional_name(names, name):\n st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)\n formatted_names = \" > \".join(names)\n st_details = item.get(\"detail\") or \"\"\n if st_details:\n st_details = \"{} | {}\".format(st_details, formatted_names)\n else:\n st_details = formatted_names\n quick_panel_items.append(\n sublime.QuickPanelItem(\n trigger=name,\n details=st_details,\n annotation=st_display_type,\n kind=(st_kind, st_icon, st_display_type)))\n children = item.get('children') or []\n for child in children:\n self.process_document_symbol_recursive(quick_panel_items, child, names)\n\n def process_symbol_informations(self, items: List[Dict[str, Any]]) -> List[sublime.QuickPanelItem]:\n quick_panel_items = [] # type: List[sublime.QuickPanelItem]\n for item in items:\n lsp_kind = item['kind']\n self.regions.append((range_to_region(Range.from_lsp(item['location']['range']), self.view),\n None, get_symbol_scope_from_lsp_kind(lsp_kind)))\n container = item.get(\"containerName\")\n st_kind, st_icon, st_display_type, _ = unpack_lsp_kind(lsp_kind)\n quick_panel_items.append(\n sublime.QuickPanelItem(\n trigger=item[\"name\"],\n details=container or \"\",\n annotation=st_display_type,\n kind=(st_kind, st_icon, st_display_type)))\n return quick_panel_items\n\n\nclass SymbolQueryInput(sublime_plugin.TextInputHandler):\n\n def validate(self, txt: str) -> bool:\n return txt != \"\"\n\n def placeholder(self) -> str:\n return \"Symbol\"\n\n\nclass LspWorkspaceSymbolsCommand(LspTextCommand):\n\n capability = 'workspaceSymbolProvider'\n\n def 
input(self, _args: Any) -> sublime_plugin.TextInputHandler:\n return SymbolQueryInput()\n\n def run(self, edit: sublime.Edit, symbol_query_input: str = \"\") -> None:\n if symbol_query_input:\n session = self.best_session(self.capability)\n if session:\n self.view.set_status(\"lsp_workspace_symbols\", \"Searching for '{}'...\".format(symbol_query_input))\n request = Request.workspaceSymbol({\"query\": symbol_query_input})\n session.send_request(request, lambda r: self._handle_response(\n symbol_query_input, r), self._handle_error)\n\n def _format(self, s: Dict[str, Any]) -> str:\n file_name = os.path.basename(s['location']['uri'])\n symbol_kind = format_symbol_kind(s[\"kind\"])\n name = \"{} ({}) - {} -- {}\".format(s['name'], symbol_kind, s.get('containerName', \"\"), file_name)\n return name\n\n def _open_file(self, symbols: List[Dict[str, Any]], index: int) -> None:\n if index != -1:\n symbol = symbols[index]\n window = self.view.window()\n if window:\n window.open_file(location_to_encoded_filename(symbol['location']), sublime.ENCODED_POSITION)\n\n def _handle_response(self, query: str, response: Optional[List[Dict[str, Any]]]) -> None:\n self.view.erase_status(\"lsp_workspace_symbols\")\n if response:\n matches = response\n window = self.view.window()\n if window:\n window.show_quick_panel(list(map(self._format, matches)), lambda i: self._open_file(matches, i))\n else:\n sublime.message_dialog(\"No matches found for query string: '{}'\".format(query))\n\n def _handle_error(self, error: Dict[str, Any]) -> None:\n self.view.erase_status(\"lsp_workspace_symbols\")\n reason = error.get(\"message\", \"none provided by server :(\")\n msg = \"command 'workspace/symbol' failed. Reason: {}\".format(reason)\n sublime.error_message(msg)\n", "path": "plugin/symbols.py"}]} | 3,286 | 257 |
gh_patches_debug_8532 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2994 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Custom reports removed from 'Reports' general view
Remove custom reports from the 'Reports' general view, i.e. Plan Finland or EUTF reports are not visible to other partners. 

--- END ISSUE ---
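A minimal sketch of the query narrowing involved — `user` is the request user in `akvo/rest/views/report.py` below, and the anonymous-user guard is the part this record's golden diff adds; treat it as an illustration of the direction rather than the complete fix for hiding partner-specific reports:
```
# Anonymous visitors have no approved organisations, so they should only see
# reports that carry no organisation restriction at all.
approved_orgs = user.approved_organisations() if not user.is_anonymous() else []
reports = reports.filter(
    Q(organisations=None) | Q(organisations__in=approved_orgs)
).distinct()
```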
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/report.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.db.models import Q
8 from rest_framework.decorators import api_view
9 from rest_framework.response import Response
10
11 from akvo.rsr.models import Report, ReportFormat
12 from ..serializers import ReportSerializer, ReportFormatSerializer
13
14
15 @api_view(['GET'])
16 def reports(request):
17 """
18 A view for displaying all report information, sorted by title.
19 """
20
21 user = request.user
22 is_admin = user.is_active and (user.is_superuser or user.is_admin)
23 reports = Report.objects.all()
24 if not is_admin:
25 # Show only those reports that the user is allowed to see
26 reports = reports.filter(
27 Q(organisations=None) | Q(organisations__in=user.approved_organisations())
28 ).distinct()
29
30 # FIXME: Use a viewset instead?
31 return Response({
32 'count': reports.count(),
33 'results': [ReportSerializer(r).data for r in reports.order_by('title')],
34 })
35
36
37 @api_view(['GET'])
38 def report_formats(request):
39 """
40 A view for displaying all report format information.
41 """
42 return Response({
43 'count': ReportFormat.objects.all().count(),
44 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],
45 })
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py
--- a/akvo/rest/views/report.py
+++ b/akvo/rest/views/report.py
@@ -23,8 +23,9 @@
reports = Report.objects.all()
if not is_admin:
# Show only those reports that the user is allowed to see
+ approved_orgs = user.approved_organisations() if not user.is_anonymous() else []
reports = reports.filter(
- Q(organisations=None) | Q(organisations__in=user.approved_organisations())
+ Q(organisations=None) | Q(organisations__in=approved_orgs)
).distinct()
# FIXME: Use a viewset instead?
| {"golden_diff": "diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py\n--- a/akvo/rest/views/report.py\n+++ b/akvo/rest/views/report.py\n@@ -23,8 +23,9 @@\n reports = Report.objects.all()\n if not is_admin:\n # Show only those reports that the user is allowed to see\n+ approved_orgs = user.approved_organisations() if not user.is_anonymous() else []\n reports = reports.filter(\n- Q(organisations=None) | Q(organisations__in=user.approved_organisations())\n+ Q(organisations=None) | Q(organisations__in=approved_orgs)\n ).distinct()\n \n # FIXME: Use a viewset instead?\n", "issue": "Custom reports removed from 'Reports' general view\nRemove custom reports from 'Reports' general view, i.e. Plan Finland or EUTF reports are not visible for other partners. \r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models import Q\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.rsr.models import Report, ReportFormat\nfrom ..serializers import ReportSerializer, ReportFormatSerializer\n\n\n@api_view(['GET'])\ndef reports(request):\n \"\"\"\n A view for displaying all report information, sorted by title.\n \"\"\"\n\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n reports = Report.objects.all()\n if not is_admin:\n # Show only those reports that the user is allowed to see\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=user.approved_organisations())\n ).distinct()\n\n # FIXME: Use a viewset instead?\n return Response({\n 'count': reports.count(),\n 'results': [ReportSerializer(r).data for r in reports.order_by('title')],\n })\n\n\n@api_view(['GET'])\ndef report_formats(request):\n \"\"\"\n A view for displaying all report format information.\n \"\"\"\n return Response({\n 'count': ReportFormat.objects.all().count(),\n 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],\n })\n", "path": "akvo/rest/views/report.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models import Q\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.rsr.models import Report, ReportFormat\nfrom ..serializers import ReportSerializer, ReportFormatSerializer\n\n\n@api_view(['GET'])\ndef reports(request):\n \"\"\"\n A view for displaying all report information, sorted by title.\n \"\"\"\n\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n reports = Report.objects.all()\n if not is_admin:\n # Show only those reports that the user is allowed to see\n approved_orgs = user.approved_organisations() if not user.is_anonymous() else []\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=approved_orgs)\n ).distinct()\n\n # FIXME: Use a viewset instead?\n return Response({\n 'count': reports.count(),\n 'results': [ReportSerializer(r).data for r in reports.order_by('title')],\n 
})\n\n\n@api_view(['GET'])\ndef report_formats(request):\n \"\"\"\n A view for displaying all report format information.\n \"\"\"\n return Response({\n 'count': ReportFormat.objects.all().count(),\n 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],\n })\n", "path": "akvo/rest/views/report.py"}]} | 781 | 166 |
gh_patches_debug_18194 | rasdani/github-patches | git_diff | quantumlib__Cirq-5060 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clifford optimize should either support more general pauli phases or make this an assert
In the code to merge Clifford operations there is code that only works if a pauli string coefficient is $\pm 1$. If not, it raises a NotImplementedError, on lines that are not tested :(
My suspicion here is that at this point the phase can only be these two values, but someone will need to better understand this code and the associated TODO.
--- END ISSUE ---
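For context, the merge code below already folds a real ±1 coefficient into the quarter-turn count — this excerpt is taken from `clifford_optimize.py` as shown in this record, and the comment reflects the issue author's suspicion rather than a verified invariant:
```
quarter_turns = round(merge_op.exponent_relative * 2)
# The issue suspects the coefficient can only be +1 or -1 by this point,
# in which case multiplying by its real part is enough to absorb the phase.
quarter_turns *= int(merge_op.pauli_string.coefficient.real)
quarter_turns %= 4
part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)
```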
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/contrib/paulistring/clifford_optimize.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Tuple, cast
16
17 from cirq import ops, circuits
18 from cirq.contrib.paulistring.convert_gate_set import converted_gate_set
19
20
21 def clifford_optimized_circuit(circuit: circuits.Circuit, atol: float = 1e-8) -> circuits.Circuit:
22 # Convert to a circuit with SingleQubitCliffordGates,
23 # CZs and other ignored gates
24 c_cliff = converted_gate_set(circuit, no_clifford_gates=False, atol=atol)
25
26 all_ops = list(c_cliff.all_operations())
27
28 def find_merge_point(
29 start_i: int,
30 string_op: ops.PauliStringPhasor,
31 stop_at_cz: bool,
32 ) -> Tuple[int, ops.PauliStringPhasor, int]:
33 STOP = 0
34 CONTINUE = 1
35 SKIP = 2
36
37 def continue_condition(
38 op: ops.Operation, current_string: ops.PauliStringPhasor, is_first: bool
39 ) -> int:
40 if isinstance(op.gate, ops.SingleQubitCliffordGate):
41 return CONTINUE if len(current_string.pauli_string) != 1 else STOP
42 if isinstance(op.gate, ops.CZPowGate):
43 return STOP if stop_at_cz else CONTINUE
44 if (
45 isinstance(op, ops.PauliStringPhasor)
46 and len(op.qubits) == 1
47 and (op.pauli_string[op.qubits[0]] == current_string.pauli_string[op.qubits[0]])
48 ):
49 return SKIP
50 return STOP
51
52 modified_op = string_op
53 furthest_op = string_op
54 furthest_i = start_i + 1
55 num_passed_over = 0
56 for i in range(start_i + 1, len(all_ops)):
57 op = all_ops[i]
58 if not set(op.qubits) & set(modified_op.qubits):
59 # No qubits in common
60 continue
61 cont_cond = continue_condition(op, modified_op, i == start_i + 1)
62 if cont_cond == STOP:
63 if len(modified_op.pauli_string) == 1:
64 furthest_op = modified_op
65 furthest_i = i
66 break
67 if cont_cond == CONTINUE:
68 modified_op = modified_op.pass_operations_over([op], after_to_before=True)
69 num_passed_over += 1
70 if len(modified_op.pauli_string) == 1:
71 furthest_op = modified_op
72 furthest_i = i + 1
73
74 return furthest_i, furthest_op, num_passed_over
75
76 def try_merge_clifford(cliff_op: ops.GateOperation, start_i: int) -> bool:
77 (orig_qubit,) = cliff_op.qubits
78 remaining_cliff_gate = ops.SingleQubitCliffordGate.I
79 for pauli, quarter_turns in reversed(
80 cast(ops.SingleQubitCliffordGate, cliff_op.gate).decompose_rotation()
81 ):
82 trans = remaining_cliff_gate.transform(pauli)
83 pauli = trans.to
84 quarter_turns *= -1 if trans.flip else 1
85 string_op = ops.PauliStringPhasor(
86 ops.PauliString(pauli(cliff_op.qubits[0])), exponent_neg=quarter_turns / 2
87 )
88
89 merge_i, merge_op, num_passed = find_merge_point(start_i, string_op, quarter_turns == 2)
90 assert merge_i > start_i
91 assert len(merge_op.pauli_string) == 1, 'PauliString length != 1'
92
93 qubit, pauli = next(iter(merge_op.pauli_string.items()))
94 quarter_turns = round(merge_op.exponent_relative * 2)
95 if merge_op.pauli_string.coefficient not in [1, -1]:
96 # TODO: Add support for more general phases.
97 # Github issue: https://github.com/quantumlib/Cirq/issues/2962
98 # Legacy coverage ignore, we need test code that hits this.
99 # coverage: ignore
100 raise NotImplementedError(
101 'Only +1/-1 pauli string coefficients currently supported'
102 )
103 quarter_turns *= int(merge_op.pauli_string.coefficient.real)
104 quarter_turns %= 4
105 part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)
106
107 other_op = all_ops[merge_i] if merge_i < len(all_ops) else None
108 if other_op is not None and qubit not in set(other_op.qubits):
109 other_op = None
110
111 if isinstance(other_op, ops.GateOperation) and isinstance(
112 other_op.gate, ops.SingleQubitCliffordGate
113 ):
114 # Merge with another SingleQubitCliffordGate
115 new_op = part_cliff_gate.merged_with(other_op.gate)(qubit)
116 all_ops[merge_i] = new_op
117 elif (
118 isinstance(other_op, ops.GateOperation)
119 and isinstance(other_op.gate, ops.CZPowGate)
120 and other_op.gate.exponent == 1
121 and quarter_turns == 2
122 ):
123 # Pass whole Pauli gate over CZ, possibly adding a Z gate
124 if pauli != ops.pauli_gates.Z:
125 other_qubit = other_op.qubits[other_op.qubits.index(qubit) - 1]
126 all_ops.insert(merge_i + 1, ops.SingleQubitCliffordGate.Z(other_qubit))
127 all_ops.insert(merge_i + 1, part_cliff_gate(qubit))
128 elif isinstance(other_op, ops.PauliStringPhasor):
129 # Pass over a non-Clifford gate
130 mod_op = other_op.pass_operations_over([part_cliff_gate(qubit)])
131 all_ops[merge_i] = mod_op
132 all_ops.insert(merge_i + 1, part_cliff_gate(qubit))
133 elif merge_i > start_i + 1 and num_passed > 0:
134 # Moved Clifford through the circuit but nothing to merge
135 all_ops.insert(merge_i, part_cliff_gate(qubit))
136 else:
137 # Couldn't move Clifford
138 remaining_cliff_gate = remaining_cliff_gate.merged_with(part_cliff_gate)
139
140 if remaining_cliff_gate == ops.SingleQubitCliffordGate.I:
141 all_ops.pop(start_i)
142 return True
143 all_ops[start_i] = remaining_cliff_gate(orig_qubit)
144 return False
145
146 def try_merge_cz(cz_op: ops.GateOperation, start_i: int) -> int:
147 """Returns the number of operations removed at or before start_i."""
148 for i in reversed(range(start_i)):
149 op = all_ops[i]
150 if not set(cz_op.qubits) & set(op.qubits):
151 # Don't share qubits
152 # Keep looking
153 continue
154 elif not (
155 isinstance(op, ops.GateOperation)
156 and isinstance(op.gate, ops.CZPowGate)
157 and op.gate.exponent == 1
158 ):
159 # Not a CZ gate
160 return 0
161 elif cz_op == op:
162 # Cancel two CZ gates
163 all_ops.pop(start_i)
164 all_ops.pop(i)
165 return 2
166 else:
167 # Two CZ gates that share one qubit
168 # Pass through and keep looking
169 continue # coverage: ignore
170 # The above line is covered by test_remove_staggered_czs but the
171 # coverage checker disagrees.
172 return 0
173
174 i = 0
175 while i < len(all_ops):
176 op = all_ops[i]
177 if isinstance(op, ops.GateOperation) and isinstance(op.gate, ops.SingleQubitCliffordGate):
178 if try_merge_clifford(op, i):
179 i -= 1
180 elif (
181 isinstance(op, ops.GateOperation)
182 and isinstance(op.gate, ops.CZPowGate)
183 and op.gate.exponent == 1
184 ):
185 num_rm = try_merge_cz(op, i)
186 i -= num_rm
187 i += 1
188
189 return circuits.Circuit(all_ops, strategy=circuits.InsertStrategy.EARLIEST)
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq-core/cirq/contrib/paulistring/clifford_optimize.py b/cirq-core/cirq/contrib/paulistring/clifford_optimize.py
--- a/cirq-core/cirq/contrib/paulistring/clifford_optimize.py
+++ b/cirq-core/cirq/contrib/paulistring/clifford_optimize.py
@@ -92,14 +92,6 @@
qubit, pauli = next(iter(merge_op.pauli_string.items()))
quarter_turns = round(merge_op.exponent_relative * 2)
- if merge_op.pauli_string.coefficient not in [1, -1]:
- # TODO: Add support for more general phases.
- # Github issue: https://github.com/quantumlib/Cirq/issues/2962
- # Legacy coverage ignore, we need test code that hits this.
- # coverage: ignore
- raise NotImplementedError(
- 'Only +1/-1 pauli string coefficients currently supported'
- )
quarter_turns *= int(merge_op.pauli_string.coefficient.real)
quarter_turns %= 4
part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)
| {"golden_diff": "diff --git a/cirq-core/cirq/contrib/paulistring/clifford_optimize.py b/cirq-core/cirq/contrib/paulistring/clifford_optimize.py\n--- a/cirq-core/cirq/contrib/paulistring/clifford_optimize.py\n+++ b/cirq-core/cirq/contrib/paulistring/clifford_optimize.py\n@@ -92,14 +92,6 @@\n \n qubit, pauli = next(iter(merge_op.pauli_string.items()))\n quarter_turns = round(merge_op.exponent_relative * 2)\n- if merge_op.pauli_string.coefficient not in [1, -1]:\n- # TODO: Add support for more general phases.\n- # Github issue: https://github.com/quantumlib/Cirq/issues/2962\n- # Legacy coverage ignore, we need test code that hits this.\n- # coverage: ignore\n- raise NotImplementedError(\n- 'Only +1/-1 pauli string coefficients currently supported'\n- )\n quarter_turns *= int(merge_op.pauli_string.coefficient.real)\n quarter_turns %= 4\n part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)\n", "issue": "Clifford optimize either should support more general pauli phases or make assert\nIn the code to merge Clifford operations there is code that only works if a pauli string coefficient is $\\pm 1$. If not it raises a NotImplementError, lines that are not tested :(\r\n\r\nMy suspicion here is that at this point the phase can only be these two values, but someone will need to better understand this code and the associated TODO.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Tuple, cast\n\nfrom cirq import ops, circuits\nfrom cirq.contrib.paulistring.convert_gate_set import converted_gate_set\n\n\ndef clifford_optimized_circuit(circuit: circuits.Circuit, atol: float = 1e-8) -> circuits.Circuit:\n # Convert to a circuit with SingleQubitCliffordGates,\n # CZs and other ignored gates\n c_cliff = converted_gate_set(circuit, no_clifford_gates=False, atol=atol)\n\n all_ops = list(c_cliff.all_operations())\n\n def find_merge_point(\n start_i: int,\n string_op: ops.PauliStringPhasor,\n stop_at_cz: bool,\n ) -> Tuple[int, ops.PauliStringPhasor, int]:\n STOP = 0\n CONTINUE = 1\n SKIP = 2\n\n def continue_condition(\n op: ops.Operation, current_string: ops.PauliStringPhasor, is_first: bool\n ) -> int:\n if isinstance(op.gate, ops.SingleQubitCliffordGate):\n return CONTINUE if len(current_string.pauli_string) != 1 else STOP\n if isinstance(op.gate, ops.CZPowGate):\n return STOP if stop_at_cz else CONTINUE\n if (\n isinstance(op, ops.PauliStringPhasor)\n and len(op.qubits) == 1\n and (op.pauli_string[op.qubits[0]] == current_string.pauli_string[op.qubits[0]])\n ):\n return SKIP\n return STOP\n\n modified_op = string_op\n furthest_op = string_op\n furthest_i = start_i + 1\n num_passed_over = 0\n for i in range(start_i + 1, len(all_ops)):\n op = all_ops[i]\n if not set(op.qubits) & set(modified_op.qubits):\n # No qubits in common\n continue\n cont_cond = continue_condition(op, modified_op, i == start_i + 1)\n if cont_cond == STOP:\n if len(modified_op.pauli_string) == 1:\n furthest_op 
= modified_op\n furthest_i = i\n break\n if cont_cond == CONTINUE:\n modified_op = modified_op.pass_operations_over([op], after_to_before=True)\n num_passed_over += 1\n if len(modified_op.pauli_string) == 1:\n furthest_op = modified_op\n furthest_i = i + 1\n\n return furthest_i, furthest_op, num_passed_over\n\n def try_merge_clifford(cliff_op: ops.GateOperation, start_i: int) -> bool:\n (orig_qubit,) = cliff_op.qubits\n remaining_cliff_gate = ops.SingleQubitCliffordGate.I\n for pauli, quarter_turns in reversed(\n cast(ops.SingleQubitCliffordGate, cliff_op.gate).decompose_rotation()\n ):\n trans = remaining_cliff_gate.transform(pauli)\n pauli = trans.to\n quarter_turns *= -1 if trans.flip else 1\n string_op = ops.PauliStringPhasor(\n ops.PauliString(pauli(cliff_op.qubits[0])), exponent_neg=quarter_turns / 2\n )\n\n merge_i, merge_op, num_passed = find_merge_point(start_i, string_op, quarter_turns == 2)\n assert merge_i > start_i\n assert len(merge_op.pauli_string) == 1, 'PauliString length != 1'\n\n qubit, pauli = next(iter(merge_op.pauli_string.items()))\n quarter_turns = round(merge_op.exponent_relative * 2)\n if merge_op.pauli_string.coefficient not in [1, -1]:\n # TODO: Add support for more general phases.\n # Github issue: https://github.com/quantumlib/Cirq/issues/2962\n # Legacy coverage ignore, we need test code that hits this.\n # coverage: ignore\n raise NotImplementedError(\n 'Only +1/-1 pauli string coefficients currently supported'\n )\n quarter_turns *= int(merge_op.pauli_string.coefficient.real)\n quarter_turns %= 4\n part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)\n\n other_op = all_ops[merge_i] if merge_i < len(all_ops) else None\n if other_op is not None and qubit not in set(other_op.qubits):\n other_op = None\n\n if isinstance(other_op, ops.GateOperation) and isinstance(\n other_op.gate, ops.SingleQubitCliffordGate\n ):\n # Merge with another SingleQubitCliffordGate\n new_op = part_cliff_gate.merged_with(other_op.gate)(qubit)\n all_ops[merge_i] = new_op\n elif (\n isinstance(other_op, ops.GateOperation)\n and isinstance(other_op.gate, ops.CZPowGate)\n and other_op.gate.exponent == 1\n and quarter_turns == 2\n ):\n # Pass whole Pauli gate over CZ, possibly adding a Z gate\n if pauli != ops.pauli_gates.Z:\n other_qubit = other_op.qubits[other_op.qubits.index(qubit) - 1]\n all_ops.insert(merge_i + 1, ops.SingleQubitCliffordGate.Z(other_qubit))\n all_ops.insert(merge_i + 1, part_cliff_gate(qubit))\n elif isinstance(other_op, ops.PauliStringPhasor):\n # Pass over a non-Clifford gate\n mod_op = other_op.pass_operations_over([part_cliff_gate(qubit)])\n all_ops[merge_i] = mod_op\n all_ops.insert(merge_i + 1, part_cliff_gate(qubit))\n elif merge_i > start_i + 1 and num_passed > 0:\n # Moved Clifford through the circuit but nothing to merge\n all_ops.insert(merge_i, part_cliff_gate(qubit))\n else:\n # Couldn't move Clifford\n remaining_cliff_gate = remaining_cliff_gate.merged_with(part_cliff_gate)\n\n if remaining_cliff_gate == ops.SingleQubitCliffordGate.I:\n all_ops.pop(start_i)\n return True\n all_ops[start_i] = remaining_cliff_gate(orig_qubit)\n return False\n\n def try_merge_cz(cz_op: ops.GateOperation, start_i: int) -> int:\n \"\"\"Returns the number of operations removed at or before start_i.\"\"\"\n for i in reversed(range(start_i)):\n op = all_ops[i]\n if not set(cz_op.qubits) & set(op.qubits):\n # Don't share qubits\n # Keep looking\n continue\n elif not (\n isinstance(op, ops.GateOperation)\n and isinstance(op.gate, ops.CZPowGate)\n 
and op.gate.exponent == 1\n ):\n # Not a CZ gate\n return 0\n elif cz_op == op:\n # Cancel two CZ gates\n all_ops.pop(start_i)\n all_ops.pop(i)\n return 2\n else:\n # Two CZ gates that share one qubit\n # Pass through and keep looking\n continue # coverage: ignore\n # The above line is covered by test_remove_staggered_czs but the\n # coverage checker disagrees.\n return 0\n\n i = 0\n while i < len(all_ops):\n op = all_ops[i]\n if isinstance(op, ops.GateOperation) and isinstance(op.gate, ops.SingleQubitCliffordGate):\n if try_merge_clifford(op, i):\n i -= 1\n elif (\n isinstance(op, ops.GateOperation)\n and isinstance(op.gate, ops.CZPowGate)\n and op.gate.exponent == 1\n ):\n num_rm = try_merge_cz(op, i)\n i -= num_rm\n i += 1\n\n return circuits.Circuit(all_ops, strategy=circuits.InsertStrategy.EARLIEST)\n", "path": "cirq-core/cirq/contrib/paulistring/clifford_optimize.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Tuple, cast\n\nfrom cirq import ops, circuits\nfrom cirq.contrib.paulistring.convert_gate_set import converted_gate_set\n\n\ndef clifford_optimized_circuit(circuit: circuits.Circuit, atol: float = 1e-8) -> circuits.Circuit:\n # Convert to a circuit with SingleQubitCliffordGates,\n # CZs and other ignored gates\n c_cliff = converted_gate_set(circuit, no_clifford_gates=False, atol=atol)\n\n all_ops = list(c_cliff.all_operations())\n\n def find_merge_point(\n start_i: int,\n string_op: ops.PauliStringPhasor,\n stop_at_cz: bool,\n ) -> Tuple[int, ops.PauliStringPhasor, int]:\n STOP = 0\n CONTINUE = 1\n SKIP = 2\n\n def continue_condition(\n op: ops.Operation, current_string: ops.PauliStringPhasor, is_first: bool\n ) -> int:\n if isinstance(op.gate, ops.SingleQubitCliffordGate):\n return CONTINUE if len(current_string.pauli_string) != 1 else STOP\n if isinstance(op.gate, ops.CZPowGate):\n return STOP if stop_at_cz else CONTINUE\n if (\n isinstance(op, ops.PauliStringPhasor)\n and len(op.qubits) == 1\n and (op.pauli_string[op.qubits[0]] == current_string.pauli_string[op.qubits[0]])\n ):\n return SKIP\n return STOP\n\n modified_op = string_op\n furthest_op = string_op\n furthest_i = start_i + 1\n num_passed_over = 0\n for i in range(start_i + 1, len(all_ops)):\n op = all_ops[i]\n if not set(op.qubits) & set(modified_op.qubits):\n # No qubits in common\n continue\n cont_cond = continue_condition(op, modified_op, i == start_i + 1)\n if cont_cond == STOP:\n if len(modified_op.pauli_string) == 1:\n furthest_op = modified_op\n furthest_i = i\n break\n if cont_cond == CONTINUE:\n modified_op = modified_op.pass_operations_over([op], after_to_before=True)\n num_passed_over += 1\n if len(modified_op.pauli_string) == 1:\n furthest_op = modified_op\n furthest_i = i + 1\n\n return furthest_i, furthest_op, num_passed_over\n\n def try_merge_clifford(cliff_op: ops.GateOperation, start_i: int) -> bool:\n (orig_qubit,) = cliff_op.qubits\n remaining_cliff_gate = ops.SingleQubitCliffordGate.I\n for pauli, 
quarter_turns in reversed(\n cast(ops.SingleQubitCliffordGate, cliff_op.gate).decompose_rotation()\n ):\n trans = remaining_cliff_gate.transform(pauli)\n pauli = trans.to\n quarter_turns *= -1 if trans.flip else 1\n string_op = ops.PauliStringPhasor(\n ops.PauliString(pauli(cliff_op.qubits[0])), exponent_neg=quarter_turns / 2\n )\n\n merge_i, merge_op, num_passed = find_merge_point(start_i, string_op, quarter_turns == 2)\n assert merge_i > start_i\n assert len(merge_op.pauli_string) == 1, 'PauliString length != 1'\n\n qubit, pauli = next(iter(merge_op.pauli_string.items()))\n quarter_turns = round(merge_op.exponent_relative * 2)\n quarter_turns *= int(merge_op.pauli_string.coefficient.real)\n quarter_turns %= 4\n part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)\n\n other_op = all_ops[merge_i] if merge_i < len(all_ops) else None\n if other_op is not None and qubit not in set(other_op.qubits):\n other_op = None\n\n if isinstance(other_op, ops.GateOperation) and isinstance(\n other_op.gate, ops.SingleQubitCliffordGate\n ):\n # Merge with another SingleQubitCliffordGate\n new_op = part_cliff_gate.merged_with(other_op.gate)(qubit)\n all_ops[merge_i] = new_op\n elif (\n isinstance(other_op, ops.GateOperation)\n and isinstance(other_op.gate, ops.CZPowGate)\n and other_op.gate.exponent == 1\n and quarter_turns == 2\n ):\n # Pass whole Pauli gate over CZ, possibly adding a Z gate\n if pauli != ops.pauli_gates.Z:\n other_qubit = other_op.qubits[other_op.qubits.index(qubit) - 1]\n all_ops.insert(merge_i + 1, ops.SingleQubitCliffordGate.Z(other_qubit))\n all_ops.insert(merge_i + 1, part_cliff_gate(qubit))\n elif isinstance(other_op, ops.PauliStringPhasor):\n # Pass over a non-Clifford gate\n mod_op = other_op.pass_operations_over([part_cliff_gate(qubit)])\n all_ops[merge_i] = mod_op\n all_ops.insert(merge_i + 1, part_cliff_gate(qubit))\n elif merge_i > start_i + 1 and num_passed > 0:\n # Moved Clifford through the circuit but nothing to merge\n all_ops.insert(merge_i, part_cliff_gate(qubit))\n else:\n # Couldn't move Clifford\n remaining_cliff_gate = remaining_cliff_gate.merged_with(part_cliff_gate)\n\n if remaining_cliff_gate == ops.SingleQubitCliffordGate.I:\n all_ops.pop(start_i)\n return True\n all_ops[start_i] = remaining_cliff_gate(orig_qubit)\n return False\n\n def try_merge_cz(cz_op: ops.GateOperation, start_i: int) -> int:\n \"\"\"Returns the number of operations removed at or before start_i.\"\"\"\n for i in reversed(range(start_i)):\n op = all_ops[i]\n if not set(cz_op.qubits) & set(op.qubits):\n # Don't share qubits\n # Keep looking\n continue\n elif not (\n isinstance(op, ops.GateOperation)\n and isinstance(op.gate, ops.CZPowGate)\n and op.gate.exponent == 1\n ):\n # Not a CZ gate\n return 0\n elif cz_op == op:\n # Cancel two CZ gates\n all_ops.pop(start_i)\n all_ops.pop(i)\n return 2\n else:\n # Two CZ gates that share one qubit\n # Pass through and keep looking\n continue # coverage: ignore\n # The above line is covered by test_remove_staggered_czs but the\n # coverage checker disagrees.\n return 0\n\n i = 0\n while i < len(all_ops):\n op = all_ops[i]\n if isinstance(op, ops.GateOperation) and isinstance(op.gate, ops.SingleQubitCliffordGate):\n if try_merge_clifford(op, i):\n i -= 1\n elif (\n isinstance(op, ops.GateOperation)\n and isinstance(op.gate, ops.CZPowGate)\n and op.gate.exponent == 1\n ):\n num_rm = try_merge_cz(op, i)\n i -= num_rm\n i += 1\n\n return circuits.Circuit(all_ops, strategy=circuits.InsertStrategy.EARLIEST)\n", "path": 
"cirq-core/cirq/contrib/paulistring/clifford_optimize.py"}]} | 2,759 | 275 |
gh_patches_debug_11850 | rasdani/github-patches | git_diff | cobbler__cobbler-3397 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Backport] GHSA-mcg6-h362-cmq5
### Original feature issue
- Advisory: https://github.com/advisories/GHSA-mcg6-h362-cmq5
### Target release
- [ ] release33
- [x] release32
- [ ] release30
### Reason
Stabilization of Cobbler 3.2.x in the Fedora Ecosystem
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cobbler/modules/authentication/pam.py`
Content:
```
1 """
2 Authentication module that uses /etc/cobbler/auth.conf
3 Choice of authentication module is in /etc/cobbler/modules.conf
4
5 Copyright 2007-2009, Red Hat, Inc and Others
6 Michael DeHaan <michael.dehaan AT gmail>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301 USA
22
23 PAM python code based on the pam_python code created by Chris AtLee:
24 http://atlee.ca/software/pam/
25
26 #-----------------------------------------------
27 pam_python (c) 2007 Chris AtLee <[email protected]>
28 Licensed under the MIT license:
29 http://www.opensource.org/licenses/mit-license.php
30
31 PAM module for python
32
33 Provides an authenticate function that will allow the caller to authenticate
34 a user against the Pluggable Authentication Modules (PAM) on the system.
35
36 Implemented using ctypes, so no compilation is necessary.
37 """
38
39 from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof
40 from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
41 from ctypes.util import find_library
42
43 LIBPAM = CDLL(find_library("pam"))
44 LIBC = CDLL(find_library("c"))
45
46 CALLOC = LIBC.calloc
47 CALLOC.restype = c_void_p
48 CALLOC.argtypes = [c_uint, c_uint]
49
50 STRDUP = LIBC.strdup
51 STRDUP.argstypes = [c_char_p]
52 STRDUP.restype = POINTER(c_char) # NOT c_char_p !!!!
53
54 # Various constants
55 PAM_PROMPT_ECHO_OFF = 1
56 PAM_PROMPT_ECHO_ON = 2
57 PAM_ERROR_MSG = 3
58 PAM_TEXT_INFO = 4
59
60
61 def register() -> str:
62 """
63 The mandatory Cobbler module registration hook.
64 """
65 return "authn"
66
67
68 class PamHandle(Structure):
69 """
70 wrapper class for pam_handle_t
71 """
72 _fields_ = [("handle", c_void_p)]
73
74 def __init__(self):
75 Structure.__init__(self)
76 self.handle = 0
77
78
79 class PamMessage(Structure):
80 """
81 wrapper class for pam_message structure
82 """
83 _fields_ = [("msg_style", c_int), ("msg", c_char_p)]
84
85 def __repr__(self):
86 return "<PamMessage %i '%s'>" % (self.msg_style, self.msg)
87
88
89 class PamResponse(Structure):
90 """
91 wrapper class for pam_response structure
92 """
93 _fields_ = [("resp", c_char_p), ("resp_retcode", c_int)]
94
95 def __repr__(self):
96 return "<PamResponse %i '%s'>" % (self.resp_retcode, self.resp)
97
98
99 CONV_FUNC = CFUNCTYPE(c_int, c_int, POINTER(POINTER(PamMessage)), POINTER(POINTER(PamResponse)), c_void_p)
100
101
102 class PamConv(Structure):
103 """
104 wrapper class for pam_conv structure
105 """
106 _fields_ = [("conv", CONV_FUNC), ("appdata_ptr", c_void_p)]
107
108
109 PAM_START = LIBPAM.pam_start
110 PAM_START.restype = c_int
111 PAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv), POINTER(PamHandle)]
112
113 PAM_AUTHENTICATE = LIBPAM.pam_authenticate
114 PAM_AUTHENTICATE.restype = c_int
115 PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]
116
117
118 def authenticate(api_handle, username: str, password: str) -> bool:
119 """
120 :param api_handle: Used for resolving the the pam service name and getting the Logger.
121 :param username:The username to log in with.
122 :param password: The password to log in with.
123 :returns: True if the given username and password authenticate for the given service. Otherwise False
124 """
125
126 @CONV_FUNC
127 def my_conv(n_messages, messages, p_response, app_data):
128 """
129 Simple conversation function that responds to any prompt where the echo is off with the supplied password
130 """
131 # Create an array of n_messages response objects
132 addr = CALLOC(n_messages, sizeof(PamResponse))
133 p_response[0] = cast(addr, POINTER(PamResponse))
134 for i in range(n_messages):
135 if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
136 pw_copy = STRDUP(password.encode())
137 p_response.contents[i].resp = cast(pw_copy, c_char_p)
138 p_response.contents[i].resp_retcode = 0
139 return 0
140
141 try:
142 service = api_handle.settings().authn_pam_service
143 except:
144 service = 'login'
145
146 api_handle.logger.debug("authn_pam: PAM service is %s" % service)
147
148 handle = PamHandle()
149 conv = PamConv(my_conv, 0)
150 retval = PAM_START(service.encode(), username.encode(), pointer(conv), pointer(handle))
151
152 if retval != 0:
153 # TODO: This is not an authentication error, something has gone wrong starting up PAM
154 api_handle.logger.error("authn_pam: error initializing PAM library")
155 return False
156
157 retval = PAM_AUTHENTICATE(handle, 0)
158 return retval == 0
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cobbler/modules/authentication/pam.py b/cobbler/modules/authentication/pam.py
--- a/cobbler/modules/authentication/pam.py
+++ b/cobbler/modules/authentication/pam.py
@@ -114,6 +114,10 @@
PAM_AUTHENTICATE.restype = c_int
PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]
+PAM_ACCT_MGMT = LIBPAM.pam_acct_mgmt
+PAM_ACCT_MGMT.restype = c_int
+PAM_ACCT_MGMT.argtypes = [PamHandle, c_int]
+
def authenticate(api_handle, username: str, password: str) -> bool:
"""
@@ -155,4 +159,8 @@
return False
retval = PAM_AUTHENTICATE(handle, 0)
+
+ if retval == 0:
+ retval = PAM_ACCT_MGMT(handle, 0)
+
return retval == 0
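For context on the fix above: `pam_authenticate()` only verifies the supplied credentials, while `pam_acct_mgmt()` additionally enforces account validity (expired passwords, locked or disabled accounts, access restrictions), which is the gap the advisory addresses. A minimal, hedged sketch of that two-step check, written independently of Cobbler's module (the binding names here are illustrative, not Cobbler's):

```python
# Hedged sketch: why pam_acct_mgmt() must follow pam_authenticate().
# Both calls return 0 (PAM_SUCCESS) on success; skipping the second one
# lets credentials for an expired or locked account still log in.
from ctypes import CDLL, Structure, c_int, c_void_p
from ctypes.util import find_library

libpam = CDLL(find_library("pam"))

class PamHandle(Structure):
    _fields_ = [("handle", c_void_p)]

libpam.pam_authenticate.restype = c_int
libpam.pam_authenticate.argtypes = [PamHandle, c_int]
libpam.pam_acct_mgmt.restype = c_int
libpam.pam_acct_mgmt.argtypes = [PamHandle, c_int]

def pam_login_ok(handle: PamHandle) -> bool:
    """Return True only if the password is right AND the account is usable."""
    if libpam.pam_authenticate(handle, 0) != 0:
        return False  # wrong credentials
    return libpam.pam_acct_mgmt(handle, 0) == 0  # expired/locked accounts fail here
```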
| {"golden_diff": "diff --git a/cobbler/modules/authentication/pam.py b/cobbler/modules/authentication/pam.py\n--- a/cobbler/modules/authentication/pam.py\n+++ b/cobbler/modules/authentication/pam.py\n@@ -114,6 +114,10 @@\n PAM_AUTHENTICATE.restype = c_int\n PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]\n \n+PAM_ACCT_MGMT = LIBPAM.pam_acct_mgmt\n+PAM_ACCT_MGMT.restype = c_int\n+PAM_ACCT_MGMT.argtypes = [PamHandle, c_int]\n+\n \n def authenticate(api_handle, username: str, password: str) -> bool:\n \"\"\"\n@@ -155,4 +159,8 @@\n return False\n \n retval = PAM_AUTHENTICATE(handle, 0)\n+\n+ if retval == 0:\n+ retval = PAM_ACCT_MGMT(handle, 0)\n+\n return retval == 0\n", "issue": "[Backport] GHSA-mcg6-h362-cmq5\n### Original feature issue\r\n\r\n- Advisory: https://github.com/advisories/GHSA-mcg6-h362-cmq5\r\n\r\n### Target release\r\n\r\n- [ ] release33\r\n- [x] release32\r\n- [ ] release30\r\n\r\n### Reason\r\n\r\nStabilization of Cobbler 3.2.x in the Fedora Ecosystem\r\n\n", "before_files": [{"content": "\"\"\"\nAuthentication module that uses /etc/cobbler/auth.conf\nChoice of authentication module is in /etc/cobbler/modules.conf\n\nCopyright 2007-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\nPAM python code based on the pam_python code created by Chris AtLee:\nhttp://atlee.ca/software/pam/\n\n#-----------------------------------------------\npam_python (c) 2007 Chris AtLee <[email protected]>\nLicensed under the MIT license:\nhttp://www.opensource.org/licenses/mit-license.php\n\nPAM module for python\n\nProvides an authenticate function that will allow the caller to authenticate\na user against the Pluggable Authentication Modules (PAM) on the system.\n\nImplemented using ctypes, so no compilation is necessary.\n\"\"\"\n\nfrom ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof\nfrom ctypes import c_void_p, c_uint, c_char_p, c_char, c_int\nfrom ctypes.util import find_library\n\nLIBPAM = CDLL(find_library(\"pam\"))\nLIBC = CDLL(find_library(\"c\"))\n\nCALLOC = LIBC.calloc\nCALLOC.restype = c_void_p\nCALLOC.argtypes = [c_uint, c_uint]\n\nSTRDUP = LIBC.strdup\nSTRDUP.argstypes = [c_char_p]\nSTRDUP.restype = POINTER(c_char) # NOT c_char_p !!!!\n\n# Various constants\nPAM_PROMPT_ECHO_OFF = 1\nPAM_PROMPT_ECHO_ON = 2\nPAM_ERROR_MSG = 3\nPAM_TEXT_INFO = 4\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n \"\"\"\n return \"authn\"\n\n\nclass PamHandle(Structure):\n \"\"\"\n wrapper class for pam_handle_t\n \"\"\"\n _fields_ = [(\"handle\", c_void_p)]\n\n def __init__(self):\n Structure.__init__(self)\n self.handle = 0\n\n\nclass PamMessage(Structure):\n \"\"\"\n wrapper class for pam_message structure\n \"\"\"\n _fields_ = [(\"msg_style\", c_int), (\"msg\", c_char_p)]\n\n def __repr__(self):\n return \"<PamMessage %i '%s'>\" % 
(self.msg_style, self.msg)\n\n\nclass PamResponse(Structure):\n \"\"\"\n wrapper class for pam_response structure\n \"\"\"\n _fields_ = [(\"resp\", c_char_p), (\"resp_retcode\", c_int)]\n\n def __repr__(self):\n return \"<PamResponse %i '%s'>\" % (self.resp_retcode, self.resp)\n\n\nCONV_FUNC = CFUNCTYPE(c_int, c_int, POINTER(POINTER(PamMessage)), POINTER(POINTER(PamResponse)), c_void_p)\n\n\nclass PamConv(Structure):\n \"\"\"\n wrapper class for pam_conv structure\n \"\"\"\n _fields_ = [(\"conv\", CONV_FUNC), (\"appdata_ptr\", c_void_p)]\n\n\nPAM_START = LIBPAM.pam_start\nPAM_START.restype = c_int\nPAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv), POINTER(PamHandle)]\n\nPAM_AUTHENTICATE = LIBPAM.pam_authenticate\nPAM_AUTHENTICATE.restype = c_int\nPAM_AUTHENTICATE.argtypes = [PamHandle, c_int]\n\n\ndef authenticate(api_handle, username: str, password: str) -> bool:\n \"\"\"\n :param api_handle: Used for resolving the the pam service name and getting the Logger.\n :param username:The username to log in with.\n :param password: The password to log in with.\n :returns: True if the given username and password authenticate for the given service. Otherwise False\n \"\"\"\n\n @CONV_FUNC\n def my_conv(n_messages, messages, p_response, app_data):\n \"\"\"\n Simple conversation function that responds to any prompt where the echo is off with the supplied password\n \"\"\"\n # Create an array of n_messages response objects\n addr = CALLOC(n_messages, sizeof(PamResponse))\n p_response[0] = cast(addr, POINTER(PamResponse))\n for i in range(n_messages):\n if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:\n pw_copy = STRDUP(password.encode())\n p_response.contents[i].resp = cast(pw_copy, c_char_p)\n p_response.contents[i].resp_retcode = 0\n return 0\n\n try:\n service = api_handle.settings().authn_pam_service\n except:\n service = 'login'\n\n api_handle.logger.debug(\"authn_pam: PAM service is %s\" % service)\n\n handle = PamHandle()\n conv = PamConv(my_conv, 0)\n retval = PAM_START(service.encode(), username.encode(), pointer(conv), pointer(handle))\n\n if retval != 0:\n # TODO: This is not an authentication error, something has gone wrong starting up PAM\n api_handle.logger.error(\"authn_pam: error initializing PAM library\")\n return False\n\n retval = PAM_AUTHENTICATE(handle, 0)\n return retval == 0\n", "path": "cobbler/modules/authentication/pam.py"}], "after_files": [{"content": "\"\"\"\nAuthentication module that uses /etc/cobbler/auth.conf\nChoice of authentication module is in /etc/cobbler/modules.conf\n\nCopyright 2007-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\nPAM python code based on the pam_python code created by Chris AtLee:\nhttp://atlee.ca/software/pam/\n\n#-----------------------------------------------\npam_python (c) 2007 Chris AtLee <[email protected]>\nLicensed under the MIT license:\nhttp://www.opensource.org/licenses/mit-license.php\n\nPAM module for python\n\nProvides an authenticate function that will allow the caller to authenticate\na user against the Pluggable Authentication Modules (PAM) on the system.\n\nImplemented using ctypes, so no compilation is necessary.\n\"\"\"\n\nfrom ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof\nfrom ctypes import c_void_p, c_uint, c_char_p, c_char, c_int\nfrom ctypes.util import find_library\n\nLIBPAM = CDLL(find_library(\"pam\"))\nLIBC = CDLL(find_library(\"c\"))\n\nCALLOC = LIBC.calloc\nCALLOC.restype = c_void_p\nCALLOC.argtypes = [c_uint, c_uint]\n\nSTRDUP = LIBC.strdup\nSTRDUP.argstypes = [c_char_p]\nSTRDUP.restype = POINTER(c_char) # NOT c_char_p !!!!\n\n# Various constants\nPAM_PROMPT_ECHO_OFF = 1\nPAM_PROMPT_ECHO_ON = 2\nPAM_ERROR_MSG = 3\nPAM_TEXT_INFO = 4\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n \"\"\"\n return \"authn\"\n\n\nclass PamHandle(Structure):\n \"\"\"\n wrapper class for pam_handle_t\n \"\"\"\n _fields_ = [(\"handle\", c_void_p)]\n\n def __init__(self):\n Structure.__init__(self)\n self.handle = 0\n\n\nclass PamMessage(Structure):\n \"\"\"\n wrapper class for pam_message structure\n \"\"\"\n _fields_ = [(\"msg_style\", c_int), (\"msg\", c_char_p)]\n\n def __repr__(self):\n return \"<PamMessage %i '%s'>\" % (self.msg_style, self.msg)\n\n\nclass PamResponse(Structure):\n \"\"\"\n wrapper class for pam_response structure\n \"\"\"\n _fields_ = [(\"resp\", c_char_p), (\"resp_retcode\", c_int)]\n\n def __repr__(self):\n return \"<PamResponse %i '%s'>\" % (self.resp_retcode, self.resp)\n\n\nCONV_FUNC = CFUNCTYPE(c_int, c_int, POINTER(POINTER(PamMessage)), POINTER(POINTER(PamResponse)), c_void_p)\n\n\nclass PamConv(Structure):\n \"\"\"\n wrapper class for pam_conv structure\n \"\"\"\n _fields_ = [(\"conv\", CONV_FUNC), (\"appdata_ptr\", c_void_p)]\n\n\nPAM_START = LIBPAM.pam_start\nPAM_START.restype = c_int\nPAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv), POINTER(PamHandle)]\n\nPAM_AUTHENTICATE = LIBPAM.pam_authenticate\nPAM_AUTHENTICATE.restype = c_int\nPAM_AUTHENTICATE.argtypes = [PamHandle, c_int]\n\nPAM_ACCT_MGMT = LIBPAM.pam_acct_mgmt\nPAM_ACCT_MGMT.restype = c_int\nPAM_ACCT_MGMT.argtypes = [PamHandle, c_int]\n\n\ndef authenticate(api_handle, username: str, password: str) -> bool:\n \"\"\"\n :param api_handle: Used for resolving the the pam service name and getting the Logger.\n :param username:The username to log in with.\n :param password: The password to log in with.\n :returns: True if the given username and password authenticate for the given service. 
Otherwise False\n \"\"\"\n\n @CONV_FUNC\n def my_conv(n_messages, messages, p_response, app_data):\n \"\"\"\n Simple conversation function that responds to any prompt where the echo is off with the supplied password\n \"\"\"\n # Create an array of n_messages response objects\n addr = CALLOC(n_messages, sizeof(PamResponse))\n p_response[0] = cast(addr, POINTER(PamResponse))\n for i in range(n_messages):\n if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:\n pw_copy = STRDUP(password.encode())\n p_response.contents[i].resp = cast(pw_copy, c_char_p)\n p_response.contents[i].resp_retcode = 0\n return 0\n\n try:\n service = api_handle.settings().authn_pam_service\n except:\n service = 'login'\n\n api_handle.logger.debug(\"authn_pam: PAM service is %s\" % service)\n\n handle = PamHandle()\n conv = PamConv(my_conv, 0)\n retval = PAM_START(service.encode(), username.encode(), pointer(conv), pointer(handle))\n\n if retval != 0:\n # TODO: This is not an authentication error, something has gone wrong starting up PAM\n api_handle.logger.error(\"authn_pam: error initializing PAM library\")\n return False\n\n retval = PAM_AUTHENTICATE(handle, 0)\n\n if retval == 0:\n retval = PAM_ACCT_MGMT(handle, 0)\n\n return retval == 0\n", "path": "cobbler/modules/authentication/pam.py"}]} | 2,016 | 218 |
gh_patches_debug_17259 | rasdani/github-patches | git_diff | encode__httpx-629 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HTTP/2 download speeds / flow control settings
While having an initial look at HTTP/3, @jlaine dug out that we seem to have significantly slow uploading right now.
First thing to do would be to investigate and replicate, e.g. by comparing and contrasting a simple upload from `httpx` vs `requests/urllib3` - does it replicate trivially and reliably? This really *shouldn't* be an issue for us, so it will need some looking at.
Initial thoughts on where issues could be:
* We're sending too-small chunks, and always waiting for the network to fully drain on each chunk is negatively impacting performance.
* We're sending too-small chunks, and ending up with largish computation overhead relative to data transfer as a result.
* We've got some big unseen overhead in the sync-to-async marshalling.
* Something else?
--- END ISSUE ---
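As a replication aid for the comparison suggested in the issue, a rough benchmark sketch is below. It is not part of the repository; the URL and payload size are placeholders, and the `http2=True` flag reflects current httpx releases (older ones negotiated HTTP/2 automatically):

```python
# Hedged benchmark sketch: upload the same payload with requests/urllib3 and
# with httpx, and compare wall-clock time to see whether the slowdown replicates.
import time

import httpx
import requests

URL = "https://example.invalid/upload"  # placeholder; any endpoint accepting POST bodies
payload = b"x" * (8 * 1024 * 1024)      # 8 MiB body

start = time.perf_counter()
requests.post(URL, data=payload)
print(f"requests: {time.perf_counter() - start:.2f}s")

start = time.perf_counter()
with httpx.Client(http2=True) as client:  # http2=True is a current-httpx flag
    client.post(URL, data=payload)
print(f"httpx:    {time.perf_counter() - start:.2f}s")
```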
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/dispatch/http2.py`
Content:
```
1 import typing
2
3 import h2.connection
4 import h2.events
5 from h2.settings import SettingCodes, Settings
6
7 from ..concurrency.base import (
8 BaseEvent,
9 BaseSocketStream,
10 ConcurrencyBackend,
11 lookup_backend,
12 )
13 from ..config import Timeout
14 from ..exceptions import ProtocolError
15 from ..models import Request, Response
16 from ..utils import get_logger
17 from .base import OpenConnection
18
19 logger = get_logger(__name__)
20
21
22 class HTTP2Connection(OpenConnection):
23 READ_NUM_BYTES = 4096
24
25 def __init__(
26 self,
27 socket: BaseSocketStream,
28 backend: typing.Union[str, ConcurrencyBackend] = "auto",
29 on_release: typing.Callable = None,
30 ):
31 self.socket = socket
32 self.backend = lookup_backend(backend)
33 self.on_release = on_release
34 self.state = h2.connection.H2Connection()
35
36 self.streams = {} # type: typing.Dict[int, HTTP2Stream]
37 self.events = {} # type: typing.Dict[int, typing.List[h2.events.Event]]
38
39 self.init_started = False
40
41 @property
42 def is_http2(self) -> bool:
43 return True
44
45 @property
46 def init_complete(self) -> BaseEvent:
47 # We do this lazily, to make sure backend autodetection always
48 # runs within an async context.
49 if not hasattr(self, "_initialization_complete"):
50 self._initialization_complete = self.backend.create_event()
51 return self._initialization_complete
52
53 async def send(self, request: Request, timeout: Timeout = None) -> Response:
54 timeout = Timeout() if timeout is None else timeout
55
56 if not self.init_started:
57 # The very first stream is responsible for initiating the connection.
58 self.init_started = True
59 await self.send_connection_init(timeout)
60 stream_id = self.state.get_next_available_stream_id()
61 self.init_complete.set()
62 else:
63 # All other streams need to wait until the connection is established.
64 await self.init_complete.wait()
65 stream_id = self.state.get_next_available_stream_id()
66
67 stream = HTTP2Stream(stream_id=stream_id, connection=self)
68 self.streams[stream_id] = stream
69 self.events[stream_id] = []
70 return await stream.send(request, timeout)
71
72 async def send_connection_init(self, timeout: Timeout) -> None:
73 """
74 The HTTP/2 connection requires some initial setup before we can start
75 using individual request/response streams on it.
76 """
77
78 # Need to set these manually here instead of manipulating via
79 # __setitem__() otherwise the H2Connection will emit SettingsUpdate
80 # frames in addition to sending the undesired defaults.
81 self.state.local_settings = Settings(
82 client=True,
83 initial_values={
84 # Disable PUSH_PROMISE frames from the server since we don't do anything
85 # with them for now. Maybe when we support caching?
86 SettingCodes.ENABLE_PUSH: 0,
87 # These two are taken from h2 for safe defaults
88 SettingCodes.MAX_CONCURRENT_STREAMS: 100,
89 SettingCodes.MAX_HEADER_LIST_SIZE: 65536,
90 },
91 )
92
93 # Some websites (*cough* Yahoo *cough*) balk at this setting being
94 # present in the initial handshake since it's not defined in the original
95 # RFC despite the RFC mandating ignoring settings you don't know about.
96 del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL]
97
98 self.state.initiate_connection()
99 data_to_send = self.state.data_to_send()
100 await self.socket.write(data_to_send, timeout)
101
102 @property
103 def is_closed(self) -> bool:
104 return False
105
106 def is_connection_dropped(self) -> bool:
107 return self.socket.is_connection_dropped()
108
109 async def close(self) -> None:
110 await self.socket.close()
111
112 async def wait_for_outgoing_flow(self, stream_id: int, timeout: Timeout) -> int:
113 """
114 Returns the maximum allowable outgoing flow for a given stream.
115
116 If the allowable flow is zero, then waits on the network until
117 WindowUpdated frames have increased the flow rate.
118
119 https://tools.ietf.org/html/rfc7540#section-6.9
120 """
121 local_flow = self.state.local_flow_control_window(stream_id)
122 connection_flow = self.state.max_outbound_frame_size
123 flow = min(local_flow, connection_flow)
124 while flow == 0:
125 await self.receive_events(timeout)
126 local_flow = self.state.local_flow_control_window(stream_id)
127 connection_flow = self.state.max_outbound_frame_size
128 flow = min(local_flow, connection_flow)
129 return flow
130
131 async def wait_for_event(self, stream_id: int, timeout: Timeout) -> h2.events.Event:
132 """
133 Returns the next event for a given stream.
134
135 If no events are available yet, then waits on the network until
136 an event is available.
137 """
138 while not self.events[stream_id]:
139 await self.receive_events(timeout)
140 return self.events[stream_id].pop(0)
141
142 async def receive_events(self, timeout: Timeout) -> None:
143 """
144 Read some data from the network, and update the H2 state.
145 """
146 data = await self.socket.read(self.READ_NUM_BYTES, timeout)
147 events = self.state.receive_data(data)
148 for event in events:
149 event_stream_id = getattr(event, "stream_id", 0)
150 logger.trace(f"receive_event stream_id={event_stream_id} event={event!r}")
151
152 if hasattr(event, "error_code"):
153 raise ProtocolError(event)
154
155 if event_stream_id in self.events:
156 self.events[event_stream_id].append(event)
157
158 data_to_send = self.state.data_to_send()
159 await self.socket.write(data_to_send, timeout)
160
161 async def send_headers(
162 self,
163 stream_id: int,
164 headers: typing.List[typing.Tuple[bytes, bytes]],
165 timeout: Timeout,
166 ) -> None:
167 self.state.send_headers(stream_id, headers)
168 data_to_send = self.state.data_to_send()
169 await self.socket.write(data_to_send, timeout)
170
171 async def send_data(self, stream_id: int, chunk: bytes, timeout: Timeout) -> None:
172 self.state.send_data(stream_id, chunk)
173 data_to_send = self.state.data_to_send()
174 await self.socket.write(data_to_send, timeout)
175
176 async def end_stream(self, stream_id: int, timeout: Timeout) -> None:
177 self.state.end_stream(stream_id)
178 data_to_send = self.state.data_to_send()
179 await self.socket.write(data_to_send, timeout)
180
181 async def acknowledge_received_data(
182 self, stream_id: int, amount: int, timeout: Timeout
183 ) -> None:
184 self.state.acknowledge_received_data(amount, stream_id)
185 data_to_send = self.state.data_to_send()
186 await self.socket.write(data_to_send, timeout)
187
188 async def close_stream(self, stream_id: int) -> None:
189 del self.streams[stream_id]
190 del self.events[stream_id]
191
192 if not self.streams and self.on_release is not None:
193 await self.on_release()
194
195
196 class HTTP2Stream:
197 def __init__(self, stream_id: int, connection: HTTP2Connection) -> None:
198 self.stream_id = stream_id
199 self.connection = connection
200
201 async def send(self, request: Request, timeout: Timeout) -> Response:
202 # Send the request.
203 await self.send_headers(request, timeout)
204 await self.send_body(request, timeout)
205
206 # Receive the response.
207 status_code, headers = await self.receive_response(timeout)
208 content = self.body_iter(timeout)
209 return Response(
210 status_code=status_code,
211 http_version="HTTP/2",
212 headers=headers,
213 content=content,
214 on_close=self.close,
215 request=request,
216 )
217
218 async def send_headers(self, request: Request, timeout: Timeout) -> None:
219 headers = [
220 (b":method", request.method.encode("ascii")),
221 (b":authority", request.url.authority.encode("ascii")),
222 (b":scheme", request.url.scheme.encode("ascii")),
223 (b":path", request.url.full_path.encode("ascii")),
224 ] + [(k, v) for k, v in request.headers.raw if k != b"host"]
225
226 logger.trace(
227 f"send_headers "
228 f"stream_id={self.stream_id} "
229 f"method={request.method!r} "
230 f"target={request.url.full_path!r} "
231 f"headers={headers!r}"
232 )
233 await self.connection.send_headers(self.stream_id, headers, timeout)
234
235 async def send_body(self, request: Request, timeout: Timeout) -> None:
236 logger.trace(f"send_body stream_id={self.stream_id}")
237 async for data in request.stream():
238 while data:
239 max_flow = await self.connection.wait_for_outgoing_flow(
240 self.stream_id, timeout
241 )
242 chunk_size = min(len(data), max_flow)
243 chunk, data = data[:chunk_size], data[chunk_size:]
244 await self.connection.send_data(self.stream_id, chunk, timeout)
245
246 await self.connection.end_stream(self.stream_id, timeout)
247
248 async def receive_response(
249 self, timeout: Timeout
250 ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]:
251 """
252 Read the response status and headers from the network.
253 """
254 while True:
255 event = await self.connection.wait_for_event(self.stream_id, timeout)
256 if isinstance(event, h2.events.ResponseReceived):
257 break
258
259 status_code = 200
260 headers = []
261 for k, v in event.headers:
262 if k == b":status":
263 status_code = int(v.decode("ascii", errors="ignore"))
264 elif not k.startswith(b":"):
265 headers.append((k, v))
266
267 return (status_code, headers)
268
269 async def body_iter(self, timeout: Timeout) -> typing.AsyncIterator[bytes]:
270 while True:
271 event = await self.connection.wait_for_event(self.stream_id, timeout)
272 if isinstance(event, h2.events.DataReceived):
273 amount = event.flow_controlled_length
274 await self.connection.acknowledge_received_data(
275 self.stream_id, amount, timeout
276 )
277 yield event.data
278 elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)):
279 break
280
281 async def close(self) -> None:
282 await self.connection.close_stream(self.stream_id)
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/dispatch/http2.py b/httpx/dispatch/http2.py
--- a/httpx/dispatch/http2.py
+++ b/httpx/dispatch/http2.py
@@ -96,6 +96,7 @@
del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL]
self.state.initiate_connection()
+ self.state.increment_flow_control_window(2 ** 24)
data_to_send = self.state.data_to_send()
await self.socket.write(data_to_send, timeout)
@@ -165,6 +166,7 @@
timeout: Timeout,
) -> None:
self.state.send_headers(stream_id, headers)
+ self.state.increment_flow_control_window(2 ** 24, stream_id=stream_id)
data_to_send = self.state.data_to_send()
await self.socket.write(data_to_send, timeout)
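For context: `increment_flow_control_window(2 ** 24)` enlarges the receive window advertised to the server, at the connection level and (in `send_headers`) per stream, from the RFC 7540 default of 65,535 bytes to roughly 16 MiB, so the server no longer stalls waiting for `WINDOW_UPDATE` acknowledgements every 64 KiB of response data. A hedged, standalone illustration against the `h2` state machine (no sockets; header values are placeholders):

```python
# Hedged sketch: effect of raising the advertised receive window in h2.
import h2.connection

conn = h2.connection.H2Connection()
conn.initiate_connection()
conn.send_headers(1, [(":method", "GET"), (":path", "/"),
                      (":scheme", "https"), (":authority", "example.com")])

print(conn.remote_flow_control_window(1))                 # 65535, the RFC 7540 default
conn.increment_flow_control_window(2 ** 24)               # connection-level
conn.increment_flow_control_window(2 ** 24, stream_id=1)  # stream-level
print(conn.remote_flow_control_window(1))                 # ~16 MiB the peer may now send
# The WINDOW_UPDATE frames generated above only take effect once
# conn.data_to_send() is written to the socket, as the patched code does.
```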
| {"golden_diff": "diff --git a/httpx/dispatch/http2.py b/httpx/dispatch/http2.py\n--- a/httpx/dispatch/http2.py\n+++ b/httpx/dispatch/http2.py\n@@ -96,6 +96,7 @@\n del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL]\n \n self.state.initiate_connection()\n+ self.state.increment_flow_control_window(2 ** 24)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n \n@@ -165,6 +166,7 @@\n timeout: Timeout,\n ) -> None:\n self.state.send_headers(stream_id, headers)\n+ self.state.increment_flow_control_window(2 ** 24, stream_id=stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n", "issue": "HTTP/2 download speeds / flow control settings\nWhich having an initial look at HTTP/3, @jlaine dug out that we seem to have signiifcantly slow uploading right now.\r\n\r\nFirst thing to do would be to investigate and replicate, by eg. compare and contrast a simple upload from `httpx` vs `requests/urllib3` - does it replicate trivially and reliably? This really *shouldn't* be an issue for us so will need some looking at.\r\n\r\nInitial thoughts on where issues could be:\r\n\r\n* We're sending too-small chunks, and always waiting for the network to fully drain on each chunk is negatively impacting performance.\r\n* We're sending too-small chunks, and ending up with largish computation overhead relative to data transfer as a result.\r\n* We've got some big unseen overhead in the sync-to-async marshalling.\r\n* Something else?\n", "before_files": [{"content": "import typing\n\nimport h2.connection\nimport h2.events\nfrom h2.settings import SettingCodes, Settings\n\nfrom ..concurrency.base import (\n BaseEvent,\n BaseSocketStream,\n ConcurrencyBackend,\n lookup_backend,\n)\nfrom ..config import Timeout\nfrom ..exceptions import ProtocolError\nfrom ..models import Request, Response\nfrom ..utils import get_logger\nfrom .base import OpenConnection\n\nlogger = get_logger(__name__)\n\n\nclass HTTP2Connection(OpenConnection):\n READ_NUM_BYTES = 4096\n\n def __init__(\n self,\n socket: BaseSocketStream,\n backend: typing.Union[str, ConcurrencyBackend] = \"auto\",\n on_release: typing.Callable = None,\n ):\n self.socket = socket\n self.backend = lookup_backend(backend)\n self.on_release = on_release\n self.state = h2.connection.H2Connection()\n\n self.streams = {} # type: typing.Dict[int, HTTP2Stream]\n self.events = {} # type: typing.Dict[int, typing.List[h2.events.Event]]\n\n self.init_started = False\n\n @property\n def is_http2(self) -> bool:\n return True\n\n @property\n def init_complete(self) -> BaseEvent:\n # We do this lazily, to make sure backend autodetection always\n # runs within an async context.\n if not hasattr(self, \"_initialization_complete\"):\n self._initialization_complete = self.backend.create_event()\n return self._initialization_complete\n\n async def send(self, request: Request, timeout: Timeout = None) -> Response:\n timeout = Timeout() if timeout is None else timeout\n\n if not self.init_started:\n # The very first stream is responsible for initiating the connection.\n self.init_started = True\n await self.send_connection_init(timeout)\n stream_id = self.state.get_next_available_stream_id()\n self.init_complete.set()\n else:\n # All other streams need to wait until the connection is established.\n await self.init_complete.wait()\n stream_id = self.state.get_next_available_stream_id()\n\n stream = HTTP2Stream(stream_id=stream_id, connection=self)\n self.streams[stream_id] = 
stream\n self.events[stream_id] = []\n return await stream.send(request, timeout)\n\n async def send_connection_init(self, timeout: Timeout) -> None:\n \"\"\"\n The HTTP/2 connection requires some initial setup before we can start\n using individual request/response streams on it.\n \"\"\"\n\n # Need to set these manually here instead of manipulating via\n # __setitem__() otherwise the H2Connection will emit SettingsUpdate\n # frames in addition to sending the undesired defaults.\n self.state.local_settings = Settings(\n client=True,\n initial_values={\n # Disable PUSH_PROMISE frames from the server since we don't do anything\n # with them for now. Maybe when we support caching?\n SettingCodes.ENABLE_PUSH: 0,\n # These two are taken from h2 for safe defaults\n SettingCodes.MAX_CONCURRENT_STREAMS: 100,\n SettingCodes.MAX_HEADER_LIST_SIZE: 65536,\n },\n )\n\n # Some websites (*cough* Yahoo *cough*) balk at this setting being\n # present in the initial handshake since it's not defined in the original\n # RFC despite the RFC mandating ignoring settings you don't know about.\n del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL]\n\n self.state.initiate_connection()\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n @property\n def is_closed(self) -> bool:\n return False\n\n def is_connection_dropped(self) -> bool:\n return self.socket.is_connection_dropped()\n\n async def close(self) -> None:\n await self.socket.close()\n\n async def wait_for_outgoing_flow(self, stream_id: int, timeout: Timeout) -> int:\n \"\"\"\n Returns the maximum allowable outgoing flow for a given stream.\n\n If the allowable flow is zero, then waits on the network until\n WindowUpdated frames have increased the flow rate.\n\n https://tools.ietf.org/html/rfc7540#section-6.9\n \"\"\"\n local_flow = self.state.local_flow_control_window(stream_id)\n connection_flow = self.state.max_outbound_frame_size\n flow = min(local_flow, connection_flow)\n while flow == 0:\n await self.receive_events(timeout)\n local_flow = self.state.local_flow_control_window(stream_id)\n connection_flow = self.state.max_outbound_frame_size\n flow = min(local_flow, connection_flow)\n return flow\n\n async def wait_for_event(self, stream_id: int, timeout: Timeout) -> h2.events.Event:\n \"\"\"\n Returns the next event for a given stream.\n\n If no events are available yet, then waits on the network until\n an event is available.\n \"\"\"\n while not self.events[stream_id]:\n await self.receive_events(timeout)\n return self.events[stream_id].pop(0)\n\n async def receive_events(self, timeout: Timeout) -> None:\n \"\"\"\n Read some data from the network, and update the H2 state.\n \"\"\"\n data = await self.socket.read(self.READ_NUM_BYTES, timeout)\n events = self.state.receive_data(data)\n for event in events:\n event_stream_id = getattr(event, \"stream_id\", 0)\n logger.trace(f\"receive_event stream_id={event_stream_id} event={event!r}\")\n\n if hasattr(event, \"error_code\"):\n raise ProtocolError(event)\n\n if event_stream_id in self.events:\n self.events[event_stream_id].append(event)\n\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def send_headers(\n self,\n stream_id: int,\n headers: typing.List[typing.Tuple[bytes, bytes]],\n timeout: Timeout,\n ) -> None:\n self.state.send_headers(stream_id, headers)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def send_data(self, 
stream_id: int, chunk: bytes, timeout: Timeout) -> None:\n self.state.send_data(stream_id, chunk)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def end_stream(self, stream_id: int, timeout: Timeout) -> None:\n self.state.end_stream(stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def acknowledge_received_data(\n self, stream_id: int, amount: int, timeout: Timeout\n ) -> None:\n self.state.acknowledge_received_data(amount, stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def close_stream(self, stream_id: int) -> None:\n del self.streams[stream_id]\n del self.events[stream_id]\n\n if not self.streams and self.on_release is not None:\n await self.on_release()\n\n\nclass HTTP2Stream:\n def __init__(self, stream_id: int, connection: HTTP2Connection) -> None:\n self.stream_id = stream_id\n self.connection = connection\n\n async def send(self, request: Request, timeout: Timeout) -> Response:\n # Send the request.\n await self.send_headers(request, timeout)\n await self.send_body(request, timeout)\n\n # Receive the response.\n status_code, headers = await self.receive_response(timeout)\n content = self.body_iter(timeout)\n return Response(\n status_code=status_code,\n http_version=\"HTTP/2\",\n headers=headers,\n content=content,\n on_close=self.close,\n request=request,\n )\n\n async def send_headers(self, request: Request, timeout: Timeout) -> None:\n headers = [\n (b\":method\", request.method.encode(\"ascii\")),\n (b\":authority\", request.url.authority.encode(\"ascii\")),\n (b\":scheme\", request.url.scheme.encode(\"ascii\")),\n (b\":path\", request.url.full_path.encode(\"ascii\")),\n ] + [(k, v) for k, v in request.headers.raw if k != b\"host\"]\n\n logger.trace(\n f\"send_headers \"\n f\"stream_id={self.stream_id} \"\n f\"method={request.method!r} \"\n f\"target={request.url.full_path!r} \"\n f\"headers={headers!r}\"\n )\n await self.connection.send_headers(self.stream_id, headers, timeout)\n\n async def send_body(self, request: Request, timeout: Timeout) -> None:\n logger.trace(f\"send_body stream_id={self.stream_id}\")\n async for data in request.stream():\n while data:\n max_flow = await self.connection.wait_for_outgoing_flow(\n self.stream_id, timeout\n )\n chunk_size = min(len(data), max_flow)\n chunk, data = data[:chunk_size], data[chunk_size:]\n await self.connection.send_data(self.stream_id, chunk, timeout)\n\n await self.connection.end_stream(self.stream_id, timeout)\n\n async def receive_response(\n self, timeout: Timeout\n ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]:\n \"\"\"\n Read the response status and headers from the network.\n \"\"\"\n while True:\n event = await self.connection.wait_for_event(self.stream_id, timeout)\n if isinstance(event, h2.events.ResponseReceived):\n break\n\n status_code = 200\n headers = []\n for k, v in event.headers:\n if k == b\":status\":\n status_code = int(v.decode(\"ascii\", errors=\"ignore\"))\n elif not k.startswith(b\":\"):\n headers.append((k, v))\n\n return (status_code, headers)\n\n async def body_iter(self, timeout: Timeout) -> typing.AsyncIterator[bytes]:\n while True:\n event = await self.connection.wait_for_event(self.stream_id, timeout)\n if isinstance(event, h2.events.DataReceived):\n amount = event.flow_controlled_length\n await self.connection.acknowledge_received_data(\n self.stream_id, amount, timeout\n )\n yield event.data\n 
elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)):\n break\n\n async def close(self) -> None:\n await self.connection.close_stream(self.stream_id)\n", "path": "httpx/dispatch/http2.py"}], "after_files": [{"content": "import typing\n\nimport h2.connection\nimport h2.events\nfrom h2.settings import SettingCodes, Settings\n\nfrom ..concurrency.base import (\n BaseEvent,\n BaseSocketStream,\n ConcurrencyBackend,\n lookup_backend,\n)\nfrom ..config import Timeout\nfrom ..exceptions import ProtocolError\nfrom ..models import Request, Response\nfrom ..utils import get_logger\nfrom .base import OpenConnection\n\nlogger = get_logger(__name__)\n\n\nclass HTTP2Connection(OpenConnection):\n READ_NUM_BYTES = 4096\n\n def __init__(\n self,\n socket: BaseSocketStream,\n backend: typing.Union[str, ConcurrencyBackend] = \"auto\",\n on_release: typing.Callable = None,\n ):\n self.socket = socket\n self.backend = lookup_backend(backend)\n self.on_release = on_release\n self.state = h2.connection.H2Connection()\n\n self.streams = {} # type: typing.Dict[int, HTTP2Stream]\n self.events = {} # type: typing.Dict[int, typing.List[h2.events.Event]]\n\n self.init_started = False\n\n @property\n def is_http2(self) -> bool:\n return True\n\n @property\n def init_complete(self) -> BaseEvent:\n # We do this lazily, to make sure backend autodetection always\n # runs within an async context.\n if not hasattr(self, \"_initialization_complete\"):\n self._initialization_complete = self.backend.create_event()\n return self._initialization_complete\n\n async def send(self, request: Request, timeout: Timeout = None) -> Response:\n timeout = Timeout() if timeout is None else timeout\n\n if not self.init_started:\n # The very first stream is responsible for initiating the connection.\n self.init_started = True\n await self.send_connection_init(timeout)\n stream_id = self.state.get_next_available_stream_id()\n self.init_complete.set()\n else:\n # All other streams need to wait until the connection is established.\n await self.init_complete.wait()\n stream_id = self.state.get_next_available_stream_id()\n\n stream = HTTP2Stream(stream_id=stream_id, connection=self)\n self.streams[stream_id] = stream\n self.events[stream_id] = []\n return await stream.send(request, timeout)\n\n async def send_connection_init(self, timeout: Timeout) -> None:\n \"\"\"\n The HTTP/2 connection requires some initial setup before we can start\n using individual request/response streams on it.\n \"\"\"\n\n # Need to set these manually here instead of manipulating via\n # __setitem__() otherwise the H2Connection will emit SettingsUpdate\n # frames in addition to sending the undesired defaults.\n self.state.local_settings = Settings(\n client=True,\n initial_values={\n # Disable PUSH_PROMISE frames from the server since we don't do anything\n # with them for now. 
Maybe when we support caching?\n SettingCodes.ENABLE_PUSH: 0,\n # These two are taken from h2 for safe defaults\n SettingCodes.MAX_CONCURRENT_STREAMS: 100,\n SettingCodes.MAX_HEADER_LIST_SIZE: 65536,\n },\n )\n\n # Some websites (*cough* Yahoo *cough*) balk at this setting being\n # present in the initial handshake since it's not defined in the original\n # RFC despite the RFC mandating ignoring settings you don't know about.\n del self.state.local_settings[h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL]\n\n self.state.initiate_connection()\n self.state.increment_flow_control_window(2 ** 24)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n @property\n def is_closed(self) -> bool:\n return False\n\n def is_connection_dropped(self) -> bool:\n return self.socket.is_connection_dropped()\n\n async def close(self) -> None:\n await self.socket.close()\n\n async def wait_for_outgoing_flow(self, stream_id: int, timeout: Timeout) -> int:\n \"\"\"\n Returns the maximum allowable outgoing flow for a given stream.\n\n If the allowable flow is zero, then waits on the network until\n WindowUpdated frames have increased the flow rate.\n\n https://tools.ietf.org/html/rfc7540#section-6.9\n \"\"\"\n local_flow = self.state.local_flow_control_window(stream_id)\n connection_flow = self.state.max_outbound_frame_size\n flow = min(local_flow, connection_flow)\n while flow == 0:\n await self.receive_events(timeout)\n local_flow = self.state.local_flow_control_window(stream_id)\n connection_flow = self.state.max_outbound_frame_size\n flow = min(local_flow, connection_flow)\n return flow\n\n async def wait_for_event(self, stream_id: int, timeout: Timeout) -> h2.events.Event:\n \"\"\"\n Returns the next event for a given stream.\n\n If no events are available yet, then waits on the network until\n an event is available.\n \"\"\"\n while not self.events[stream_id]:\n await self.receive_events(timeout)\n return self.events[stream_id].pop(0)\n\n async def receive_events(self, timeout: Timeout) -> None:\n \"\"\"\n Read some data from the network, and update the H2 state.\n \"\"\"\n data = await self.socket.read(self.READ_NUM_BYTES, timeout)\n events = self.state.receive_data(data)\n for event in events:\n event_stream_id = getattr(event, \"stream_id\", 0)\n logger.trace(f\"receive_event stream_id={event_stream_id} event={event!r}\")\n\n if hasattr(event, \"error_code\"):\n raise ProtocolError(event)\n\n if event_stream_id in self.events:\n self.events[event_stream_id].append(event)\n\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def send_headers(\n self,\n stream_id: int,\n headers: typing.List[typing.Tuple[bytes, bytes]],\n timeout: Timeout,\n ) -> None:\n self.state.send_headers(stream_id, headers)\n self.state.increment_flow_control_window(2 ** 24, stream_id=stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def send_data(self, stream_id: int, chunk: bytes, timeout: Timeout) -> None:\n self.state.send_data(stream_id, chunk)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def end_stream(self, stream_id: int, timeout: Timeout) -> None:\n self.state.end_stream(stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def acknowledge_received_data(\n self, stream_id: int, amount: int, timeout: Timeout\n ) -> None:\n 
self.state.acknowledge_received_data(amount, stream_id)\n data_to_send = self.state.data_to_send()\n await self.socket.write(data_to_send, timeout)\n\n async def close_stream(self, stream_id: int) -> None:\n del self.streams[stream_id]\n del self.events[stream_id]\n\n if not self.streams and self.on_release is not None:\n await self.on_release()\n\n\nclass HTTP2Stream:\n def __init__(self, stream_id: int, connection: HTTP2Connection) -> None:\n self.stream_id = stream_id\n self.connection = connection\n\n async def send(self, request: Request, timeout: Timeout) -> Response:\n # Send the request.\n await self.send_headers(request, timeout)\n await self.send_body(request, timeout)\n\n # Receive the response.\n status_code, headers = await self.receive_response(timeout)\n content = self.body_iter(timeout)\n return Response(\n status_code=status_code,\n http_version=\"HTTP/2\",\n headers=headers,\n content=content,\n on_close=self.close,\n request=request,\n )\n\n async def send_headers(self, request: Request, timeout: Timeout) -> None:\n headers = [\n (b\":method\", request.method.encode(\"ascii\")),\n (b\":authority\", request.url.authority.encode(\"ascii\")),\n (b\":scheme\", request.url.scheme.encode(\"ascii\")),\n (b\":path\", request.url.full_path.encode(\"ascii\")),\n ] + [(k, v) for k, v in request.headers.raw if k != b\"host\"]\n\n logger.trace(\n f\"send_headers \"\n f\"stream_id={self.stream_id} \"\n f\"method={request.method!r} \"\n f\"target={request.url.full_path!r} \"\n f\"headers={headers!r}\"\n )\n await self.connection.send_headers(self.stream_id, headers, timeout)\n\n async def send_body(self, request: Request, timeout: Timeout) -> None:\n logger.trace(f\"send_body stream_id={self.stream_id}\")\n async for data in request.stream():\n while data:\n max_flow = await self.connection.wait_for_outgoing_flow(\n self.stream_id, timeout\n )\n chunk_size = min(len(data), max_flow)\n chunk, data = data[:chunk_size], data[chunk_size:]\n await self.connection.send_data(self.stream_id, chunk, timeout)\n\n await self.connection.end_stream(self.stream_id, timeout)\n\n async def receive_response(\n self, timeout: Timeout\n ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]:\n \"\"\"\n Read the response status and headers from the network.\n \"\"\"\n while True:\n event = await self.connection.wait_for_event(self.stream_id, timeout)\n if isinstance(event, h2.events.ResponseReceived):\n break\n\n status_code = 200\n headers = []\n for k, v in event.headers:\n if k == b\":status\":\n status_code = int(v.decode(\"ascii\", errors=\"ignore\"))\n elif not k.startswith(b\":\"):\n headers.append((k, v))\n\n return (status_code, headers)\n\n async def body_iter(self, timeout: Timeout) -> typing.AsyncIterator[bytes]:\n while True:\n event = await self.connection.wait_for_event(self.stream_id, timeout)\n if isinstance(event, h2.events.DataReceived):\n amount = event.flow_controlled_length\n await self.connection.acknowledge_received_data(\n self.stream_id, amount, timeout\n )\n yield event.data\n elif isinstance(event, (h2.events.StreamEnded, h2.events.StreamReset)):\n break\n\n async def close(self) -> None:\n await self.connection.close_stream(self.stream_id)\n", "path": "httpx/dispatch/http2.py"}]} | 3,453 | 192 |
gh_patches_debug_23492 | rasdani/github-patches | git_diff | getsentry__sentry-45670 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crons: Increase quota call API Sentry monitors "check-in"
### Problem Statement
Due to multi-tenancy, the quota for calls to the Sentry monitors "check-in" API was reached:
"You are attempting to use this endpoint too frequently. Limit is 40 requests in 1 seconds"
### Solution Brainstorm
Increase the quota for calls to the Sentry monitors "check-in" API to at least 60 requests in 1 second
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py`
Content:
```
1 from __future__ import annotations
2
3 from django.db import transaction
4 from drf_spectacular.utils import extend_schema
5 from rest_framework.exceptions import Throttled
6 from rest_framework.request import Request
7 from rest_framework.response import Response
8
9 from sentry import ratelimits
10 from sentry.api.base import region_silo_endpoint
11 from sentry.api.serializers import serialize
12 from sentry.apidocs.constants import (
13 RESPONSE_BAD_REQUEST,
14 RESPONSE_FORBIDDEN,
15 RESPONSE_NOTFOUND,
16 RESPONSE_UNAUTHORIZED,
17 )
18 from sentry.apidocs.parameters import GLOBAL_PARAMS, MONITOR_PARAMS
19 from sentry.apidocs.utils import inline_sentry_response_serializer
20 from sentry.models import Environment, Project, ProjectKey
21 from sentry.monitors.models import (
22 CheckInStatus,
23 Monitor,
24 MonitorCheckIn,
25 MonitorEnvironment,
26 MonitorStatus,
27 )
28 from sentry.monitors.serializers import MonitorCheckInSerializerResponse
29 from sentry.monitors.validators import MonitorCheckInValidator
30 from sentry.signals import first_cron_checkin_received, first_cron_monitor_created
31 from sentry.utils import metrics
32
33 from .base import MonitorIngestEndpoint
34
35 CHECKIN_QUOTA_LIMIT = 5
36 CHECKIN_QUOTA_WINDOW = 60
37
38
39 @region_silo_endpoint
40 @extend_schema(tags=["Crons"])
41 class MonitorIngestCheckInIndexEndpoint(MonitorIngestEndpoint):
42 public = {"POST"}
43
44 @extend_schema(
45 operation_id="Create a new check-in",
46 parameters=[
47 GLOBAL_PARAMS.ORG_SLUG,
48 MONITOR_PARAMS.MONITOR_ID,
49 ],
50 request=MonitorCheckInValidator,
51 responses={
52 200: inline_sentry_response_serializer(
53 "MonitorCheckIn", MonitorCheckInSerializerResponse
54 ),
55 201: inline_sentry_response_serializer(
56 "MonitorCheckIn", MonitorCheckInSerializerResponse
57 ),
58 400: RESPONSE_BAD_REQUEST,
59 401: RESPONSE_UNAUTHORIZED,
60 403: RESPONSE_FORBIDDEN,
61 404: RESPONSE_NOTFOUND,
62 },
63 )
64 def post(
65 self, request: Request, project, monitor, organization_slug: str | None = None
66 ) -> Response:
67 """
68 Creates a new check-in for a monitor.
69
70 If `status` is not present, it will be assumed that the check-in is starting, and be marked as `in_progress`.
71
72 To achieve a ping-like behavior, you can simply define `status` and optionally `duration` and
73 this check-in will be automatically marked as finished.
74
75 Note: If a DSN is utilized for authentication, the response will be limited in details.
76 """
77 if monitor.status in [MonitorStatus.PENDING_DELETION, MonitorStatus.DELETION_IN_PROGRESS]:
78 return self.respond(status=404)
79
80 serializer = MonitorCheckInValidator(
81 data=request.data, context={"project": project, "request": request}
82 )
83 if not serializer.is_valid():
84 return self.respond(serializer.errors, status=400)
85
86 if ratelimits.is_limited(
87 f"monitor-checkins:{monitor.id}",
88 limit=CHECKIN_QUOTA_LIMIT,
89 window=CHECKIN_QUOTA_WINDOW,
90 ):
91 metrics.incr("monitors.checkin.dropped.ratelimited")
92 raise Throttled(
93 detail="Rate limited, please send no more than 5 checkins per minute per monitor"
94 )
95
96 result = serializer.validated_data
97
98 with transaction.atomic():
99 environment_name = result.get("environment")
100 if not environment_name:
101 environment_name = "production"
102
103 environment = Environment.get_or_create(project=project, name=environment_name)
104
105 monitorenvironment_defaults = {
106 "status": monitor.status,
107 "next_checkin": monitor.next_checkin,
108 "last_checkin": monitor.last_checkin,
109 }
110 monitor_environment = MonitorEnvironment.objects.get_or_create(
111 monitor=monitor, environment=environment, defaults=monitorenvironment_defaults
112 )[0]
113
114 checkin = MonitorCheckIn.objects.create(
115 project_id=project.id,
116 monitor_id=monitor.id,
117 monitor_environment=monitor_environment,
118 duration=result.get("duration"),
119 status=getattr(CheckInStatus, result["status"].upper()),
120 )
121
122 if not project.flags.has_cron_checkins:
123 # Backfill users that already have cron monitors
124 if not project.flags.has_cron_monitors:
125 first_cron_monitor_created.send_robust(
126 project=project, user=None, sender=Project
127 )
128 first_cron_checkin_received.send_robust(
129 project=project, monitor_id=str(monitor.guid), sender=Project
130 )
131
132 if checkin.status == CheckInStatus.ERROR and monitor.status != MonitorStatus.DISABLED:
133 monitor_failed = monitor.mark_failed(last_checkin=checkin.date_added)
134 monitor_environment.mark_failed(last_checkin=checkin.date_added)
135 if not monitor_failed:
136 if isinstance(request.auth, ProjectKey):
137 return self.respond(status=200)
138 return self.respond(serialize(checkin, request.user), status=200)
139 else:
140 monitor_params = {
141 "last_checkin": checkin.date_added,
142 "next_checkin": monitor.get_next_scheduled_checkin(checkin.date_added),
143 }
144 if checkin.status == CheckInStatus.OK and monitor.status != MonitorStatus.DISABLED:
145 monitor_params["status"] = MonitorStatus.OK
146 Monitor.objects.filter(id=monitor.id).exclude(
147 last_checkin__gt=checkin.date_added
148 ).update(**monitor_params)
149 MonitorEnvironment.objects.filter(id=monitor_environment.id).exclude(
150 last_checkin__gt=checkin.date_added
151 ).update(**monitor_params)
152
153 if isinstance(request.auth, ProjectKey):
154 return self.respond({"id": str(checkin.guid)}, status=201)
155
156 response = self.respond(serialize(checkin, request.user), status=201)
157 # TODO(dcramer): this should return a single aboslute uri, aka ALWAYS including org domains if enabled
158 # TODO(dcramer): both of these are patterns that we should make easier to accomplish in other endpoints
159 response["Link"] = self.build_link_header(request, "checkins/latest/", rel="latest")
160 response["Location"] = request.build_absolute_uri(f"checkins/{checkin.guid}/")
161 return response
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py b/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py
--- a/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py
+++ b/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py
@@ -27,7 +27,9 @@
)
from sentry.monitors.serializers import MonitorCheckInSerializerResponse
from sentry.monitors.validators import MonitorCheckInValidator
+from sentry.ratelimits.config import RateLimitConfig
from sentry.signals import first_cron_checkin_received, first_cron_monitor_created
+from sentry.types.ratelimit import RateLimit, RateLimitCategory
from sentry.utils import metrics
from .base import MonitorIngestEndpoint
@@ -41,6 +43,16 @@
class MonitorIngestCheckInIndexEndpoint(MonitorIngestEndpoint):
public = {"POST"}
+ rate_limits = RateLimitConfig(
+ limit_overrides={
+ "POST": {
+ RateLimitCategory.IP: RateLimit(40 * 60, 60),
+ RateLimitCategory.USER: RateLimit(40 * 60, 60),
+ RateLimitCategory.ORGANIZATION: RateLimit(40 * 60, 60),
+ }
+ },
+ )
+
@extend_schema(
operation_id="Create a new check-in",
parameters=[
| {"golden_diff": "diff --git a/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py b/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py\n--- a/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py\n+++ b/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py\n@@ -27,7 +27,9 @@\n )\n from sentry.monitors.serializers import MonitorCheckInSerializerResponse\n from sentry.monitors.validators import MonitorCheckInValidator\n+from sentry.ratelimits.config import RateLimitConfig\n from sentry.signals import first_cron_checkin_received, first_cron_monitor_created\n+from sentry.types.ratelimit import RateLimit, RateLimitCategory\n from sentry.utils import metrics\n \n from .base import MonitorIngestEndpoint\n@@ -41,6 +43,16 @@\n class MonitorIngestCheckInIndexEndpoint(MonitorIngestEndpoint):\n public = {\"POST\"}\n \n+ rate_limits = RateLimitConfig(\n+ limit_overrides={\n+ \"POST\": {\n+ RateLimitCategory.IP: RateLimit(40 * 60, 60),\n+ RateLimitCategory.USER: RateLimit(40 * 60, 60),\n+ RateLimitCategory.ORGANIZATION: RateLimit(40 * 60, 60),\n+ }\n+ },\n+ )\n+\n @extend_schema(\n operation_id=\"Create a new check-in\",\n parameters=[\n", "issue": "Crons: Increase quota call API Sentry monitors \"check-in\"\n### Problem Statement\n\nDue multi-tenancy was reached quota call API Sentry monitors \"check-in\":\r\n\"You are attempting to use this endpoint too frequently. Limit is 40 requests in 1 seconds\"\n\n### Solution Brainstorm\n\nIncrease quota call API Sentry monitors \"check-in\" at least to 60 requests in 1 seconds\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom django.db import transaction\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.exceptions import Throttled\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import ratelimits\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.apidocs.constants import (\n RESPONSE_BAD_REQUEST,\n RESPONSE_FORBIDDEN,\n RESPONSE_NOTFOUND,\n RESPONSE_UNAUTHORIZED,\n)\nfrom sentry.apidocs.parameters import GLOBAL_PARAMS, MONITOR_PARAMS\nfrom sentry.apidocs.utils import inline_sentry_response_serializer\nfrom sentry.models import Environment, Project, ProjectKey\nfrom sentry.monitors.models import (\n CheckInStatus,\n Monitor,\n MonitorCheckIn,\n MonitorEnvironment,\n MonitorStatus,\n)\nfrom sentry.monitors.serializers import MonitorCheckInSerializerResponse\nfrom sentry.monitors.validators import MonitorCheckInValidator\nfrom sentry.signals import first_cron_checkin_received, first_cron_monitor_created\nfrom sentry.utils import metrics\n\nfrom .base import MonitorIngestEndpoint\n\nCHECKIN_QUOTA_LIMIT = 5\nCHECKIN_QUOTA_WINDOW = 60\n\n\n@region_silo_endpoint\n@extend_schema(tags=[\"Crons\"])\nclass MonitorIngestCheckInIndexEndpoint(MonitorIngestEndpoint):\n public = {\"POST\"}\n\n @extend_schema(\n operation_id=\"Create a new check-in\",\n parameters=[\n GLOBAL_PARAMS.ORG_SLUG,\n MONITOR_PARAMS.MONITOR_ID,\n ],\n request=MonitorCheckInValidator,\n responses={\n 200: inline_sentry_response_serializer(\n \"MonitorCheckIn\", MonitorCheckInSerializerResponse\n ),\n 201: inline_sentry_response_serializer(\n \"MonitorCheckIn\", MonitorCheckInSerializerResponse\n ),\n 400: RESPONSE_BAD_REQUEST,\n 401: RESPONSE_UNAUTHORIZED,\n 403: RESPONSE_FORBIDDEN,\n 404: RESPONSE_NOTFOUND,\n },\n )\n def post(\n self, request: Request, project, monitor, organization_slug: str | None = 
None\n ) -> Response:\n \"\"\"\n Creates a new check-in for a monitor.\n\n If `status` is not present, it will be assumed that the check-in is starting, and be marked as `in_progress`.\n\n To achieve a ping-like behavior, you can simply define `status` and optionally `duration` and\n this check-in will be automatically marked as finished.\n\n Note: If a DSN is utilized for authentication, the response will be limited in details.\n \"\"\"\n if monitor.status in [MonitorStatus.PENDING_DELETION, MonitorStatus.DELETION_IN_PROGRESS]:\n return self.respond(status=404)\n\n serializer = MonitorCheckInValidator(\n data=request.data, context={\"project\": project, \"request\": request}\n )\n if not serializer.is_valid():\n return self.respond(serializer.errors, status=400)\n\n if ratelimits.is_limited(\n f\"monitor-checkins:{monitor.id}\",\n limit=CHECKIN_QUOTA_LIMIT,\n window=CHECKIN_QUOTA_WINDOW,\n ):\n metrics.incr(\"monitors.checkin.dropped.ratelimited\")\n raise Throttled(\n detail=\"Rate limited, please send no more than 5 checkins per minute per monitor\"\n )\n\n result = serializer.validated_data\n\n with transaction.atomic():\n environment_name = result.get(\"environment\")\n if not environment_name:\n environment_name = \"production\"\n\n environment = Environment.get_or_create(project=project, name=environment_name)\n\n monitorenvironment_defaults = {\n \"status\": monitor.status,\n \"next_checkin\": monitor.next_checkin,\n \"last_checkin\": monitor.last_checkin,\n }\n monitor_environment = MonitorEnvironment.objects.get_or_create(\n monitor=monitor, environment=environment, defaults=monitorenvironment_defaults\n )[0]\n\n checkin = MonitorCheckIn.objects.create(\n project_id=project.id,\n monitor_id=monitor.id,\n monitor_environment=monitor_environment,\n duration=result.get(\"duration\"),\n status=getattr(CheckInStatus, result[\"status\"].upper()),\n )\n\n if not project.flags.has_cron_checkins:\n # Backfill users that already have cron monitors\n if not project.flags.has_cron_monitors:\n first_cron_monitor_created.send_robust(\n project=project, user=None, sender=Project\n )\n first_cron_checkin_received.send_robust(\n project=project, monitor_id=str(monitor.guid), sender=Project\n )\n\n if checkin.status == CheckInStatus.ERROR and monitor.status != MonitorStatus.DISABLED:\n monitor_failed = monitor.mark_failed(last_checkin=checkin.date_added)\n monitor_environment.mark_failed(last_checkin=checkin.date_added)\n if not monitor_failed:\n if isinstance(request.auth, ProjectKey):\n return self.respond(status=200)\n return self.respond(serialize(checkin, request.user), status=200)\n else:\n monitor_params = {\n \"last_checkin\": checkin.date_added,\n \"next_checkin\": monitor.get_next_scheduled_checkin(checkin.date_added),\n }\n if checkin.status == CheckInStatus.OK and monitor.status != MonitorStatus.DISABLED:\n monitor_params[\"status\"] = MonitorStatus.OK\n Monitor.objects.filter(id=monitor.id).exclude(\n last_checkin__gt=checkin.date_added\n ).update(**monitor_params)\n MonitorEnvironment.objects.filter(id=monitor_environment.id).exclude(\n last_checkin__gt=checkin.date_added\n ).update(**monitor_params)\n\n if isinstance(request.auth, ProjectKey):\n return self.respond({\"id\": str(checkin.guid)}, status=201)\n\n response = self.respond(serialize(checkin, request.user), status=201)\n # TODO(dcramer): this should return a single aboslute uri, aka ALWAYS including org domains if enabled\n # TODO(dcramer): both of these are patterns that we should make easier to accomplish in other 
endpoints\n response[\"Link\"] = self.build_link_header(request, \"checkins/latest/\", rel=\"latest\")\n response[\"Location\"] = request.build_absolute_uri(f\"checkins/{checkin.guid}/\")\n return response\n", "path": "src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom django.db import transaction\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.exceptions import Throttled\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import ratelimits\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.apidocs.constants import (\n RESPONSE_BAD_REQUEST,\n RESPONSE_FORBIDDEN,\n RESPONSE_NOTFOUND,\n RESPONSE_UNAUTHORIZED,\n)\nfrom sentry.apidocs.parameters import GLOBAL_PARAMS, MONITOR_PARAMS\nfrom sentry.apidocs.utils import inline_sentry_response_serializer\nfrom sentry.models import Environment, Project, ProjectKey\nfrom sentry.monitors.models import (\n CheckInStatus,\n Monitor,\n MonitorCheckIn,\n MonitorEnvironment,\n MonitorStatus,\n)\nfrom sentry.monitors.serializers import MonitorCheckInSerializerResponse\nfrom sentry.monitors.validators import MonitorCheckInValidator\nfrom sentry.ratelimits.config import RateLimitConfig\nfrom sentry.signals import first_cron_checkin_received, first_cron_monitor_created\nfrom sentry.types.ratelimit import RateLimit, RateLimitCategory\nfrom sentry.utils import metrics\n\nfrom .base import MonitorIngestEndpoint\n\nCHECKIN_QUOTA_LIMIT = 5\nCHECKIN_QUOTA_WINDOW = 60\n\n\n@region_silo_endpoint\n@extend_schema(tags=[\"Crons\"])\nclass MonitorIngestCheckInIndexEndpoint(MonitorIngestEndpoint):\n public = {\"POST\"}\n\n rate_limits = RateLimitConfig(\n limit_overrides={\n \"POST\": {\n RateLimitCategory.IP: RateLimit(40 * 60, 60),\n RateLimitCategory.USER: RateLimit(40 * 60, 60),\n RateLimitCategory.ORGANIZATION: RateLimit(40 * 60, 60),\n }\n },\n )\n\n @extend_schema(\n operation_id=\"Create a new check-in\",\n parameters=[\n GLOBAL_PARAMS.ORG_SLUG,\n MONITOR_PARAMS.MONITOR_ID,\n ],\n request=MonitorCheckInValidator,\n responses={\n 200: inline_sentry_response_serializer(\n \"MonitorCheckIn\", MonitorCheckInSerializerResponse\n ),\n 201: inline_sentry_response_serializer(\n \"MonitorCheckIn\", MonitorCheckInSerializerResponse\n ),\n 400: RESPONSE_BAD_REQUEST,\n 401: RESPONSE_UNAUTHORIZED,\n 403: RESPONSE_FORBIDDEN,\n 404: RESPONSE_NOTFOUND,\n },\n )\n def post(\n self, request: Request, project, monitor, organization_slug: str | None = None\n ) -> Response:\n \"\"\"\n Creates a new check-in for a monitor.\n\n If `status` is not present, it will be assumed that the check-in is starting, and be marked as `in_progress`.\n\n To achieve a ping-like behavior, you can simply define `status` and optionally `duration` and\n this check-in will be automatically marked as finished.\n\n Note: If a DSN is utilized for authentication, the response will be limited in details.\n \"\"\"\n if monitor.status in [MonitorStatus.PENDING_DELETION, MonitorStatus.DELETION_IN_PROGRESS]:\n return self.respond(status=404)\n\n serializer = MonitorCheckInValidator(\n data=request.data, context={\"project\": project, \"request\": request}\n )\n if not serializer.is_valid():\n return self.respond(serializer.errors, status=400)\n\n if ratelimits.is_limited(\n f\"monitor-checkins:{monitor.id}\",\n limit=CHECKIN_QUOTA_LIMIT,\n window=CHECKIN_QUOTA_WINDOW,\n ):\n 
metrics.incr(\"monitors.checkin.dropped.ratelimited\")\n raise Throttled(\n detail=\"Rate limited, please send no more than 5 checkins per minute per monitor\"\n )\n\n result = serializer.validated_data\n\n with transaction.atomic():\n environment_name = result.get(\"environment\")\n if not environment_name:\n environment_name = \"production\"\n\n environment = Environment.get_or_create(project=project, name=environment_name)\n\n monitorenvironment_defaults = {\n \"status\": monitor.status,\n \"next_checkin\": monitor.next_checkin,\n \"last_checkin\": monitor.last_checkin,\n }\n monitor_environment = MonitorEnvironment.objects.get_or_create(\n monitor=monitor, environment=environment, defaults=monitorenvironment_defaults\n )[0]\n\n checkin = MonitorCheckIn.objects.create(\n project_id=project.id,\n monitor_id=monitor.id,\n monitor_environment=monitor_environment,\n duration=result.get(\"duration\"),\n status=getattr(CheckInStatus, result[\"status\"].upper()),\n )\n\n if not project.flags.has_cron_checkins:\n # Backfill users that already have cron monitors\n if not project.flags.has_cron_monitors:\n first_cron_monitor_created.send_robust(\n project=project, user=None, sender=Project\n )\n first_cron_checkin_received.send_robust(\n project=project, monitor_id=str(monitor.guid), sender=Project\n )\n\n if checkin.status == CheckInStatus.ERROR and monitor.status != MonitorStatus.DISABLED:\n monitor_failed = monitor.mark_failed(last_checkin=checkin.date_added)\n monitor_environment.mark_failed(last_checkin=checkin.date_added)\n if not monitor_failed:\n if isinstance(request.auth, ProjectKey):\n return self.respond(status=200)\n return self.respond(serialize(checkin, request.user), status=200)\n else:\n monitor_params = {\n \"last_checkin\": checkin.date_added,\n \"next_checkin\": monitor.get_next_scheduled_checkin(checkin.date_added),\n }\n if checkin.status == CheckInStatus.OK and monitor.status != MonitorStatus.DISABLED:\n monitor_params[\"status\"] = MonitorStatus.OK\n Monitor.objects.filter(id=monitor.id).exclude(\n last_checkin__gt=checkin.date_added\n ).update(**monitor_params)\n MonitorEnvironment.objects.filter(id=monitor_environment.id).exclude(\n last_checkin__gt=checkin.date_added\n ).update(**monitor_params)\n\n if isinstance(request.auth, ProjectKey):\n return self.respond({\"id\": str(checkin.guid)}, status=201)\n\n response = self.respond(serialize(checkin, request.user), status=201)\n # TODO(dcramer): this should return a single aboslute uri, aka ALWAYS including org domains if enabled\n # TODO(dcramer): both of these are patterns that we should make easier to accomplish in other endpoints\n response[\"Link\"] = self.build_link_header(request, \"checkins/latest/\", rel=\"latest\")\n response[\"Location\"] = request.build_absolute_uri(f\"checkins/{checkin.guid}/\")\n return response\n", "path": "src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py"}]} | 2,096 | 327 |
gh_patches_debug_22228 | rasdani/github-patches | git_diff | iterative__dvc-2337 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Two test_info_* tests are failing on tip of master
At the [tip of master right now](https://github.com/iterative/dvc/commit/32425e90691bfd4988eb0a2d70cdc4fdba910f49), the two tests `test_info_outside_of_repo` and `test_info_in_repo` are failing for me (but not in CI, so I'm guessing this is environment-specific), because the `dvc version` output does not contain the "Filesystem type" lines that the tests are expecting.
I'm on macOS 10.14.5 and running Python 3.7.3 installed with pyenv, in a fresh virtualenv configured as specified in https://dvc.org/doc/user-guide/contributing
Cut-n-pasted output from `python -m tests`:
```
______________________________________________________________________ test_info_outside_of_repo ______________________________________________________________________
[gw2] darwin -- Python 3.7.3 /Users/gbr/.pyenv/versions/3.7.3/envs/DVC/bin/python
repo_dir = <tests.basic_env.TestDirFixture object at 0x129f640b8>, caplog = <_pytest.logging.LogCaptureFixture object at 0x129f640f0>
def test_info_outside_of_repo(repo_dir, caplog):
assert main(["version"]) == 0
assert re.search(re.compile(r"DVC version: \d+\.\d+\.\d+"), caplog.text)
assert re.search(re.compile(r"Python version: \d\.\d\.\d"), caplog.text)
assert re.search(re.compile(r"Platform: .*"), caplog.text)
assert re.search(re.compile(r"Binary: (True|False)"), caplog.text)
> assert re.search(
re.compile(r"Filesystem type \(workspace\): .*"), caplog.text
)
E AssertionError: assert None
E + where None = <function search at 0x109ed6ea0>(re.compile('Filesystem type \\(workspace\\): .*'), 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\n Binary: False\n')
E + where <function search at 0x109ed6ea0> = re.search
E + and re.compile('Filesystem type \\(workspace\\): .*') = <function compile at 0x109ed7268>('Filesystem type \\(workspace\\): .*')
E + where <function compile at 0x109ed7268> = re.compile
E + and 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\n Binary: False\n' = <_pytest.logging.LogCaptureFixture object at 0x129f640f0>.text
/Users/gbr/git/dvc/tests/func/test_version.py:31: AssertionError
------------------------------------------------------------------------ Captured stdout call -------------------------------------------------------------------------
-------------------------------------------------------------------------- Captured log call --------------------------------------------------------------------------
INFO dvc.command.version:version.py:65 DVC version: 0.53.2
Python version: 3.7.3
Platform: Darwin-18.6.0-x86_64-i386-64bit
Binary: False
---------------------------------------------------------------------- Captured stdout teardown -----------------------------------------------------------------------
------------------------------------------------------------------------ Captured log teardown ------------------------------------------------------------------------
DEBUG dvc.utils:__init__.py:177 Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40553.vmgjwy8n.3wkYFuPE6JggPk5TfN6ZXU'
__________________________________________________________________________ test_info_in_repo __________________________________________________________________________
[gw1] darwin -- Python 3.7.3 /Users/gbr/.pyenv/versions/3.7.3/envs/DVC/bin/python
dvc_repo = Repo: '/private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'
caplog = <_pytest.logging.LogCaptureFixture object at 0x1236ef5f8>
def test_info_in_repo(dvc_repo, caplog):
assert main(["version"]) == 0
assert re.search(re.compile(r"DVC version: \d+\.\d+\.\d+"), caplog.text)
assert re.search(re.compile(r"Python version: \d\.\d\.\d"), caplog.text)
assert re.search(re.compile(r"Platform: .*"), caplog.text)
assert re.search(re.compile(r"Binary: (True|False)"), caplog.text)
> assert re.search(
re.compile(r"Filesystem type \(cache directory\): .*"), caplog.text
)
E AssertionError: assert None
E + where None = <function search at 0x104e41ea0>(re.compile('Filesystem type \\(cache directory\\): .*'), 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\n Binary: False\n')
E + where <function search at 0x104e41ea0> = re.search
E + and re.compile('Filesystem type \\(cache directory\\): .*') = <function compile at 0x104e42268>('Filesystem type \\(cache directory\\): .*')
E + where <function compile at 0x104e42268> = re.compile
E + and 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\n Binary: False\n' = <_pytest.logging.LogCaptureFixture object at 0x1236ef5f8>.text
/Users/gbr/git/dvc/tests/func/test_version.py:13: AssertionError
------------------------------------------------------------------------ Captured stdout setup ------------------------------------------------------------------------
What's next?
------------
- Check out the documentation: https://dvc.org/doc
- Get help and share ideas: https://dvc.org/chat
- Star us on GitHub: https://github.com/iterative/dvc
------------------------------------------------------------------------- Captured log setup --------------------------------------------------------------------------
INFO dvc.repo.init:init.py:40 What's next?
------------
- Check out the documentation: https://dvc.org/doc
- Get help and share ideas: https://dvc.org/chat
- Star us on GitHub: https://github.com/iterative/dvc
------------------------------------------------------------------------ Captured stdout call -------------------------------------------------------------------------
DVC version: 0.53.2
Python version: 3.7.3
Platform: Darwin-18.6.0-x86_64-i386-64bit
Binary: False
-------------------------------------------------------------------------- Captured log call --------------------------------------------------------------------------
INFO dvc.command.version:version.py:65 DVC version: 0.53.2
Python version: 3.7.3
Platform: Darwin-18.6.0-x86_64-i386-64bit
Binary: False
---------------------------------------------------------------------- Captured stdout teardown -----------------------------------------------------------------------
DEBUG: Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'
------------------------------------------------------------------------ Captured log teardown ------------------------------------------------------------------------
DEBUG dvc.utils:__init__.py:177 Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'
[...]
==== 2 failed, 619 passed, 41 skipped, 10 warnings in 73.48 seconds ====
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/command/version.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import os
4 import platform
5 import argparse
6 import logging
7 import uuid
8
9 try:
10 import psutil
11 except ImportError:
12 psutil = None
13
14 from dvc.utils import is_binary
15 from dvc.utils.compat import pathlib
16 from dvc.repo import Repo
17 from dvc.command.base import CmdBaseNoRepo, append_doc_link
18 from dvc.version import __version__
19 from dvc.exceptions import DvcException, NotDvcRepoError
20 from dvc.system import System
21
22
23 logger = logging.getLogger(__name__)
24
25
26 class CmdVersion(CmdBaseNoRepo):
27 def run(self):
28 dvc_version = __version__
29 python_version = platform.python_version()
30 platform_type = platform.platform()
31 binary = is_binary()
32
33 info = (
34 "DVC version: {dvc_version}\n"
35 "Python version: {python_version}\n"
36 "Platform: {platform_type}\n"
37 "Binary: {binary}\n"
38 ).format(
39 dvc_version=dvc_version,
40 python_version=python_version,
41 platform_type=platform_type,
42 binary=binary,
43 )
44
45 if psutil:
46 try:
47 repo = Repo()
48 root_directory = repo.root_dir
49
50 info += (
51 "Cache: {cache}\n"
52 "Filesystem type (cache directory): {fs_cache}\n"
53 ).format(
54 cache=self.get_linktype_support_info(repo),
55 fs_cache=self.get_fs_type(repo.cache.local.cache_dir),
56 )
57
58 except NotDvcRepoError:
59 root_directory = os.getcwd()
60
61 info += ("Filesystem type (workspace): {fs_root}").format(
62 fs_root=self.get_fs_type(os.path.abspath(root_directory))
63 )
64
65 logger.info(info)
66 return 0
67
68 @staticmethod
69 def get_fs_type(path):
70 partition = {
71 pathlib.Path(part.mountpoint): (part.fstype, part.device)
72 for part in psutil.disk_partitions()
73 }
74 for parent in pathlib.Path(path).parents:
75 if parent in partition:
76 return partition[parent]
77 return ("unkown", "none")
78
79 @staticmethod
80 def get_linktype_support_info(repo):
81 links = {
82 "reflink": System.reflink,
83 "hardlink": System.hardlink,
84 "symlink": System.symlink,
85 }
86
87 fname = "." + str(uuid.uuid4())
88 src = os.path.join(repo.cache.local.cache_dir, fname)
89 open(src, "w").close()
90 dst = os.path.join(repo.root_dir, fname)
91
92 cache = []
93
94 for name, link in links.items():
95 try:
96 link(src, dst)
97 os.unlink(dst)
98 supported = True
99 except DvcException:
100 supported = False
101 cache.append(
102 "{name} - {supported}".format(
103 name=name, supported=True if supported else False
104 )
105 )
106 os.remove(src)
107
108 return ", ".join(cache)
109
110
111 def add_parser(subparsers, parent_parser):
112 VERSION_HELP = "Show DVC version and system/environment informaion."
113
114 version_parser = subparsers.add_parser(
115 "version",
116 parents=[parent_parser],
117 description=append_doc_link(VERSION_HELP, "version"),
118 help=VERSION_HELP,
119 formatter_class=argparse.RawDescriptionHelpFormatter,
120 )
121 version_parser.set_defaults(func=CmdVersion)
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/command/version.py b/dvc/command/version.py
--- a/dvc/command/version.py
+++ b/dvc/command/version.py
@@ -42,22 +42,22 @@
binary=binary,
)
- if psutil:
- try:
- repo = Repo()
- root_directory = repo.root_dir
+ try:
+ repo = Repo()
+ root_directory = repo.root_dir
+
+ info += "Cache: {cache}\n".format(
+ cache=self.get_linktype_support_info(repo)
+ )
+ if psutil:
info += (
- "Cache: {cache}\n"
"Filesystem type (cache directory): {fs_cache}\n"
- ).format(
- cache=self.get_linktype_support_info(repo),
- fs_cache=self.get_fs_type(repo.cache.local.cache_dir),
- )
-
- except NotDvcRepoError:
- root_directory = os.getcwd()
+ ).format(fs_cache=self.get_fs_type(repo.cache.local.cache_dir))
+ except NotDvcRepoError:
+ root_directory = os.getcwd()
+ if psutil:
info += ("Filesystem type (workspace): {fs_root}").format(
fs_root=self.get_fs_type(os.path.abspath(root_directory))
)
| {"golden_diff": "diff --git a/dvc/command/version.py b/dvc/command/version.py\n--- a/dvc/command/version.py\n+++ b/dvc/command/version.py\n@@ -42,22 +42,22 @@\n binary=binary,\n )\n \n- if psutil:\n- try:\n- repo = Repo()\n- root_directory = repo.root_dir\n+ try:\n+ repo = Repo()\n+ root_directory = repo.root_dir\n+\n+ info += \"Cache: {cache}\\n\".format(\n+ cache=self.get_linktype_support_info(repo)\n+ )\n \n+ if psutil:\n info += (\n- \"Cache: {cache}\\n\"\n \"Filesystem type (cache directory): {fs_cache}\\n\"\n- ).format(\n- cache=self.get_linktype_support_info(repo),\n- fs_cache=self.get_fs_type(repo.cache.local.cache_dir),\n- )\n-\n- except NotDvcRepoError:\n- root_directory = os.getcwd()\n+ ).format(fs_cache=self.get_fs_type(repo.cache.local.cache_dir))\n+ except NotDvcRepoError:\n+ root_directory = os.getcwd()\n \n+ if psutil:\n info += (\"Filesystem type (workspace): {fs_root}\").format(\n fs_root=self.get_fs_type(os.path.abspath(root_directory))\n )\n", "issue": "Two test_info_* tests are failing on tip of master\nAt the [tip of master right now](https://github.com/iterative/dvc/commit/32425e90691bfd4988eb0a2d70cdc4fdba910f49), the two tests `test_info_outside_of_repo` and `test_info_in_repo` are failing for me (but not in CI, so I'm guessing this is environment-specific), because the `dvc version` output does not contain the \"Filesystem type\" lines that the tests are expecting.\r\n\r\nI'm on macOS 10.14.5 and running Python 3.7.3 installed with pyenv, in a fresh virtualenv configured as specified in https://dvc.org/doc/user-guide/contributing\r\n\r\nCut-n-pasted output from `python -m tests`:\r\n\r\n```\r\n______________________________________________________________________ test_info_outside_of_repo ______________________________________________________________________\r\n[gw2] darwin -- Python 3.7.3 /Users/gbr/.pyenv/versions/3.7.3/envs/DVC/bin/python\r\n\r\nrepo_dir = <tests.basic_env.TestDirFixture object at 0x129f640b8>, caplog = <_pytest.logging.LogCaptureFixture object at 0x129f640f0>\r\n\r\n def test_info_outside_of_repo(repo_dir, caplog):\r\n assert main([\"version\"]) == 0\r\n\r\n assert re.search(re.compile(r\"DVC version: \\d+\\.\\d+\\.\\d+\"), caplog.text)\r\n assert re.search(re.compile(r\"Python version: \\d\\.\\d\\.\\d\"), caplog.text)\r\n assert re.search(re.compile(r\"Platform: .*\"), caplog.text)\r\n assert re.search(re.compile(r\"Binary: (True|False)\"), caplog.text)\r\n> assert re.search(\r\n re.compile(r\"Filesystem type \\(workspace\\): .*\"), caplog.text\r\n )\r\nE AssertionError: assert None\r\nE + where None = <function search at 0x109ed6ea0>(re.compile('Filesystem type \\\\(workspace\\\\): .*'), 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\\n Binary: False\\n')\r\nE + where <function search at 0x109ed6ea0> = re.search\r\nE + and re.compile('Filesystem type \\\\(workspace\\\\): .*') = <function compile at 0x109ed7268>('Filesystem type \\\\(workspace\\\\): .*')\r\nE + where <function compile at 0x109ed7268> = re.compile\r\nE + and 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\\n Python ver... 
Platform: Darwin-18.6.0-x86_64-i386-64bit\\n Binary: False\\n' = <_pytest.logging.LogCaptureFixture object at 0x129f640f0>.text\r\n\r\n/Users/gbr/git/dvc/tests/func/test_version.py:31: AssertionError\r\n------------------------------------------------------------------------ Captured stdout call -------------------------------------------------------------------------\r\n\r\n\r\n-------------------------------------------------------------------------- Captured log call --------------------------------------------------------------------------\r\nINFO dvc.command.version:version.py:65 DVC version: 0.53.2\r\n Python version: 3.7.3\r\n Platform: Darwin-18.6.0-x86_64-i386-64bit\r\n Binary: False\r\n---------------------------------------------------------------------- Captured stdout teardown -----------------------------------------------------------------------\r\n\r\n\r\n------------------------------------------------------------------------ Captured log teardown ------------------------------------------------------------------------\r\nDEBUG dvc.utils:__init__.py:177 Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40553.vmgjwy8n.3wkYFuPE6JggPk5TfN6ZXU'\r\n__________________________________________________________________________ test_info_in_repo __________________________________________________________________________\r\n[gw1] darwin -- Python 3.7.3 /Users/gbr/.pyenv/versions/3.7.3/envs/DVC/bin/python\r\n\r\ndvc_repo = Repo: '/private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'\r\ncaplog = <_pytest.logging.LogCaptureFixture object at 0x1236ef5f8>\r\n\r\n def test_info_in_repo(dvc_repo, caplog):\r\n assert main([\"version\"]) == 0\r\n\r\n assert re.search(re.compile(r\"DVC version: \\d+\\.\\d+\\.\\d+\"), caplog.text)\r\n assert re.search(re.compile(r\"Python version: \\d\\.\\d\\.\\d\"), caplog.text)\r\n assert re.search(re.compile(r\"Platform: .*\"), caplog.text)\r\n assert re.search(re.compile(r\"Binary: (True|False)\"), caplog.text)\r\n> assert re.search(\r\n re.compile(r\"Filesystem type \\(cache directory\\): .*\"), caplog.text\r\n )\r\nE AssertionError: assert None\r\nE + where None = <function search at 0x104e41ea0>(re.compile('Filesystem type \\\\(cache directory\\\\): .*'), 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\\n Python ver... Platform: Darwin-18.6.0-x86_64-i386-64bit\\n Binary: False\\n')\r\nE + where <function search at 0x104e41ea0> = re.search\r\nE + and re.compile('Filesystem type \\\\(cache directory\\\\): .*') = <function compile at 0x104e42268>('Filesystem type \\\\(cache directory\\\\): .*')\r\nE + where <function compile at 0x104e42268> = re.compile\r\nE + and 'INFO dvc.command.version:version.py:65 DVC version: 0.53.2\\n Python ver... 
Platform: Darwin-18.6.0-x86_64-i386-64bit\\n Binary: False\\n' = <_pytest.logging.LogCaptureFixture object at 0x1236ef5f8>.text\r\n\r\n/Users/gbr/git/dvc/tests/func/test_version.py:13: AssertionError\r\n------------------------------------------------------------------------ Captured stdout setup ------------------------------------------------------------------------\r\n\r\nWhat's next?\r\n------------\r\n- Check out the documentation: https://dvc.org/doc\r\n- Get help and share ideas: https://dvc.org/chat\r\n- Star us on GitHub: https://github.com/iterative/dvc\r\n------------------------------------------------------------------------- Captured log setup --------------------------------------------------------------------------\r\nINFO dvc.repo.init:init.py:40 What's next?\r\n------------\r\n- Check out the documentation: https://dvc.org/doc\r\n- Get help and share ideas: https://dvc.org/chat\r\n- Star us on GitHub: https://github.com/iterative/dvc\r\n------------------------------------------------------------------------ Captured stdout call -------------------------------------------------------------------------\r\n\r\nDVC version: 0.53.2\r\nPython version: 3.7.3\r\nPlatform: Darwin-18.6.0-x86_64-i386-64bit\r\nBinary: False\r\n\r\n-------------------------------------------------------------------------- Captured log call --------------------------------------------------------------------------\r\nINFO dvc.command.version:version.py:65 DVC version: 0.53.2\r\n Python version: 3.7.3\r\n Platform: Darwin-18.6.0-x86_64-i386-64bit\r\n Binary: False\r\n---------------------------------------------------------------------- Captured stdout teardown -----------------------------------------------------------------------\r\n\r\nDEBUG: Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'\r\n------------------------------------------------------------------------ Captured log teardown ------------------------------------------------------------------------\r\nDEBUG dvc.utils:__init__.py:177 Removing '../../../../private/var/folders/_g/f_zc9wdj6vqcbsk1h62bwxmd9xsbxj/T/dvc-test.40552.f97sqyup.brvX7Jws37VU3abAn5sjD9'\r\n\r\n[...]\r\n\r\n==== 2 failed, 619 passed, 41 skipped, 10 warnings in 73.48 seconds ====\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport platform\nimport argparse\nimport logging\nimport uuid\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nfrom dvc.utils import is_binary\nfrom dvc.utils.compat import pathlib\nfrom dvc.repo import Repo\nfrom dvc.command.base import CmdBaseNoRepo, append_doc_link\nfrom dvc.version import __version__\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.system import System\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdVersion(CmdBaseNoRepo):\n def run(self):\n dvc_version = __version__\n python_version = platform.python_version()\n platform_type = platform.platform()\n binary = is_binary()\n\n info = (\n \"DVC version: {dvc_version}\\n\"\n \"Python version: {python_version}\\n\"\n \"Platform: {platform_type}\\n\"\n \"Binary: {binary}\\n\"\n ).format(\n dvc_version=dvc_version,\n python_version=python_version,\n platform_type=platform_type,\n binary=binary,\n )\n\n if psutil:\n try:\n repo = Repo()\n root_directory = repo.root_dir\n\n info += (\n \"Cache: {cache}\\n\"\n \"Filesystem type (cache directory): {fs_cache}\\n\"\n ).format(\n cache=self.get_linktype_support_info(repo),\n 
fs_cache=self.get_fs_type(repo.cache.local.cache_dir),\n )\n\n except NotDvcRepoError:\n root_directory = os.getcwd()\n\n info += (\"Filesystem type (workspace): {fs_root}\").format(\n fs_root=self.get_fs_type(os.path.abspath(root_directory))\n )\n\n logger.info(info)\n return 0\n\n @staticmethod\n def get_fs_type(path):\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype, part.device)\n for part in psutil.disk_partitions()\n }\n for parent in pathlib.Path(path).parents:\n if parent in partition:\n return partition[parent]\n return (\"unkown\", \"none\")\n\n @staticmethod\n def get_linktype_support_info(repo):\n links = {\n \"reflink\": System.reflink,\n \"hardlink\": System.hardlink,\n \"symlink\": System.symlink,\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, link in links.items():\n try:\n link(src, dst)\n os.unlink(dst)\n supported = True\n except DvcException:\n supported = False\n cache.append(\n \"{name} - {supported}\".format(\n name=name, supported=True if supported else False\n )\n )\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef add_parser(subparsers, parent_parser):\n VERSION_HELP = \"Show DVC version and system/environment informaion.\"\n\n version_parser = subparsers.add_parser(\n \"version\",\n parents=[parent_parser],\n description=append_doc_link(VERSION_HELP, \"version\"),\n help=VERSION_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n version_parser.set_defaults(func=CmdVersion)\n", "path": "dvc/command/version.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport platform\nimport argparse\nimport logging\nimport uuid\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nfrom dvc.utils import is_binary\nfrom dvc.utils.compat import pathlib\nfrom dvc.repo import Repo\nfrom dvc.command.base import CmdBaseNoRepo, append_doc_link\nfrom dvc.version import __version__\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.system import System\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdVersion(CmdBaseNoRepo):\n def run(self):\n dvc_version = __version__\n python_version = platform.python_version()\n platform_type = platform.platform()\n binary = is_binary()\n\n info = (\n \"DVC version: {dvc_version}\\n\"\n \"Python version: {python_version}\\n\"\n \"Platform: {platform_type}\\n\"\n \"Binary: {binary}\\n\"\n ).format(\n dvc_version=dvc_version,\n python_version=python_version,\n platform_type=platform_type,\n binary=binary,\n )\n\n try:\n repo = Repo()\n root_directory = repo.root_dir\n\n info += \"Cache: {cache}\\n\".format(\n cache=self.get_linktype_support_info(repo)\n )\n\n if psutil:\n info += (\n \"Filesystem type (cache directory): {fs_cache}\\n\"\n ).format(fs_cache=self.get_fs_type(repo.cache.local.cache_dir))\n except NotDvcRepoError:\n root_directory = os.getcwd()\n\n if psutil:\n info += (\"Filesystem type (workspace): {fs_root}\").format(\n fs_root=self.get_fs_type(os.path.abspath(root_directory))\n )\n\n logger.info(info)\n return 0\n\n @staticmethod\n def get_fs_type(path):\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype, part.device)\n for part in psutil.disk_partitions()\n }\n for parent in pathlib.Path(path).parents:\n if parent in partition:\n return partition[parent]\n return (\"unkown\", \"none\")\n\n @staticmethod\n def get_linktype_support_info(repo):\n links = {\n \"reflink\": 
System.reflink,\n \"hardlink\": System.hardlink,\n \"symlink\": System.symlink,\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, link in links.items():\n try:\n link(src, dst)\n os.unlink(dst)\n supported = True\n except DvcException:\n supported = False\n cache.append(\n \"{name} - {supported}\".format(\n name=name, supported=True if supported else False\n )\n )\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef add_parser(subparsers, parent_parser):\n VERSION_HELP = \"Show DVC version and system/environment informaion.\"\n\n version_parser = subparsers.add_parser(\n \"version\",\n parents=[parent_parser],\n description=append_doc_link(VERSION_HELP, \"version\"),\n help=VERSION_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n version_parser.set_defaults(func=CmdVersion)\n", "path": "dvc/command/version.py"}]} | 3,201 | 286 |
gh_patches_debug_44788 | rasdani/github-patches | git_diff | aws__aws-cli-2537 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reading/writing EMR key_pair_file configuration options behaves oddly
Version:
```
$ aws --version
aws-cli/1.11.75 Python/2.7.10 Darwin/15.6.0 botocore/1.5.38
```
[It's suggested that one can set a default key_pair_file argument here](https://github.com/aws/aws-cli/blob/master/awscli/customizations/emr/ssh.py#L25) by running `aws configure set emr.key_pair_file <value>`
By that token, I would expect `aws configure get emr.key_pair_file` to retrieve this item and to exit with an exit code of 0.
```
$ aws configure set emr.key_pair_file /tmp/foo
$ cat config
[default]
emr =
key_pair_file = /tmp/foo
$ aws configure get emr.key_pair_file
$ echo $?
1
```
As you can see, setting this and trying to retrieve it exits with a non-zero exit code, which makes it a pain to check for this config item being set as part of shell scripts prior to running other EMR-based commands (such as create-cluster).
As an aside, trying to get the top-level `emr` config item fails too:
```
$ aws configure get emr
expected a character buffer object
```
Additionally this item doesn't show up when `aws configure list` is run either;
```
$ aws configure list
Name Value Type Location
---- ----- ---- --------
profile <not set> None None
access_key REDACTED shared-credentials-file
secret_key REDACTED shared-credentials-file
region <not set> None None
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/configure/get.py`
Content:
```
1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import sys
14
15 from awscli.customizations.commands import BasicCommand
16
17 from . import PREDEFINED_SECTION_NAMES
18
19
20 class ConfigureGetCommand(BasicCommand):
21 NAME = 'get'
22 DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',
23 '_description.rst')
24 SYNOPSIS = ('aws configure get varname [--profile profile-name]')
25 EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')
26 ARG_TABLE = [
27 {'name': 'varname',
28 'help_text': 'The name of the config value to retrieve.',
29 'action': 'store',
30 'cli_type_name': 'string', 'positional_arg': True},
31 ]
32
33 def __init__(self, session, stream=sys.stdout):
34 super(ConfigureGetCommand, self).__init__(session)
35 self._stream = stream
36
37 def _run_main(self, args, parsed_globals):
38 varname = args.varname
39 value = None
40 if '.' not in varname:
41 # get_scoped_config() returns the config variables in the config
42 # file (not the logical_var names), which is what we want.
43 config = self._session.get_scoped_config()
44 value = config.get(varname)
45 else:
46 value = self._get_dotted_config_value(varname)
47 if value is not None:
48 self._stream.write(value)
49 self._stream.write('\n')
50 return 0
51 else:
52 return 1
53
54 def _get_dotted_config_value(self, varname):
55 parts = varname.split('.')
56 num_dots = varname.count('.')
57 # Logic to deal with predefined sections like [preview], [plugin] and etc.
58 if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:
59 full_config = self._session.full_config
60 section, config_name = varname.split('.')
61 value = full_config.get(section, {}).get(config_name)
62 if value is None:
63 # Try to retrieve it from the profile config.
64 value = full_config['profiles'].get(
65 section, {}).get(config_name)
66 return value
67 if parts[0] == 'profile':
68 profile_name = parts[1]
69 config_name = parts[2]
70 remaining = parts[3:]
71 # Check if varname starts with 'default' profile (e.g. default.emr-dev.emr.instance_profile)
72 # If not, go further to check if varname starts with a known profile name
73 elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):
74 profile_name = parts[0]
75 config_name = parts[1]
76 remaining = parts[2:]
77 else:
78 profile_name = self._session.get_config_variable('profile')
79 config_name = parts[0]
80 remaining = parts[1:]
81
82 value = self._session.full_config['profiles'].get(
83 profile_name, {}).get(config_name)
84 if len(remaining) == 1:
85 try:
86 value = value.get(remaining[-1])
87 except AttributeError:
88 value = None
89 return value
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/configure/get.py b/awscli/customizations/configure/get.py
--- a/awscli/customizations/configure/get.py
+++ b/awscli/customizations/configure/get.py
@@ -11,17 +11,21 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
+import logging
from awscli.customizations.commands import BasicCommand
+from awscli.compat import six
from . import PREDEFINED_SECTION_NAMES
+LOG = logging.getLogger(__name__)
+
class ConfigureGetCommand(BasicCommand):
NAME = 'get'
DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',
'_description.rst')
- SYNOPSIS = ('aws configure get varname [--profile profile-name]')
+ SYNOPSIS = 'aws configure get varname [--profile profile-name]'
EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')
ARG_TABLE = [
{'name': 'varname',
@@ -30,13 +34,14 @@
'cli_type_name': 'string', 'positional_arg': True},
]
- def __init__(self, session, stream=sys.stdout):
+ def __init__(self, session, stream=sys.stdout, error_stream=sys.stderr):
super(ConfigureGetCommand, self).__init__(session)
self._stream = stream
+ self._error_stream = error_stream
def _run_main(self, args, parsed_globals):
varname = args.varname
- value = None
+
if '.' not in varname:
# get_scoped_config() returns the config variables in the config
# file (not the logical_var names), which is what we want.
@@ -44,17 +49,30 @@
value = config.get(varname)
else:
value = self._get_dotted_config_value(varname)
- if value is not None:
+
+ LOG.debug(u'Config value retrieved: %s' % value)
+
+ if isinstance(value, six.string_types):
self._stream.write(value)
self._stream.write('\n')
return 0
+ elif isinstance(value, dict):
+ # TODO: add support for this. We would need to print it off in
+ # the same format as the config file.
+ self._error_stream.write(
+ 'varname (%s) must reference a value, not a section or '
+ 'sub-section.' % varname
+ )
+ return 1
else:
return 1
def _get_dotted_config_value(self, varname):
parts = varname.split('.')
num_dots = varname.count('.')
- # Logic to deal with predefined sections like [preview], [plugin] and etc.
+
+ # Logic to deal with predefined sections like [preview], [plugin] and
+ # etc.
if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:
full_config = self._session.full_config
section, config_name = varname.split('.')
@@ -64,18 +82,23 @@
value = full_config['profiles'].get(
section, {}).get(config_name)
return value
+
if parts[0] == 'profile':
profile_name = parts[1]
config_name = parts[2]
remaining = parts[3:]
- # Check if varname starts with 'default' profile (e.g. default.emr-dev.emr.instance_profile)
- # If not, go further to check if varname starts with a known profile name
- elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):
+ # Check if varname starts with 'default' profile (e.g.
+ # default.emr-dev.emr.instance_profile) If not, go further to check
+ # if varname starts with a known profile name
+ elif parts[0] == 'default' or (
+ parts[0] in self._session.full_config['profiles']):
profile_name = parts[0]
config_name = parts[1]
remaining = parts[2:]
else:
profile_name = self._session.get_config_variable('profile')
+ if profile_name is None:
+ profile_name = 'default'
config_name = parts[0]
remaining = parts[1:]
| {"golden_diff": "diff --git a/awscli/customizations/configure/get.py b/awscli/customizations/configure/get.py\n--- a/awscli/customizations/configure/get.py\n+++ b/awscli/customizations/configure/get.py\n@@ -11,17 +11,21 @@\n # ANY KIND, either express or implied. See the License for the specific\n # language governing permissions and limitations under the License.\n import sys\n+import logging\n \n from awscli.customizations.commands import BasicCommand\n+from awscli.compat import six\n \n from . import PREDEFINED_SECTION_NAMES\n \n+LOG = logging.getLogger(__name__)\n+\n \n class ConfigureGetCommand(BasicCommand):\n NAME = 'get'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',\n '_description.rst')\n- SYNOPSIS = ('aws configure get varname [--profile profile-name]')\n+ SYNOPSIS = 'aws configure get varname [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n@@ -30,13 +34,14 @@\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n \n- def __init__(self, session, stream=sys.stdout):\n+ def __init__(self, session, stream=sys.stdout, error_stream=sys.stderr):\n super(ConfigureGetCommand, self).__init__(session)\n self._stream = stream\n+ self._error_stream = error_stream\n \n def _run_main(self, args, parsed_globals):\n varname = args.varname\n- value = None\n+\n if '.' not in varname:\n # get_scoped_config() returns the config variables in the config\n # file (not the logical_var names), which is what we want.\n@@ -44,17 +49,30 @@\n value = config.get(varname)\n else:\n value = self._get_dotted_config_value(varname)\n- if value is not None:\n+\n+ LOG.debug(u'Config value retrieved: %s' % value)\n+\n+ if isinstance(value, six.string_types):\n self._stream.write(value)\n self._stream.write('\\n')\n return 0\n+ elif isinstance(value, dict):\n+ # TODO: add support for this. We would need to print it off in\n+ # the same format as the config file.\n+ self._error_stream.write(\n+ 'varname (%s) must reference a value, not a section or '\n+ 'sub-section.' % varname\n+ )\n+ return 1\n else:\n return 1\n \n def _get_dotted_config_value(self, varname):\n parts = varname.split('.')\n num_dots = varname.count('.')\n- # Logic to deal with predefined sections like [preview], [plugin] and etc.\n+\n+ # Logic to deal with predefined sections like [preview], [plugin] and\n+ # etc.\n if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:\n full_config = self._session.full_config\n section, config_name = varname.split('.')\n@@ -64,18 +82,23 @@\n value = full_config['profiles'].get(\n section, {}).get(config_name)\n return value\n+\n if parts[0] == 'profile':\n profile_name = parts[1]\n config_name = parts[2]\n remaining = parts[3:]\n- # Check if varname starts with 'default' profile (e.g. 
default.emr-dev.emr.instance_profile)\n- # If not, go further to check if varname starts with a known profile name\n- elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):\n+ # Check if varname starts with 'default' profile (e.g.\n+ # default.emr-dev.emr.instance_profile) If not, go further to check\n+ # if varname starts with a known profile name\n+ elif parts[0] == 'default' or (\n+ parts[0] in self._session.full_config['profiles']):\n profile_name = parts[0]\n config_name = parts[1]\n remaining = parts[2:]\n else:\n profile_name = self._session.get_config_variable('profile')\n+ if profile_name is None:\n+ profile_name = 'default'\n config_name = parts[0]\n remaining = parts[1:]\n", "issue": "Reading/writing EMR key_pair_file configuration options behaves oddly\nVersion:\r\n\r\n```\r\n$ aws --version\r\naws-cli/1.11.75 Python/2.7.10 Darwin/15.6.0 botocore/1.5.38\r\n```\r\n\r\n[It's suggested that one can set a default key_pair_file argument here](https://github.com/aws/aws-cli/blob/master/awscli/customizations/emr/ssh.py#L25) by running `aws configure set emr.key_pair_file <value>`\r\n\r\nBy that token, I would expect `aws configure get emr.key_pair_file` to retrieve this item and to exit with a exit code of 0.\r\n\r\n```\r\n$ aws configure set emr.key_pair_file /tmp/foo\r\n\r\n$ cat config\r\n[default]\r\nemr =\r\n key_pair_file = /tmp/foo\r\n\r\n$ aws configure get emr.key_pair_file\r\n\r\n$ echo $?\r\n1\r\n```\r\n\r\nAs you can see, setting this and trying to retrieve it exits with a non-zero exit code which makes it a pain to check for this config item being set as part of shell scripts prior to do other EMR-based commands (such as create-cluster).\r\n\r\nAs an aside, trying to get the top level `emr` config item fails too;\r\n\r\n```\r\n$ aws configure get emr\r\n\r\nexpected a character buffer object\r\n```\r\n\r\nAdditionally this item doesn't show up when `aws configure list` is run either;\r\n\r\n```\r\n$ aws configure list\r\n Name Value Type Location\r\n ---- ----- ---- --------\r\n profile <not set> None None\r\naccess_key REDACTED shared-credentials-file\r\nsecret_key REDACTED shared-credentials-file\r\n region <not set> None None\r\n\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\n\nfrom awscli.customizations.commands import BasicCommand\n\nfrom . 
import PREDEFINED_SECTION_NAMES\n\n\nclass ConfigureGetCommand(BasicCommand):\n NAME = 'get'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',\n '_description.rst')\n SYNOPSIS = ('aws configure get varname [--profile profile-name]')\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to retrieve.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n\n def __init__(self, session, stream=sys.stdout):\n super(ConfigureGetCommand, self).__init__(session)\n self._stream = stream\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = None\n if '.' not in varname:\n # get_scoped_config() returns the config variables in the config\n # file (not the logical_var names), which is what we want.\n config = self._session.get_scoped_config()\n value = config.get(varname)\n else:\n value = self._get_dotted_config_value(varname)\n if value is not None:\n self._stream.write(value)\n self._stream.write('\\n')\n return 0\n else:\n return 1\n\n def _get_dotted_config_value(self, varname):\n parts = varname.split('.')\n num_dots = varname.count('.')\n # Logic to deal with predefined sections like [preview], [plugin] and etc.\n if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:\n full_config = self._session.full_config\n section, config_name = varname.split('.')\n value = full_config.get(section, {}).get(config_name)\n if value is None:\n # Try to retrieve it from the profile config.\n value = full_config['profiles'].get(\n section, {}).get(config_name)\n return value\n if parts[0] == 'profile':\n profile_name = parts[1]\n config_name = parts[2]\n remaining = parts[3:]\n # Check if varname starts with 'default' profile (e.g. default.emr-dev.emr.instance_profile)\n # If not, go further to check if varname starts with a known profile name\n elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):\n profile_name = parts[0]\n config_name = parts[1]\n remaining = parts[2:]\n else:\n profile_name = self._session.get_config_variable('profile')\n config_name = parts[0]\n remaining = parts[1:]\n\n value = self._session.full_config['profiles'].get(\n profile_name, {}).get(config_name)\n if len(remaining) == 1:\n try:\n value = value.get(remaining[-1])\n except AttributeError:\n value = None\n return value\n", "path": "awscli/customizations/configure/get.py"}], "after_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\nimport logging\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.compat import six\n\nfrom . 
import PREDEFINED_SECTION_NAMES\n\nLOG = logging.getLogger(__name__)\n\n\nclass ConfigureGetCommand(BasicCommand):\n NAME = 'get'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',\n '_description.rst')\n SYNOPSIS = 'aws configure get varname [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to retrieve.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n\n def __init__(self, session, stream=sys.stdout, error_stream=sys.stderr):\n super(ConfigureGetCommand, self).__init__(session)\n self._stream = stream\n self._error_stream = error_stream\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n\n if '.' not in varname:\n # get_scoped_config() returns the config variables in the config\n # file (not the logical_var names), which is what we want.\n config = self._session.get_scoped_config()\n value = config.get(varname)\n else:\n value = self._get_dotted_config_value(varname)\n\n LOG.debug(u'Config value retrieved: %s' % value)\n\n if isinstance(value, six.string_types):\n self._stream.write(value)\n self._stream.write('\\n')\n return 0\n elif isinstance(value, dict):\n # TODO: add support for this. We would need to print it off in\n # the same format as the config file.\n self._error_stream.write(\n 'varname (%s) must reference a value, not a section or '\n 'sub-section.' % varname\n )\n return 1\n else:\n return 1\n\n def _get_dotted_config_value(self, varname):\n parts = varname.split('.')\n num_dots = varname.count('.')\n\n # Logic to deal with predefined sections like [preview], [plugin] and\n # etc.\n if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:\n full_config = self._session.full_config\n section, config_name = varname.split('.')\n value = full_config.get(section, {}).get(config_name)\n if value is None:\n # Try to retrieve it from the profile config.\n value = full_config['profiles'].get(\n section, {}).get(config_name)\n return value\n\n if parts[0] == 'profile':\n profile_name = parts[1]\n config_name = parts[2]\n remaining = parts[3:]\n # Check if varname starts with 'default' profile (e.g.\n # default.emr-dev.emr.instance_profile) If not, go further to check\n # if varname starts with a known profile name\n elif parts[0] == 'default' or (\n parts[0] in self._session.full_config['profiles']):\n profile_name = parts[0]\n config_name = parts[1]\n remaining = parts[2:]\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is None:\n profile_name = 'default'\n config_name = parts[0]\n remaining = parts[1:]\n\n value = self._session.full_config['profiles'].get(\n profile_name, {}).get(config_name)\n if len(remaining) == 1:\n try:\n value = value.get(remaining[-1])\n except AttributeError:\n value = None\n return value\n", "path": "awscli/customizations/configure/get.py"}]} | 1,619 | 974 |
gh_patches_debug_24329 | rasdani/github-patches | git_diff | pulp__pulpcore-259 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
file:// sync deletes files from directory
**Version**
Pulpcore 3.39
**Describe the bug**
When syncing file:// repositories, files are disappearing after the sync.
**To Reproduce**
1) Copy these two repositories to the FS:
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2
2) Sync one, then the other
3) See that some files disappeared.
- In my case, file2 lost every file except PULP_MANIFEST
**Expected behavior**
No files disappear.
**Additional context**
This also occurred with RPM content type files.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/models/base.py`
Content:
```
1 import uuid
2
3 from django.db import models
4 from django.db.models import options
5
6
7 class Model(models.Model):
8 """Base model class for all Pulp models.
9
10 Fields:
11 _created (models.DateTimeField): Created timestamp UTC.
12 _last_updated (models.DateTimeField): Last updated timestamp UTC.
13
14 References:
15
16 * https://docs.djangoproject.com/en/1.8/topics/db/models/#automatic-primary-key-fields
17
18 """
19 _id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
20 _created = models.DateTimeField(auto_now_add=True)
21 _last_updated = models.DateTimeField(auto_now=True, null=True)
22
23 class Meta:
24 abstract = True
25
26 def __str__(self):
27 try:
28 # if we have a name, use it
29 return '<{}: {}>'.format(self._meta.object_name, self.name)
30 except AttributeError:
31 # if we don't, use the pk
32 return '<{}: pk={}>'.format(self._meta.object_name, self.pk)
33
34 def __repr__(self):
35 return str(self)
36
37
38 class MasterModel(Model):
39 """Base model for the "Master" model in a "Master-Detail" relationship.
40
41 Provides methods for casting down to detail types, back up to the master type,
42 as well as a model field for tracking the type.
43
44 Attributes:
45
46 TYPE (str): Default constant value saved into the ``_type``
47 field of Model instances
48
49 Fields:
50
51 _type: The user-facing string identifying the detail type of this model
52
53 Warning:
54 Subclasses of this class rely on there being no other parent/child Model
55 relationships than the Master/Detail relationship. All subclasses must use
56 only abstract Model base classes for MasterModel to behave properly.
57 Specifically, OneToOneField relationships must not be used in any MasterModel
58 subclass.
59
60 """
61
62 # TYPE is the user-facing string that describes this type. It is used to construct API
63 # endpoints for Detail models, and will be seen in the URLs generated for those Detail models.
64 # It can also be used for filtering across a relation where a model is related to a Master
65 # model. Set this to something reasonable in Master and Detail model classes, e.g. when
66 # create a master model, like "Remote", its TYPE value could be "remote". Then, when
67 # creating a Remote Detail class like PackageRemote, its _type value could be "package",
68 # not "package_remote", since "package_remote" would be redundant in the context of
69 # a remote Master model.
70 TYPE = None
71
72 # This field must have a value when models are saved, and defaults to the value of
73 # the TYPE attribute on the Model being saved (seen above).
74 _type = models.TextField(null=False, default=None)
75
76 class Meta:
77 abstract = True
78
79 def save(self, *args, **kwargs):
80 # instances of "detail" models that subclass MasterModel are exposed
81 # on instances of MasterModel by the string stored in that model's TYPE attr.
82 # Storing this _type in a column on the MasterModel next to makes it trivial
83 # to filter for specific detail model types across master's relations.
84 # Prepend the TYPE defined on a detail model with a django app label.
85 # If a plugin sets the type field themselves, it's used as-is.
86 if not self._type:
87 self._type = '{app_label}.{type}'.format(app_label=self._meta.app_label,
88 type=self.TYPE)
89 return super().save(*args, **kwargs)
90
91 def cast(self):
92 """Return a "Detail" model instance of this master-detail pair.
93
94 If this model is already an instance of its detail type, it will return itself.
95 """
96 # Go through our related objects, find the one that's a subclass of this model
97 # on a OneToOneField, which identifies it as a potential detail relation.
98 for rel in self._meta.related_objects:
99 if rel.one_to_one and issubclass(rel.related_model, self._meta.model):
100 # The name of this relation is the name of the attr on the model instance.
101 # If that attr as a value, that means a row exists for this model in the
102 # related detail table. Cast and return this value, recursively following
103 # master/detail relationships down to the last table (the most detailed).
104 try:
105 return getattr(self, rel.name).cast()
106 except AttributeError:
107 continue
108 else:
109 # The for loop exited normally, there are no more detailed models than this
110 # one in this instance's master/detail ancestry, so return here.
111 return self
112
113 @property
114 def master(self):
115 """The "Master" model instance of this master-detail pair
116
117 If this is already the master model instance, it will return itself.
118 """
119 if self._meta.master_model:
120 return self._meta.master_model(pk=self.pk)
121 else:
122 return self
123
124 def __str__(self):
125 # similar to Model's __str__, but type-aware
126 cast = self.cast()
127 if cast is self:
128 return super().__str__()
129
130 try:
131 return '<{} (_type={}): {}>'.format(self._meta.object_name, cast.TYPE, cast.name)
132 except AttributeError:
133 return '<{} (_type={}): pk={}>'.format(self._meta.object_name, cast.TYPE, cast.pk)
134
135
136 # Add properties to model _meta info to support master/detail models
137 # If this property is not None on a Model, then that Model is a Detail Model.
138 # Doing this in a non-monkeypatch way would mean a lot of effort to achieve the same result
139 # (e.g. custom model metaclass, custom Options implementation, etc). These could be classmethods
140 # on Model classes, but it's easy enough to use the model's _meta namespace to do this, since
141 # that's where other methods like this exist in Django.
142 def master_model(options):
143 """
144 The Master model class of this Model's Master/Detail relationship.
145
146 Accessible at ``<model_class>._meta.master_model``, the Master model class in a Master/Detail
147 relationship is the most generic non-abstract Model in this model's multiple-table chain
148 of inheritance.
149
150 If this model is not a detail model, None will be returned.
151 """
152 # If this isn't even a MasterModel descendant, don't bother.
153 if not issubclass(options.model, MasterModel):
154 return None
155 try:
156 # The last item in this list is the oldest ancestor. Since the MasterModel usage
157 # is to declare your master by subclassing MasterModel, and MasterModel is abstract,
158 # the oldest ancestor model is the Master Model.
159 return options.get_parent_list()[-1]
160 except IndexError:
161 # Also None if this model is itself the master.
162 return None
163
164
165 options.Options.master_model = property(master_model)
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/models/base.py b/pulpcore/app/models/base.py
--- a/pulpcore/app/models/base.py
+++ b/pulpcore/app/models/base.py
@@ -1,7 +1,9 @@
+from gettext import gettext as _
import uuid
from django.db import models
from django.db.models import options
+from django.db.models.base import ModelBase
class Model(models.Model):
@@ -35,7 +37,24 @@
return str(self)
-class MasterModel(Model):
+class MasterModelMeta(ModelBase):
+ def __new__(cls, name, bases, attrs, **kwargs):
+ """Override __new__ to set the default_related_name."""
+ if Model not in bases and MasterModel not in bases: # Only affects "Detail" models.
+ meta = attrs.get("Meta")
+ default_related_name = getattr(
+ meta, "default_related_name", None)
+ abstract = getattr(meta, "abstract", None)
+
+ if not default_related_name and not abstract:
+ raise Exception(_("The 'default_related_name' option has not been set for "
+ "{class_name}").format(class_name=name))
+
+ new_class = super().__new__(cls, name, bases, attrs, **kwargs)
+ return new_class
+
+
+class MasterModel(Model, metaclass=MasterModelMeta):
"""Base model for the "Master" model in a "Master-Detail" relationship.
Provides methods for casting down to detail types, back up to the master type,
| {"golden_diff": "diff --git a/pulpcore/app/models/base.py b/pulpcore/app/models/base.py\n--- a/pulpcore/app/models/base.py\n+++ b/pulpcore/app/models/base.py\n@@ -1,7 +1,9 @@\n+from gettext import gettext as _\n import uuid\n \n from django.db import models\n from django.db.models import options\n+from django.db.models.base import ModelBase\n \n \n class Model(models.Model):\n@@ -35,7 +37,24 @@\n return str(self)\n \n \n-class MasterModel(Model):\n+class MasterModelMeta(ModelBase):\n+ def __new__(cls, name, bases, attrs, **kwargs):\n+ \"\"\"Override __new__ to set the default_related_name.\"\"\"\n+ if Model not in bases and MasterModel not in bases: # Only affects \"Detail\" models.\n+ meta = attrs.get(\"Meta\")\n+ default_related_name = getattr(\n+ meta, \"default_related_name\", None)\n+ abstract = getattr(meta, \"abstract\", None)\n+\n+ if not default_related_name and not abstract:\n+ raise Exception(_(\"The 'default_related_name' option has not been set for \"\n+ \"{class_name}\").format(class_name=name))\n+\n+ new_class = super().__new__(cls, name, bases, attrs, **kwargs)\n+ return new_class\n+\n+\n+class MasterModel(Model, metaclass=MasterModelMeta):\n \"\"\"Base model for the \"Master\" model in a \"Master-Detail\" relationship.\n \n Provides methods for casting down to detail types, back up to the master type,\n", "issue": "file:// sync deletes files from directory\n**Version**\r\nPulpcore 3.39\r\n\r\n**Describe the bug**\r\nWhen syncing file:// repositories, files are disappearing after the sync.\r\n\r\n**To Reproduce**\r\n1) Copy these two repositories to the FS:\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2\r\n2) Sync one, then the other\r\n3) See that some files disappeared.\r\n - In my case, file2 lost every file except PULP_MANIFEST\r\n\r\n\r\n**Expected behavior**\r\nNo files disappear.\r\n\r\n**Additional context**\r\nThis also occurred with RPM content type files.\r\n\n", "before_files": [{"content": "import uuid\n\nfrom django.db import models\nfrom django.db.models import options\n\n\nclass Model(models.Model):\n \"\"\"Base model class for all Pulp models.\n\n Fields:\n _created (models.DateTimeField): Created timestamp UTC.\n _last_updated (models.DateTimeField): Last updated timestamp UTC.\n\n References:\n\n * https://docs.djangoproject.com/en/1.8/topics/db/models/#automatic-primary-key-fields\n\n \"\"\"\n _id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n _created = models.DateTimeField(auto_now_add=True)\n _last_updated = models.DateTimeField(auto_now=True, null=True)\n\n class Meta:\n abstract = True\n\n def __str__(self):\n try:\n # if we have a name, use it\n return '<{}: {}>'.format(self._meta.object_name, self.name)\n except AttributeError:\n # if we don't, use the pk\n return '<{}: pk={}>'.format(self._meta.object_name, self.pk)\n\n def __repr__(self):\n return str(self)\n\n\nclass MasterModel(Model):\n \"\"\"Base model for the \"Master\" model in a \"Master-Detail\" relationship.\n\n Provides methods for casting down to detail types, back up to the master type,\n as well as a model field for tracking the type.\n\n Attributes:\n\n TYPE (str): Default constant value saved into the ``_type``\n field of Model instances\n\n Fields:\n\n _type: The user-facing string identifying the detail type of this model\n\n Warning:\n Subclasses of this class rely on there being no other parent/child Model\n relationships than 
the Master/Detail relationship. All subclasses must use\n only abstract Model base classes for MasterModel to behave properly.\n Specifically, OneToOneField relationships must not be used in any MasterModel\n subclass.\n\n \"\"\"\n\n # TYPE is the user-facing string that describes this type. It is used to construct API\n # endpoints for Detail models, and will be seen in the URLs generated for those Detail models.\n # It can also be used for filtering across a relation where a model is related to a Master\n # model. Set this to something reasonable in Master and Detail model classes, e.g. when\n # create a master model, like \"Remote\", its TYPE value could be \"remote\". Then, when\n # creating a Remote Detail class like PackageRemote, its _type value could be \"package\",\n # not \"package_remote\", since \"package_remote\" would be redundant in the context of\n # a remote Master model.\n TYPE = None\n\n # This field must have a value when models are saved, and defaults to the value of\n # the TYPE attribute on the Model being saved (seen above).\n _type = models.TextField(null=False, default=None)\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n # instances of \"detail\" models that subclass MasterModel are exposed\n # on instances of MasterModel by the string stored in that model's TYPE attr.\n # Storing this _type in a column on the MasterModel next to makes it trivial\n # to filter for specific detail model types across master's relations.\n # Prepend the TYPE defined on a detail model with a django app label.\n # If a plugin sets the type field themselves, it's used as-is.\n if not self._type:\n self._type = '{app_label}.{type}'.format(app_label=self._meta.app_label,\n type=self.TYPE)\n return super().save(*args, **kwargs)\n\n def cast(self):\n \"\"\"Return a \"Detail\" model instance of this master-detail pair.\n\n If this model is already an instance of its detail type, it will return itself.\n \"\"\"\n # Go through our related objects, find the one that's a subclass of this model\n # on a OneToOneField, which identifies it as a potential detail relation.\n for rel in self._meta.related_objects:\n if rel.one_to_one and issubclass(rel.related_model, self._meta.model):\n # The name of this relation is the name of the attr on the model instance.\n # If that attr as a value, that means a row exists for this model in the\n # related detail table. 
Cast and return this value, recursively following\n # master/detail relationships down to the last table (the most detailed).\n try:\n return getattr(self, rel.name).cast()\n except AttributeError:\n continue\n else:\n # The for loop exited normally, there are no more detailed models than this\n # one in this instance's master/detail ancestry, so return here.\n return self\n\n @property\n def master(self):\n \"\"\"The \"Master\" model instance of this master-detail pair\n\n If this is already the master model instance, it will return itself.\n \"\"\"\n if self._meta.master_model:\n return self._meta.master_model(pk=self.pk)\n else:\n return self\n\n def __str__(self):\n # similar to Model's __str__, but type-aware\n cast = self.cast()\n if cast is self:\n return super().__str__()\n\n try:\n return '<{} (_type={}): {}>'.format(self._meta.object_name, cast.TYPE, cast.name)\n except AttributeError:\n return '<{} (_type={}): pk={}>'.format(self._meta.object_name, cast.TYPE, cast.pk)\n\n\n# Add properties to model _meta info to support master/detail models\n# If this property is not None on a Model, then that Model is a Detail Model.\n# Doing this in a non-monkeypatch way would mean a lot of effort to achieve the same result\n# (e.g. custom model metaclass, custom Options implementation, etc). These could be classmethods\n# on Model classes, but it's easy enough to use the model's _meta namespace to do this, since\n# that's where other methods like this exist in Django.\ndef master_model(options):\n \"\"\"\n The Master model class of this Model's Master/Detail relationship.\n\n Accessible at ``<model_class>._meta.master_model``, the Master model class in a Master/Detail\n relationship is the most generic non-abstract Model in this model's multiple-table chain\n of inheritance.\n\n If this model is not a detail model, None will be returned.\n \"\"\"\n # If this isn't even a MasterModel descendant, don't bother.\n if not issubclass(options.model, MasterModel):\n return None\n try:\n # The last item in this list is the oldest ancestor. 
Since the MasterModel usage\n # is to declare your master by subclassing MasterModel, and MasterModel is abstract,\n # the oldest ancestor model is the Master Model.\n return options.get_parent_list()[-1]\n except IndexError:\n # Also None if this model is itself the master.\n return None\n\n\noptions.Options.master_model = property(master_model)\n", "path": "pulpcore/app/models/base.py"}], "after_files": [{"content": "from gettext import gettext as _\nimport uuid\n\nfrom django.db import models\nfrom django.db.models import options\nfrom django.db.models.base import ModelBase\n\n\nclass Model(models.Model):\n \"\"\"Base model class for all Pulp models.\n\n Fields:\n _created (models.DateTimeField): Created timestamp UTC.\n _last_updated (models.DateTimeField): Last updated timestamp UTC.\n\n References:\n\n * https://docs.djangoproject.com/en/1.8/topics/db/models/#automatic-primary-key-fields\n\n \"\"\"\n _id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n _created = models.DateTimeField(auto_now_add=True)\n _last_updated = models.DateTimeField(auto_now=True, null=True)\n\n class Meta:\n abstract = True\n\n def __str__(self):\n try:\n # if we have a name, use it\n return '<{}: {}>'.format(self._meta.object_name, self.name)\n except AttributeError:\n # if we don't, use the pk\n return '<{}: pk={}>'.format(self._meta.object_name, self.pk)\n\n def __repr__(self):\n return str(self)\n\n\nclass MasterModelMeta(ModelBase):\n def __new__(cls, name, bases, attrs, **kwargs):\n \"\"\"Override __new__ to set the default_related_name.\"\"\"\n if Model not in bases and MasterModel not in bases: # Only affects \"Detail\" models.\n meta = attrs.get(\"Meta\")\n default_related_name = getattr(\n meta, \"default_related_name\", None)\n abstract = getattr(meta, \"abstract\", None)\n\n if not default_related_name and not abstract:\n raise Exception(_(\"The 'default_related_name' option has not been set for \"\n \"{class_name}\").format(class_name=name))\n\n new_class = super().__new__(cls, name, bases, attrs, **kwargs)\n return new_class\n\n\nclass MasterModel(Model, metaclass=MasterModelMeta):\n \"\"\"Base model for the \"Master\" model in a \"Master-Detail\" relationship.\n\n Provides methods for casting down to detail types, back up to the master type,\n as well as a model field for tracking the type.\n\n Attributes:\n\n TYPE (str): Default constant value saved into the ``_type``\n field of Model instances\n\n Fields:\n\n _type: The user-facing string identifying the detail type of this model\n\n Warning:\n Subclasses of this class rely on there being no other parent/child Model\n relationships than the Master/Detail relationship. All subclasses must use\n only abstract Model base classes for MasterModel to behave properly.\n Specifically, OneToOneField relationships must not be used in any MasterModel\n subclass.\n\n \"\"\"\n\n # TYPE is the user-facing string that describes this type. It is used to construct API\n # endpoints for Detail models, and will be seen in the URLs generated for those Detail models.\n # It can also be used for filtering across a relation where a model is related to a Master\n # model. Set this to something reasonable in Master and Detail model classes, e.g. when\n # create a master model, like \"Remote\", its TYPE value could be \"remote\". 
Then, when\n # creating a Remote Detail class like PackageRemote, its _type value could be \"package\",\n # not \"package_remote\", since \"package_remote\" would be redundant in the context of\n # a remote Master model.\n TYPE = None\n\n # This field must have a value when models are saved, and defaults to the value of\n # the TYPE attribute on the Model being saved (seen above).\n _type = models.TextField(null=False, default=None)\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n # instances of \"detail\" models that subclass MasterModel are exposed\n # on instances of MasterModel by the string stored in that model's TYPE attr.\n # Storing this _type in a column on the MasterModel next to makes it trivial\n # to filter for specific detail model types across master's relations.\n # Prepend the TYPE defined on a detail model with a django app label.\n # If a plugin sets the type field themselves, it's used as-is.\n if not self._type:\n self._type = '{app_label}.{type}'.format(app_label=self._meta.app_label,\n type=self.TYPE)\n return super().save(*args, **kwargs)\n\n def cast(self):\n \"\"\"Return a \"Detail\" model instance of this master-detail pair.\n\n If this model is already an instance of its detail type, it will return itself.\n \"\"\"\n # Go through our related objects, find the one that's a subclass of this model\n # on a OneToOneField, which identifies it as a potential detail relation.\n for rel in self._meta.related_objects:\n if rel.one_to_one and issubclass(rel.related_model, self._meta.model):\n # The name of this relation is the name of the attr on the model instance.\n # If that attr as a value, that means a row exists for this model in the\n # related detail table. Cast and return this value, recursively following\n # master/detail relationships down to the last table (the most detailed).\n try:\n return getattr(self, rel.name).cast()\n except AttributeError:\n continue\n else:\n # The for loop exited normally, there are no more detailed models than this\n # one in this instance's master/detail ancestry, so return here.\n return self\n\n @property\n def master(self):\n \"\"\"The \"Master\" model instance of this master-detail pair\n\n If this is already the master model instance, it will return itself.\n \"\"\"\n if self._meta.master_model:\n return self._meta.master_model(pk=self.pk)\n else:\n return self\n\n def __str__(self):\n # similar to Model's __str__, but type-aware\n cast = self.cast()\n if cast is self:\n return super().__str__()\n\n try:\n return '<{} (_type={}): {}>'.format(self._meta.object_name, cast.TYPE, cast.name)\n except AttributeError:\n return '<{} (_type={}): pk={}>'.format(self._meta.object_name, cast.TYPE, cast.pk)\n\n\n# Add properties to model _meta info to support master/detail models\n# If this property is not None on a Model, then that Model is a Detail Model.\n# Doing this in a non-monkeypatch way would mean a lot of effort to achieve the same result\n# (e.g. custom model metaclass, custom Options implementation, etc). 
These could be classmethods\n# on Model classes, but it's easy enough to use the model's _meta namespace to do this, since\n# that's where other methods like this exist in Django.\ndef master_model(options):\n \"\"\"\n The Master model class of this Model's Master/Detail relationship.\n\n Accessible at ``<model_class>._meta.master_model``, the Master model class in a Master/Detail\n relationship is the most generic non-abstract Model in this model's multiple-table chain\n of inheritance.\n\n If this model is not a detail model, None will be returned.\n \"\"\"\n # If this isn't even a MasterModel descendant, don't bother.\n if not issubclass(options.model, MasterModel):\n return None\n try:\n # The last item in this list is the oldest ancestor. Since the MasterModel usage\n # is to declare your master by subclassing MasterModel, and MasterModel is abstract,\n # the oldest ancestor model is the Master Model.\n return options.get_parent_list()[-1]\n except IndexError:\n # Also None if this model is itself the master.\n return None\n\n\noptions.Options.master_model = property(master_model)\n", "path": "pulpcore/app/models/base.py"}]} | 2,304 | 336 |
gh_patches_debug_30466 | rasdani/github-patches | git_diff | vaexio__vaex-1150 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG-REPORT] TypeError: can't pickle vaex.superutils.ordered_set
**Description**
If I use `df.func.where` with `isin`, I can't pickle the resulting state.
This is for machine learning pipelines.
reproduce:
```
import vaex
import pickle
df = vaex.from_dict({'a':[1,2,3]})
df['b'] = df.func.where(df['a'].isin([1]),1,2)
pickle.dumps(df.state_get())
...
TypeError: can't pickle vaex.superutils.ordered_set_int64 objects
```
**Software information**
- vaex-core==4.0.0a11
- Vaex was installed via: pip
- OS: Mac
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/vaex-core/vaex/hash.py`
Content:
```
1 import os
2
3
4 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
5 if not on_rtd:
6 from .superutils import *
7 from . import superutils
8 ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])
9
10
11 def counter_type_from_dtype(dtype, transient=True):
12 from .array_types import is_string_type
13 if is_string_type(dtype):
14 if transient:
15 postfix = 'string'
16 else:
17 postfix = 'string' # view not support atm
18 else:
19 postfix = str(dtype)
20 if postfix == '>f8':
21 postfix = 'float64'
22 if postfix == 'double': # arrow
23 postfix = 'float64'
24 name = 'counter_' + postfix
25 return globals()[name]
26
27 def ordered_set_type_from_dtype(dtype, transient=True):
28 from .array_types import is_string_type
29 if is_string_type(dtype):
30 if transient:
31 postfix = 'string'
32 else:
33 postfix = 'string' # not support atm
34 else:
35 postfix = str(dtype)
36 if postfix == '>f8':
37 postfix = 'float64'
38 name = 'ordered_set_' + postfix
39 return globals()[name]
40
41 def index_type_from_dtype(dtype, transient=True, prime_growth=False):
42 from .array_types import is_string_type
43 if is_string_type(dtype):
44 if transient:
45 postfix = 'string'
46 else:
47 postfix = 'string' # not support atm
48 else:
49 postfix = str(dtype)
50 if postfix == '>f8':
51 postfix = 'float64'
52 name = 'index_hash_' + postfix
53 if prime_growth:
54 name += "_prime_growth"
55 return globals()[name]
56
57 # from numpy import *
58 # import IPython
59 # IPython.embed()
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/vaex-core/vaex/hash.py b/packages/vaex-core/vaex/hash.py
--- a/packages/vaex-core/vaex/hash.py
+++ b/packages/vaex-core/vaex/hash.py
@@ -1,12 +1,19 @@
import os
+import copyreg
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
from .superutils import *
from . import superutils
+
ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])
+ def pickle(x):
+ return type(x), (x.extract(), x.count, x.nan_count, x.null_count)
+ for cls in ordered_set:
+ copyreg.pickle(cls, pickle)
+
def counter_type_from_dtype(dtype, transient=True):
from .array_types import is_string_type
@@ -24,6 +31,7 @@
name = 'counter_' + postfix
return globals()[name]
+
def ordered_set_type_from_dtype(dtype, transient=True):
from .array_types import is_string_type
if is_string_type(dtype):
@@ -38,6 +46,7 @@
name = 'ordered_set_' + postfix
return globals()[name]
+
def index_type_from_dtype(dtype, transient=True, prime_growth=False):
from .array_types import is_string_type
if is_string_type(dtype):
@@ -53,7 +62,3 @@
if prime_growth:
name += "_prime_growth"
return globals()[name]
-
-# from numpy import *
-# import IPython
-# IPython.embed()
\ No newline at end of file
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/hash.py b/packages/vaex-core/vaex/hash.py\n--- a/packages/vaex-core/vaex/hash.py\n+++ b/packages/vaex-core/vaex/hash.py\n@@ -1,12 +1,19 @@\n import os\n+import copyreg\n \n \n on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n if not on_rtd:\n from .superutils import *\n from . import superutils\n+\n ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])\n \n+ def pickle(x):\n+ return type(x), (x.extract(), x.count, x.nan_count, x.null_count)\n+ for cls in ordered_set:\n+ copyreg.pickle(cls, pickle)\n+\n \n def counter_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n@@ -24,6 +31,7 @@\n name = 'counter_' + postfix\n return globals()[name]\n \n+\n def ordered_set_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n@@ -38,6 +46,7 @@\n name = 'ordered_set_' + postfix\n return globals()[name]\n \n+\n def index_type_from_dtype(dtype, transient=True, prime_growth=False):\n from .array_types import is_string_type\n if is_string_type(dtype):\n@@ -53,7 +62,3 @@\n if prime_growth:\n name += \"_prime_growth\"\n return globals()[name]\n-\n-# from numpy import *\n-# import IPython\n-# IPython.embed()\n\\ No newline at end of file\n", "issue": "[BUG-REPORT] TypeError: can't pickle vaex.superutils.ordered_set\n**Description**\r\nIf I use `df.func.where` with `isin`, I can't pickle the resulted state.\r\nThis is for machine learning pipelines. \r\n\r\nreproduce:\r\n```\r\nimport vaex\r\nimport pickle\r\n\r\ndf = vaex.from_dict({'a':[1,2,3]})\r\ndf['b'] = df.func.where(df['a'].isin([1]),1,2)\r\npickle.dumps(df.state_get())\r\n...\r\nTypeError: can't pickle vaex.superutils.ordered_set_int64 objects\r\n```\r\n\r\n\r\n**Software information**\r\n - vaex-core==4.0.0a11\r\n - Vaex was installed via: pip\r\n - OS: Mac\r\n \n", "before_files": [{"content": "import os\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd:\n from .superutils import *\n from . 
import superutils\n ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])\n\n\ndef counter_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # view not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n if postfix == 'double': # arrow\n postfix = 'float64'\n name = 'counter_' + postfix\n return globals()[name]\n\ndef ordered_set_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n name = 'ordered_set_' + postfix\n return globals()[name]\n\ndef index_type_from_dtype(dtype, transient=True, prime_growth=False):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n name = 'index_hash_' + postfix\n if prime_growth:\n name += \"_prime_growth\"\n return globals()[name]\n\n# from numpy import *\n# import IPython\n# IPython.embed()", "path": "packages/vaex-core/vaex/hash.py"}], "after_files": [{"content": "import os\nimport copyreg\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd:\n from .superutils import *\n from . import superutils\n\n ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])\n\n def pickle(x):\n return type(x), (x.extract(), x.count, x.nan_count, x.null_count)\n for cls in ordered_set:\n copyreg.pickle(cls, pickle)\n\n\ndef counter_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # view not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n if postfix == 'double': # arrow\n postfix = 'float64'\n name = 'counter_' + postfix\n return globals()[name]\n\n\ndef ordered_set_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n name = 'ordered_set_' + postfix\n return globals()[name]\n\n\ndef index_type_from_dtype(dtype, transient=True, prime_growth=False):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n name = 'index_hash_' + postfix\n if prime_growth:\n name += \"_prime_growth\"\n return globals()[name]\n", "path": "packages/vaex-core/vaex/hash.py"}]} | 932 | 381 |
gh_patches_debug_24434 | rasdani/github-patches | git_diff | rdmorganiser__rdmo-571 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Users with the same email cannot be invited
### Description / Beschreibung
Surprisingly, email addresses in RDMO don't have to be unique. If you invite an existing user to a project via email with a non-unique address, the manager's `.get()` function raises a `MultipleObjectsReturned`.
### Expected behaviour / Erwartetes Verhalten
(a) emails should be unique in the first place, or
(b) both users should be invited
Probably (b) is easier to implement.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rdmo/projects/forms.py`
Content:
```
1 from django import forms
2 from django.conf import settings
3 from django.contrib.auth import get_user_model
4 from django.core.exceptions import ValidationError
5 from django.core.validators import EmailValidator
6 from django.db.models import Q
7 from django.utils.safestring import mark_safe
8 from django.utils.translation import gettext_lazy as _
9
10 from rdmo.core.constants import VALUE_TYPE_FILE
11 from rdmo.core.plugins import get_plugin
12 from rdmo.core.utils import markdown2html
13
14 from .constants import ROLE_CHOICES
15 from .models import (Integration, IntegrationOption, Invite, Membership,
16 Project, Snapshot)
17
18
19 class CatalogChoiceField(forms.ModelChoiceField):
20
21 def label_from_instance(self, obj):
22 return mark_safe('<b>%s</b></br>%s' % (obj.title, markdown2html(obj.help)))
23
24
25 class TasksMultipleChoiceField(forms.ModelMultipleChoiceField):
26
27 def label_from_instance(self, obj):
28 return mark_safe('<b>%s</b></br>%s' % (obj.title, markdown2html(obj.text)))
29
30
31 class ViewsMultipleChoiceField(forms.ModelMultipleChoiceField):
32
33 def label_from_instance(self, obj):
34 return mark_safe('<b>%s</b></br>%s' % (obj.title, markdown2html(obj.help)))
35
36
37 class ProjectForm(forms.ModelForm):
38
39 use_required_attribute = False
40
41 def __init__(self, *args, **kwargs):
42 catalogs = kwargs.pop('catalogs')
43 projects = kwargs.pop('projects')
44 super().__init__(*args, **kwargs)
45 self.fields['title'].widget.attrs.update({
46 'autofocus': True
47 })
48 self.fields['catalog'].queryset = catalogs
49 self.fields['catalog'].empty_label = None
50 self.fields['catalog'].initial = catalogs.first()
51
52 if settings.NESTED_PROJECTS:
53 self.fields['parent'].queryset = projects
54
55 class Meta:
56 model = Project
57
58 fields = ['title', 'description', 'catalog']
59 if settings.NESTED_PROJECTS:
60 fields += ['parent']
61
62 field_classes = {
63 'catalog': CatalogChoiceField
64 }
65 widgets = {
66 'catalog': forms.RadioSelect()
67 }
68
69
70 class ProjectUpdateInformationForm(forms.ModelForm):
71
72 use_required_attribute = False
73
74 class Meta:
75 model = Project
76 fields = ('title', 'description')
77
78
79 class ProjectUpdateCatalogForm(forms.ModelForm):
80
81 use_required_attribute = False
82
83 def __init__(self, *args, **kwargs):
84 catalogs = kwargs.pop('catalogs')
85 super().__init__(*args, **kwargs)
86 self.fields['catalog'].queryset = catalogs
87 self.fields['catalog'].empty_label = None
88
89 class Meta:
90 model = Project
91 fields = ('catalog', )
92 field_classes = {
93 'catalog': CatalogChoiceField
94 }
95 widgets = {
96 'catalog': forms.RadioSelect()
97 }
98
99
100 class ProjectUpdateTasksForm(forms.ModelForm):
101
102 use_required_attribute = False
103
104 def __init__(self, *args, **kwargs):
105 tasks = kwargs.pop('tasks')
106 super().__init__(*args, **kwargs)
107 self.fields['tasks'].queryset = tasks
108
109 class Meta:
110 model = Project
111 fields = ('tasks', )
112 field_classes = {
113 'tasks': TasksMultipleChoiceField
114 }
115 widgets = {
116 'tasks': forms.CheckboxSelectMultiple()
117 }
118
119
120 class ProjectUpdateViewsForm(forms.ModelForm):
121
122 use_required_attribute = False
123
124 def __init__(self, *args, **kwargs):
125 views = kwargs.pop('views')
126 super().__init__(*args, **kwargs)
127 self.fields['views'].queryset = views
128
129 class Meta:
130 model = Project
131 fields = ('views', )
132 field_classes = {
133 'views': ViewsMultipleChoiceField
134 }
135 widgets = {
136 'views': forms.CheckboxSelectMultiple()
137 }
138
139
140 class ProjectUpdateParentForm(forms.ModelForm):
141
142 use_required_attribute = False
143
144 def __init__(self, *args, **kwargs):
145 projects = kwargs.pop('projects')
146 super().__init__(*args, **kwargs)
147 self.fields['parent'].queryset = projects
148
149 class Meta:
150 model = Project
151 fields = ('parent', )
152
153
154 class SnapshotCreateForm(forms.ModelForm):
155
156 use_required_attribute = False
157
158 class Meta:
159 model = Snapshot
160 fields = ('title', 'description')
161
162 def __init__(self, *args, **kwargs):
163 self.project = kwargs.pop('project')
164 super(SnapshotCreateForm, self).__init__(*args, **kwargs)
165
166 def save(self, *args, **kwargs):
167 self.instance.project = self.project
168 return super(SnapshotCreateForm, self).save(*args, **kwargs)
169
170
171 class MembershipCreateForm(forms.Form):
172
173 use_required_attribute = False
174
175 username_or_email = forms.CharField(widget=forms.TextInput(attrs={'placeholder': _('Username or e-mail')}),
176 label=_('User'),
177 help_text=_('The username or e-mail of the new user.'))
178 role = forms.CharField(widget=forms.RadioSelect(choices=ROLE_CHOICES),
179 initial='author')
180
181 def __init__(self, *args, **kwargs):
182 self.project = kwargs.pop('project')
183 self.is_site_manager = kwargs.pop('is_site_manager')
184 super().__init__(*args, **kwargs)
185
186 if self.is_site_manager:
187 self.fields['silent'] = forms.BooleanField(
188 required=False,
189 label=_('Add member silently'),
190 help_text=_('As site manager or admin, you can directly add users without notifying them via e-mail, when you check the following checkbox.')
191 )
192
193 def clean_username_or_email(self):
194 username_or_email = self.cleaned_data['username_or_email']
195
196 # check if it is a registered
197 try:
198 self.cleaned_data['user'] = get_user_model().objects.get(Q(username=username_or_email) | Q(email__iexact=username_or_email))
199 self.cleaned_data['email'] = self.cleaned_data['user'].email
200
201 if self.cleaned_data['user'] in self.project.user.all():
202 raise ValidationError(_('The user is already a member of the project.'))
203
204 except get_user_model().DoesNotExist:
205 if settings.PROJECT_SEND_INVITE:
206 # check if it is a valid email address, this will raise the correct ValidationError
207 EmailValidator()(username_or_email)
208
209 self.cleaned_data['user'] = None
210 self.cleaned_data['email'] = username_or_email
211 else:
212 self.cleaned_data['user'] = None
213 self.cleaned_data['email'] = None
214 raise ValidationError(_('A user with this username or e-mail was not found. Only registered users can be invited.'))
215
216 def clean(self):
217 if self.cleaned_data.get('silent') is True and self.cleaned_data.get('user') is None:
218 raise ValidationError(_('Only existing users can be added silently.'))
219
220 def save(self):
221 if self.is_site_manager and self.cleaned_data.get('silent') is True:
222 Membership.objects.create(
223 project=self.project,
224 user=self.cleaned_data.get('user'),
225 role=self.cleaned_data.get('role')
226 )
227 else:
228 invite, created = Invite.objects.get_or_create(
229 project=self.project,
230 user=self.cleaned_data.get('user'),
231 email=self.cleaned_data.get('email')
232 )
233 invite.role = self.cleaned_data.get('role')
234 invite.make_token()
235 invite.save()
236
237 return invite
238
239
240 class IntegrationForm(forms.ModelForm):
241
242 class Meta:
243 model = Integration
244 fields = ()
245
246 def __init__(self, *args, **kwargs):
247 self.project = kwargs.pop('project')
248 self.provider_key = kwargs.pop('provider_key', None)
249 super().__init__(*args, **kwargs)
250
251 # get the provider
252 if self.provider_key:
253 self.provider = get_plugin('PROJECT_ISSUE_PROVIDERS', self.provider_key)
254 else:
255 self.provider = self.instance.provider
256
257 # add fields for the integration options
258 for field in self.provider.fields:
259 try:
260 initial = IntegrationOption.objects.get(integration=self.instance, key=field.get('key')).value
261 except IntegrationOption.DoesNotExist:
262 initial = None
263
264 if field.get('placeholder'):
265 attrs = {'placeholder': field.get('placeholder')}
266 self.fields[field.get('key')] = forms.CharField(widget=forms.TextInput(attrs=attrs),
267 initial=initial, required=field.get('required', True))
268
269 def save(self):
270 # the the project and the provider_key
271 self.instance.project = self.project
272 if self.provider_key:
273 self.instance.provider_key = self.provider_key
274
275 # call the form's save method
276 super().save()
277
278 # save the integration options
279 self.instance.save_options(self.cleaned_data)
280
281
282 class IssueSendForm(forms.Form):
283
284 class AttachmentViewsField(forms.ModelMultipleChoiceField):
285 def label_from_instance(self, obj):
286 return _('Attach %s') % obj.title
287
288 class AttachmentFilesField(forms.ModelMultipleChoiceField):
289 def label_from_instance(self, obj):
290 return _('Attach %s') % obj.file_name
291
292 class AttachmentSnapshotField(forms.ModelChoiceField):
293 def label_from_instance(self, obj):
294 return obj.title
295
296 subject = forms.CharField(label=_('Subject'), max_length=128)
297 message = forms.CharField(label=_('Message'), widget=forms.Textarea)
298
299 def __init__(self, *args, **kwargs):
300 self.project = kwargs.pop('project')
301 super().__init__(*args, **kwargs)
302
303 self.fields['attachments_answers'] = forms.MultipleChoiceField(
304 label=_('Answers'), widget=forms.CheckboxSelectMultiple, required=False,
305 choices=[('project_answers', _('Attach the output of "View answers".'))]
306 )
307 self.fields['attachments_views'] = self.AttachmentViewsField(
308 label=_('Views'), widget=forms.CheckboxSelectMultiple, required=False,
309 queryset=self.project.views.all(), to_field_name='id'
310 )
311 self.fields['attachments_files'] = self.AttachmentFilesField(
312 label=_('Files'), widget=forms.CheckboxSelectMultiple, required=False,
313 queryset=self.project.values.filter(snapshot=None)
314 .filter(value_type=VALUE_TYPE_FILE)
315 .order_by('file'),
316 to_field_name='id'
317 )
318 self.fields['attachments_snapshot'] = self.AttachmentSnapshotField(
319 label=_('Snapshot'), widget=forms.RadioSelect, required=False,
320 queryset=self.project.snapshots.all(), empty_label=_('Current')
321 )
322 self.fields['attachments_format'] = forms.ChoiceField(
323 label=_('Format'), widget=forms.RadioSelect, required=False,
324 choices=settings.EXPORT_FORMATS
325 )
326
327 def clean(self):
328 cleaned_data = super().clean()
329
330 if cleaned_data.get('attachments_answers') or cleaned_data.get('attachments_views'):
331 if not cleaned_data.get('attachments_format'):
332 self.add_error('attachments_format', _('This field is required.'))
333
334
335 class IssueMailForm(forms.Form):
336
337 if settings.EMAIL_RECIPIENTS_CHOICES:
338 recipients = forms.MultipleChoiceField(label=_('Recipients'), widget=forms.CheckboxSelectMultiple,
339 required=not settings.EMAIL_RECIPIENTS_INPUT,
340 choices=settings.EMAIL_RECIPIENTS_CHOICES)
341
342 if settings.EMAIL_RECIPIENTS_INPUT:
343 recipients_input = forms.CharField(label=_('Recipients'), widget=forms.Textarea(attrs={
344 'placeholder': _('Enter recipients line by line')
345 }), required=not settings.EMAIL_RECIPIENTS_CHOICES)
346
347 def clean(self):
348 cleaned_data = super().clean()
349
350 if settings.EMAIL_RECIPIENTS_INPUT and \
351 cleaned_data.get('recipients') == [] and \
352 cleaned_data.get('recipients_input') == []:
353 self.add_error('recipients_input', _('This field is required.'))
354
355 def clean_recipients_input(self):
356 email_validator = EmailValidator()
357 cleaned_data = []
358
359 for line in self.cleaned_data['recipients_input'].splitlines():
360 email = line.strip()
361 email_validator(email)
362 cleaned_data.append(email)
363
364 return cleaned_data
365
366
367 class UploadFileForm(forms.Form):
368 uploaded_file = forms.FileField(
369 label='Select a file',
370 )
371
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rdmo/projects/forms.py b/rdmo/projects/forms.py
--- a/rdmo/projects/forms.py
+++ b/rdmo/projects/forms.py
@@ -192,16 +192,17 @@
def clean_username_or_email(self):
username_or_email = self.cleaned_data['username_or_email']
+ usermodel = get_user_model()
- # check if it is a registered
+ # check if it is a registered user
try:
- self.cleaned_data['user'] = get_user_model().objects.get(Q(username=username_or_email) | Q(email__iexact=username_or_email))
+ self.cleaned_data['user'] = usermodel.objects.get(Q(username=username_or_email) | Q(email__iexact=username_or_email))
self.cleaned_data['email'] = self.cleaned_data['user'].email
if self.cleaned_data['user'] in self.project.user.all():
raise ValidationError(_('The user is already a member of the project.'))
- except get_user_model().DoesNotExist:
+ except (usermodel.DoesNotExist, usermodel.MultipleObjectsReturned):
if settings.PROJECT_SEND_INVITE:
# check if it is a valid email address, this will raise the correct ValidationError
EmailValidator()(username_or_email)
| {"golden_diff": "diff --git a/rdmo/projects/forms.py b/rdmo/projects/forms.py\n--- a/rdmo/projects/forms.py\n+++ b/rdmo/projects/forms.py\n@@ -192,16 +192,17 @@\n \n def clean_username_or_email(self):\n username_or_email = self.cleaned_data['username_or_email']\n+ usermodel = get_user_model()\n \n- # check if it is a registered\n+ # check if it is a registered user\n try:\n- self.cleaned_data['user'] = get_user_model().objects.get(Q(username=username_or_email) | Q(email__iexact=username_or_email))\n+ self.cleaned_data['user'] = usermodel.objects.get(Q(username=username_or_email) | Q(email__iexact=username_or_email))\n self.cleaned_data['email'] = self.cleaned_data['user'].email\n \n if self.cleaned_data['user'] in self.project.user.all():\n raise ValidationError(_('The user is already a member of the project.'))\n \n- except get_user_model().DoesNotExist:\n+ except (usermodel.DoesNotExist, usermodel.MultipleObjectsReturned):\n if settings.PROJECT_SEND_INVITE:\n # check if it is a valid email address, this will raise the correct ValidationError\n EmailValidator()(username_or_email)\n", "issue": "Users with same email can not be invited\n### Description / Beschreibung\r\n\r\nSurprisingly, email addresses in RDMO don't have to be unique. I you invite an existin user to a project via email with a non unique mail the managers `.get()` function raises a `MultipleObjectsReturned`.\r\n\r\n### Expected behaviour / Erwartetes Verhalten\r\n\r\n(a) emails should unique in the first place or\r\n(b) both users should be invited\r\n\r\nProbably (b) is easier to implement.\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator\nfrom django.db.models import Q\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rdmo.core.constants import VALUE_TYPE_FILE\nfrom rdmo.core.plugins import get_plugin\nfrom rdmo.core.utils import markdown2html\n\nfrom .constants import ROLE_CHOICES\nfrom .models import (Integration, IntegrationOption, Invite, Membership,\n Project, Snapshot)\n\n\nclass CatalogChoiceField(forms.ModelChoiceField):\n\n def label_from_instance(self, obj):\n return mark_safe('<b>%s</b></br>%s' % (obj.title, markdown2html(obj.help)))\n\n\nclass TasksMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n def label_from_instance(self, obj):\n return mark_safe('<b>%s</b></br>%s' % (obj.title, markdown2html(obj.text)))\n\n\nclass ViewsMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n def label_from_instance(self, obj):\n return mark_safe('<b>%s</b></br>%s' % (obj.title, markdown2html(obj.help)))\n\n\nclass ProjectForm(forms.ModelForm):\n\n use_required_attribute = False\n\n def __init__(self, *args, **kwargs):\n catalogs = kwargs.pop('catalogs')\n projects = kwargs.pop('projects')\n super().__init__(*args, **kwargs)\n self.fields['title'].widget.attrs.update({\n 'autofocus': True\n })\n self.fields['catalog'].queryset = catalogs\n self.fields['catalog'].empty_label = None\n self.fields['catalog'].initial = catalogs.first()\n\n if settings.NESTED_PROJECTS:\n self.fields['parent'].queryset = projects\n\n class Meta:\n model = Project\n\n fields = ['title', 'description', 'catalog']\n if settings.NESTED_PROJECTS:\n fields += ['parent']\n\n field_classes = {\n 'catalog': CatalogChoiceField\n }\n widgets = {\n 'catalog': forms.RadioSelect()\n }\n\n\nclass 
ProjectUpdateInformationForm(forms.ModelForm):\n\n use_required_attribute = False\n\n class Meta:\n model = Project\n fields = ('title', 'description')\n\n\nclass ProjectUpdateCatalogForm(forms.ModelForm):\n\n use_required_attribute = False\n\n def __init__(self, *args, **kwargs):\n catalogs = kwargs.pop('catalogs')\n super().__init__(*args, **kwargs)\n self.fields['catalog'].queryset = catalogs\n self.fields['catalog'].empty_label = None\n\n class Meta:\n model = Project\n fields = ('catalog', )\n field_classes = {\n 'catalog': CatalogChoiceField\n }\n widgets = {\n 'catalog': forms.RadioSelect()\n }\n\n\nclass ProjectUpdateTasksForm(forms.ModelForm):\n\n use_required_attribute = False\n\n def __init__(self, *args, **kwargs):\n tasks = kwargs.pop('tasks')\n super().__init__(*args, **kwargs)\n self.fields['tasks'].queryset = tasks\n\n class Meta:\n model = Project\n fields = ('tasks', )\n field_classes = {\n 'tasks': TasksMultipleChoiceField\n }\n widgets = {\n 'tasks': forms.CheckboxSelectMultiple()\n }\n\n\nclass ProjectUpdateViewsForm(forms.ModelForm):\n\n use_required_attribute = False\n\n def __init__(self, *args, **kwargs):\n views = kwargs.pop('views')\n super().__init__(*args, **kwargs)\n self.fields['views'].queryset = views\n\n class Meta:\n model = Project\n fields = ('views', )\n field_classes = {\n 'views': ViewsMultipleChoiceField\n }\n widgets = {\n 'views': forms.CheckboxSelectMultiple()\n }\n\n\nclass ProjectUpdateParentForm(forms.ModelForm):\n\n use_required_attribute = False\n\n def __init__(self, *args, **kwargs):\n projects = kwargs.pop('projects')\n super().__init__(*args, **kwargs)\n self.fields['parent'].queryset = projects\n\n class Meta:\n model = Project\n fields = ('parent', )\n\n\nclass SnapshotCreateForm(forms.ModelForm):\n\n use_required_attribute = False\n\n class Meta:\n model = Snapshot\n fields = ('title', 'description')\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop('project')\n super(SnapshotCreateForm, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.instance.project = self.project\n return super(SnapshotCreateForm, self).save(*args, **kwargs)\n\n\nclass MembershipCreateForm(forms.Form):\n\n use_required_attribute = False\n\n username_or_email = forms.CharField(widget=forms.TextInput(attrs={'placeholder': _('Username or e-mail')}),\n label=_('User'),\n help_text=_('The username or e-mail of the new user.'))\n role = forms.CharField(widget=forms.RadioSelect(choices=ROLE_CHOICES),\n initial='author')\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop('project')\n self.is_site_manager = kwargs.pop('is_site_manager')\n super().__init__(*args, **kwargs)\n\n if self.is_site_manager:\n self.fields['silent'] = forms.BooleanField(\n required=False,\n label=_('Add member silently'),\n help_text=_('As site manager or admin, you can directly add users without notifying them via e-mail, when you check the following checkbox.')\n )\n\n def clean_username_or_email(self):\n username_or_email = self.cleaned_data['username_or_email']\n\n # check if it is a registered\n try:\n self.cleaned_data['user'] = get_user_model().objects.get(Q(username=username_or_email) | Q(email__iexact=username_or_email))\n self.cleaned_data['email'] = self.cleaned_data['user'].email\n\n if self.cleaned_data['user'] in self.project.user.all():\n raise ValidationError(_('The user is already a member of the project.'))\n\n except get_user_model().DoesNotExist:\n if settings.PROJECT_SEND_INVITE:\n # check if it is a valid email 
address, this will raise the correct ValidationError\n EmailValidator()(username_or_email)\n\n self.cleaned_data['user'] = None\n self.cleaned_data['email'] = username_or_email\n else:\n self.cleaned_data['user'] = None\n self.cleaned_data['email'] = None\n raise ValidationError(_('A user with this username or e-mail was not found. Only registered users can be invited.'))\n\n def clean(self):\n if self.cleaned_data.get('silent') is True and self.cleaned_data.get('user') is None:\n raise ValidationError(_('Only existing users can be added silently.'))\n\n def save(self):\n if self.is_site_manager and self.cleaned_data.get('silent') is True:\n Membership.objects.create(\n project=self.project,\n user=self.cleaned_data.get('user'),\n role=self.cleaned_data.get('role')\n )\n else:\n invite, created = Invite.objects.get_or_create(\n project=self.project,\n user=self.cleaned_data.get('user'),\n email=self.cleaned_data.get('email')\n )\n invite.role = self.cleaned_data.get('role')\n invite.make_token()\n invite.save()\n\n return invite\n\n\nclass IntegrationForm(forms.ModelForm):\n\n class Meta:\n model = Integration\n fields = ()\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop('project')\n self.provider_key = kwargs.pop('provider_key', None)\n super().__init__(*args, **kwargs)\n\n # get the provider\n if self.provider_key:\n self.provider = get_plugin('PROJECT_ISSUE_PROVIDERS', self.provider_key)\n else:\n self.provider = self.instance.provider\n\n # add fields for the integration options\n for field in self.provider.fields:\n try:\n initial = IntegrationOption.objects.get(integration=self.instance, key=field.get('key')).value\n except IntegrationOption.DoesNotExist:\n initial = None\n\n if field.get('placeholder'):\n attrs = {'placeholder': field.get('placeholder')}\n self.fields[field.get('key')] = forms.CharField(widget=forms.TextInput(attrs=attrs),\n initial=initial, required=field.get('required', True))\n\n def save(self):\n # the the project and the provider_key\n self.instance.project = self.project\n if self.provider_key:\n self.instance.provider_key = self.provider_key\n\n # call the form's save method\n super().save()\n\n # save the integration options\n self.instance.save_options(self.cleaned_data)\n\n\nclass IssueSendForm(forms.Form):\n\n class AttachmentViewsField(forms.ModelMultipleChoiceField):\n def label_from_instance(self, obj):\n return _('Attach %s') % obj.title\n\n class AttachmentFilesField(forms.ModelMultipleChoiceField):\n def label_from_instance(self, obj):\n return _('Attach %s') % obj.file_name\n\n class AttachmentSnapshotField(forms.ModelChoiceField):\n def label_from_instance(self, obj):\n return obj.title\n\n subject = forms.CharField(label=_('Subject'), max_length=128)\n message = forms.CharField(label=_('Message'), widget=forms.Textarea)\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop('project')\n super().__init__(*args, **kwargs)\n\n self.fields['attachments_answers'] = forms.MultipleChoiceField(\n label=_('Answers'), widget=forms.CheckboxSelectMultiple, required=False,\n choices=[('project_answers', _('Attach the output of \"View answers\".'))]\n )\n self.fields['attachments_views'] = self.AttachmentViewsField(\n label=_('Views'), widget=forms.CheckboxSelectMultiple, required=False,\n queryset=self.project.views.all(), to_field_name='id'\n )\n self.fields['attachments_files'] = self.AttachmentFilesField(\n label=_('Files'), widget=forms.CheckboxSelectMultiple, required=False,\n 
queryset=self.project.values.filter(snapshot=None)\n .filter(value_type=VALUE_TYPE_FILE)\n .order_by('file'),\n to_field_name='id'\n )\n self.fields['attachments_snapshot'] = self.AttachmentSnapshotField(\n label=_('Snapshot'), widget=forms.RadioSelect, required=False,\n queryset=self.project.snapshots.all(), empty_label=_('Current')\n )\n self.fields['attachments_format'] = forms.ChoiceField(\n label=_('Format'), widget=forms.RadioSelect, required=False,\n choices=settings.EXPORT_FORMATS\n )\n\n def clean(self):\n cleaned_data = super().clean()\n\n if cleaned_data.get('attachments_answers') or cleaned_data.get('attachments_views'):\n if not cleaned_data.get('attachments_format'):\n self.add_error('attachments_format', _('This field is required.'))\n\n\nclass IssueMailForm(forms.Form):\n\n if settings.EMAIL_RECIPIENTS_CHOICES:\n recipients = forms.MultipleChoiceField(label=_('Recipients'), widget=forms.CheckboxSelectMultiple,\n required=not settings.EMAIL_RECIPIENTS_INPUT,\n choices=settings.EMAIL_RECIPIENTS_CHOICES)\n\n if settings.EMAIL_RECIPIENTS_INPUT:\n recipients_input = forms.CharField(label=_('Recipients'), widget=forms.Textarea(attrs={\n 'placeholder': _('Enter recipients line by line')\n }), required=not settings.EMAIL_RECIPIENTS_CHOICES)\n\n def clean(self):\n cleaned_data = super().clean()\n\n if settings.EMAIL_RECIPIENTS_INPUT and \\\n cleaned_data.get('recipients') == [] and \\\n cleaned_data.get('recipients_input') == []:\n self.add_error('recipients_input', _('This field is required.'))\n\n def clean_recipients_input(self):\n email_validator = EmailValidator()\n cleaned_data = []\n\n for line in self.cleaned_data['recipients_input'].splitlines():\n email = line.strip()\n email_validator(email)\n cleaned_data.append(email)\n\n return cleaned_data\n\n\nclass UploadFileForm(forms.Form):\n uploaded_file = forms.FileField(\n label='Select a file',\n )\n", "path": "rdmo/projects/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator\nfrom django.db.models import Q\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rdmo.core.constants import VALUE_TYPE_FILE\nfrom rdmo.core.plugins import get_plugin\nfrom rdmo.core.utils import markdown2html\n\nfrom .constants import ROLE_CHOICES\nfrom .models import (Integration, IntegrationOption, Invite, Membership,\n Project, Snapshot)\n\n\nclass CatalogChoiceField(forms.ModelChoiceField):\n\n def label_from_instance(self, obj):\n return mark_safe('<b>%s</b></br>%s' % (obj.title, markdown2html(obj.help)))\n\n\nclass TasksMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n def label_from_instance(self, obj):\n return mark_safe('<b>%s</b></br>%s' % (obj.title, markdown2html(obj.text)))\n\n\nclass ViewsMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n def label_from_instance(self, obj):\n return mark_safe('<b>%s</b></br>%s' % (obj.title, markdown2html(obj.help)))\n\n\nclass ProjectForm(forms.ModelForm):\n\n use_required_attribute = False\n\n def __init__(self, *args, **kwargs):\n catalogs = kwargs.pop('catalogs')\n projects = kwargs.pop('projects')\n super().__init__(*args, **kwargs)\n self.fields['title'].widget.attrs.update({\n 'autofocus': True\n })\n self.fields['catalog'].queryset = catalogs\n self.fields['catalog'].empty_label = None\n self.fields['catalog'].initial = 
catalogs.first()\n\n if settings.NESTED_PROJECTS:\n self.fields['parent'].queryset = projects\n\n class Meta:\n model = Project\n\n fields = ['title', 'description', 'catalog']\n if settings.NESTED_PROJECTS:\n fields += ['parent']\n\n field_classes = {\n 'catalog': CatalogChoiceField\n }\n widgets = {\n 'catalog': forms.RadioSelect()\n }\n\n\nclass ProjectUpdateInformationForm(forms.ModelForm):\n\n use_required_attribute = False\n\n class Meta:\n model = Project\n fields = ('title', 'description')\n\n\nclass ProjectUpdateCatalogForm(forms.ModelForm):\n\n use_required_attribute = False\n\n def __init__(self, *args, **kwargs):\n catalogs = kwargs.pop('catalogs')\n super().__init__(*args, **kwargs)\n self.fields['catalog'].queryset = catalogs\n self.fields['catalog'].empty_label = None\n\n class Meta:\n model = Project\n fields = ('catalog', )\n field_classes = {\n 'catalog': CatalogChoiceField\n }\n widgets = {\n 'catalog': forms.RadioSelect()\n }\n\n\nclass ProjectUpdateTasksForm(forms.ModelForm):\n\n use_required_attribute = False\n\n def __init__(self, *args, **kwargs):\n tasks = kwargs.pop('tasks')\n super().__init__(*args, **kwargs)\n self.fields['tasks'].queryset = tasks\n\n class Meta:\n model = Project\n fields = ('tasks', )\n field_classes = {\n 'tasks': TasksMultipleChoiceField\n }\n widgets = {\n 'tasks': forms.CheckboxSelectMultiple()\n }\n\n\nclass ProjectUpdateViewsForm(forms.ModelForm):\n\n use_required_attribute = False\n\n def __init__(self, *args, **kwargs):\n views = kwargs.pop('views')\n super().__init__(*args, **kwargs)\n self.fields['views'].queryset = views\n\n class Meta:\n model = Project\n fields = ('views', )\n field_classes = {\n 'views': ViewsMultipleChoiceField\n }\n widgets = {\n 'views': forms.CheckboxSelectMultiple()\n }\n\n\nclass ProjectUpdateParentForm(forms.ModelForm):\n\n use_required_attribute = False\n\n def __init__(self, *args, **kwargs):\n projects = kwargs.pop('projects')\n super().__init__(*args, **kwargs)\n self.fields['parent'].queryset = projects\n\n class Meta:\n model = Project\n fields = ('parent', )\n\n\nclass SnapshotCreateForm(forms.ModelForm):\n\n use_required_attribute = False\n\n class Meta:\n model = Snapshot\n fields = ('title', 'description')\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop('project')\n super(SnapshotCreateForm, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.instance.project = self.project\n return super(SnapshotCreateForm, self).save(*args, **kwargs)\n\n\nclass MembershipCreateForm(forms.Form):\n\n use_required_attribute = False\n\n username_or_email = forms.CharField(widget=forms.TextInput(attrs={'placeholder': _('Username or e-mail')}),\n label=_('User'),\n help_text=_('The username or e-mail of the new user.'))\n role = forms.CharField(widget=forms.RadioSelect(choices=ROLE_CHOICES),\n initial='author')\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop('project')\n self.is_site_manager = kwargs.pop('is_site_manager')\n super().__init__(*args, **kwargs)\n\n if self.is_site_manager:\n self.fields['silent'] = forms.BooleanField(\n required=False,\n label=_('Add member silently'),\n help_text=_('As site manager or admin, you can directly add users without notifying them via e-mail, when you check the following checkbox.')\n )\n\n def clean_username_or_email(self):\n username_or_email = self.cleaned_data['username_or_email']\n usermodel = get_user_model()\n\n # check if it is a registered user\n try:\n self.cleaned_data['user'] = 
usermodel.objects.get(Q(username=username_or_email) | Q(email__iexact=username_or_email))\n self.cleaned_data['email'] = self.cleaned_data['user'].email\n\n if self.cleaned_data['user'] in self.project.user.all():\n raise ValidationError(_('The user is already a member of the project.'))\n\n except (usermodel.DoesNotExist, usermodel.MultipleObjectsReturned):\n if settings.PROJECT_SEND_INVITE:\n # check if it is a valid email address, this will raise the correct ValidationError\n EmailValidator()(username_or_email)\n\n self.cleaned_data['user'] = None\n self.cleaned_data['email'] = username_or_email\n else:\n self.cleaned_data['user'] = None\n self.cleaned_data['email'] = None\n raise ValidationError(_('A user with this username or e-mail was not found. Only registered users can be invited.'))\n\n def clean(self):\n if self.cleaned_data.get('silent') is True and self.cleaned_data.get('user') is None:\n raise ValidationError(_('Only existing users can be added silently.'))\n\n def save(self):\n if self.is_site_manager and self.cleaned_data.get('silent') is True:\n Membership.objects.create(\n project=self.project,\n user=self.cleaned_data.get('user'),\n role=self.cleaned_data.get('role')\n )\n else:\n invite, created = Invite.objects.get_or_create(\n project=self.project,\n user=self.cleaned_data.get('user'),\n email=self.cleaned_data.get('email')\n )\n invite.role = self.cleaned_data.get('role')\n invite.make_token()\n invite.save()\n\n return invite\n\n\nclass IntegrationForm(forms.ModelForm):\n\n class Meta:\n model = Integration\n fields = ()\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop('project')\n self.provider_key = kwargs.pop('provider_key', None)\n super().__init__(*args, **kwargs)\n\n # get the provider\n if self.provider_key:\n self.provider = get_plugin('PROJECT_ISSUE_PROVIDERS', self.provider_key)\n else:\n self.provider = self.instance.provider\n\n # add fields for the integration options\n for field in self.provider.fields:\n try:\n initial = IntegrationOption.objects.get(integration=self.instance, key=field.get('key')).value\n except IntegrationOption.DoesNotExist:\n initial = None\n\n if field.get('placeholder'):\n attrs = {'placeholder': field.get('placeholder')}\n self.fields[field.get('key')] = forms.CharField(widget=forms.TextInput(attrs=attrs),\n initial=initial, required=field.get('required', True))\n\n def save(self):\n # the the project and the provider_key\n self.instance.project = self.project\n if self.provider_key:\n self.instance.provider_key = self.provider_key\n\n # call the form's save method\n super().save()\n\n # save the integration options\n self.instance.save_options(self.cleaned_data)\n\n\nclass IssueSendForm(forms.Form):\n\n class AttachmentViewsField(forms.ModelMultipleChoiceField):\n def label_from_instance(self, obj):\n return _('Attach %s') % obj.title\n\n class AttachmentFilesField(forms.ModelMultipleChoiceField):\n def label_from_instance(self, obj):\n return _('Attach %s') % obj.file_name\n\n class AttachmentSnapshotField(forms.ModelChoiceField):\n def label_from_instance(self, obj):\n return obj.title\n\n subject = forms.CharField(label=_('Subject'), max_length=128)\n message = forms.CharField(label=_('Message'), widget=forms.Textarea)\n\n def __init__(self, *args, **kwargs):\n self.project = kwargs.pop('project')\n super().__init__(*args, **kwargs)\n\n self.fields['attachments_answers'] = forms.MultipleChoiceField(\n label=_('Answers'), widget=forms.CheckboxSelectMultiple, required=False,\n choices=[('project_answers', 
_('Attach the output of \"View answers\".'))]\n )\n self.fields['attachments_views'] = self.AttachmentViewsField(\n label=_('Views'), widget=forms.CheckboxSelectMultiple, required=False,\n queryset=self.project.views.all(), to_field_name='id'\n )\n self.fields['attachments_files'] = self.AttachmentFilesField(\n label=_('Files'), widget=forms.CheckboxSelectMultiple, required=False,\n queryset=self.project.values.filter(snapshot=None)\n .filter(value_type=VALUE_TYPE_FILE)\n .order_by('file'),\n to_field_name='id'\n )\n self.fields['attachments_snapshot'] = self.AttachmentSnapshotField(\n label=_('Snapshot'), widget=forms.RadioSelect, required=False,\n queryset=self.project.snapshots.all(), empty_label=_('Current')\n )\n self.fields['attachments_format'] = forms.ChoiceField(\n label=_('Format'), widget=forms.RadioSelect, required=False,\n choices=settings.EXPORT_FORMATS\n )\n\n def clean(self):\n cleaned_data = super().clean()\n\n if cleaned_data.get('attachments_answers') or cleaned_data.get('attachments_views'):\n if not cleaned_data.get('attachments_format'):\n self.add_error('attachments_format', _('This field is required.'))\n\n\nclass IssueMailForm(forms.Form):\n\n if settings.EMAIL_RECIPIENTS_CHOICES:\n recipients = forms.MultipleChoiceField(label=_('Recipients'), widget=forms.CheckboxSelectMultiple,\n required=not settings.EMAIL_RECIPIENTS_INPUT,\n choices=settings.EMAIL_RECIPIENTS_CHOICES)\n\n if settings.EMAIL_RECIPIENTS_INPUT:\n recipients_input = forms.CharField(label=_('Recipients'), widget=forms.Textarea(attrs={\n 'placeholder': _('Enter recipients line by line')\n }), required=not settings.EMAIL_RECIPIENTS_CHOICES)\n\n def clean(self):\n cleaned_data = super().clean()\n\n if settings.EMAIL_RECIPIENTS_INPUT and \\\n cleaned_data.get('recipients') == [] and \\\n cleaned_data.get('recipients_input') == []:\n self.add_error('recipients_input', _('This field is required.'))\n\n def clean_recipients_input(self):\n email_validator = EmailValidator()\n cleaned_data = []\n\n for line in self.cleaned_data['recipients_input'].splitlines():\n email = line.strip()\n email_validator(email)\n cleaned_data.append(email)\n\n return cleaned_data\n\n\nclass UploadFileForm(forms.Form):\n uploaded_file = forms.FileField(\n label='Select a file',\n )\n", "path": "rdmo/projects/forms.py"}]} | 3,971 | 274 |
gh_patches_debug_15466 | rasdani/github-patches | git_diff | google__turbinia-811 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DockerContainersEnumerationTask JSON decode failure
```
DockerContainersEnumerationTask Task failed with exception: [Expecting value: line 1 column 1 (char 0)]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/__init__.py", line 895, in run_wrapper
self.result = self.run(evidence, self.result)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/docker.py", line 107, in run
containers_info = self.GetContainers(evidence)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/docker.py", line 84, in GetContainers
containers_info = json.loads(json_string)
File "/usr/lib/python3.6/json/__init__.py", line 354, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.6/json/decoder.py", line 339, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python3.6/json/decoder.py", line 357, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
```
e2e test -> https://plaso-ci.log2timeline.net/jenkins/view/Turbinia/job/turbinia-e2e-googleclouddisk-dev/59/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/workers/docker.py`
Content:
```
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Task for running docker-explorer."""
15
16 from __future__ import unicode_literals
17
18 import json
19 import logging
20 import subprocess
21
22 from turbinia import TurbiniaException
23 from turbinia.evidence import DockerContainer
24 from turbinia.evidence import EvidenceState as state
25 from turbinia.lib import utils
26 from turbinia.workers import Priority
27 from turbinia.workers import TurbiniaTask
28 from turbinia.lib.docker_manager import GetDockerPath
29 from turbinia import config
30
31 log = logging.getLogger('turbinia')
32
33
34 class DockerContainersEnumerationTask(TurbiniaTask):
35 """Enumerates Docker containers on Linux"""
36
37 REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]
38
39 def GetContainers(self, evidence):
40 """Lists the containers from an input Evidence.
41
42 We use subprocess to run the DockerExplorer script, instead of using the
43 Python module, because we need to make sure all DockerExplorer code runs
44 as root.
45
46 Args:
47 evidence (Evidence): the input Evidence.
48
49 Returns:
50 a list(dict) containing information about the containers found.
51
52 Raises:
53 TurbiniaException: when the docker-explorer tool cannot be found or failed
54 to run.
55 """
56 config.LoadConfig()
57 docker_dir = GetDockerPath(evidence.mount_path)
58
59 containers_info = None
60
61 # TODO(rgayon): use docker-explorer exposed constant when
62 # https://github.com/google/docker-explorer/issues/80 is in.
63 de_binary = utils.get_exe_path('de.py')
64 if not de_binary:
65 raise TurbiniaException('Cannot find de.py in path')
66
67 docker_explorer_command = ['sudo', de_binary]
68
69 if config.DEBUG_TASKS or evidence.config.get('debug_tasks'):
70 docker_explorer_command.append('-d')
71
72 docker_explorer_command.extend(['-r', docker_dir, 'list', 'all_containers'])
73
74 log.info('Running {0:s}'.format(' '.join(docker_explorer_command)))
75 try:
76 json_string = subprocess.check_output(docker_explorer_command).decode(
77 'utf-8')
78 except json.JSONDecodeError as e:
79 raise TurbiniaException(
80 'Error decoding JSON output from de.py: {0!s}'.format(e))
81 except subprocess.CalledProcessError as e:
82 raise TurbiniaException('de.py returned an error: {0!s}'.format(e))
83
84 containers_info = json.loads(json_string)
85
86 return containers_info
87
88 def run(self, evidence, result):
89 """Run the docker-explorer tool to list containerss.
90
91 Args:
92 evidence (Evidence object): The evidence to process
93 result (TurbiniaTaskResult): The object to place task results into.
94
95 Returns:
96 TurbiniaTaskResult object.
97 """
98
99 status_report = ''
100 success = False
101
102 status_report = (
103 'Error enumerating Docker containers, evidence has no mounted '
104 'filesystem')
105 found_containers = []
106 try:
107 containers_info = self.GetContainers(evidence)
108 for container_info in containers_info:
109 container_id = container_info.get('container_id')
110 found_containers.append(container_id)
111 container_evidence = DockerContainer(container_id=container_id)
112 result.add_evidence(container_evidence, evidence.config)
113 success = True
114 status_report = 'Found {0!s} containers: {1:s}'.format(
115 len(found_containers), ' '.join(found_containers))
116 except TurbiniaException as e:
117 status_report = 'Error enumerating Docker containers: {0!s}'.format(e)
118
119 result.report_priority = Priority.LOW
120 result.report_data = status_report
121 result.close(self, success=success, status=status_report)
122 return result
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/turbinia/workers/docker.py b/turbinia/workers/docker.py
--- a/turbinia/workers/docker.py
+++ b/turbinia/workers/docker.py
@@ -75,14 +75,14 @@
try:
json_string = subprocess.check_output(docker_explorer_command).decode(
'utf-8')
+ containers_info = json.loads(json_string)
except json.JSONDecodeError as e:
raise TurbiniaException(
- 'Error decoding JSON output from de.py: {0!s}'.format(e))
+ 'Error decoding JSON output from de.py: {0!s} {1!s}'.format(
+ e, json_string))
except subprocess.CalledProcessError as e:
raise TurbiniaException('de.py returned an error: {0!s}'.format(e))
- containers_info = json.loads(json_string)
-
return containers_info
def run(self, evidence, result):
| {"golden_diff": "diff --git a/turbinia/workers/docker.py b/turbinia/workers/docker.py\n--- a/turbinia/workers/docker.py\n+++ b/turbinia/workers/docker.py\n@@ -75,14 +75,14 @@\n try:\n json_string = subprocess.check_output(docker_explorer_command).decode(\n 'utf-8')\n+ containers_info = json.loads(json_string)\n except json.JSONDecodeError as e:\n raise TurbiniaException(\n- 'Error decoding JSON output from de.py: {0!s}'.format(e))\n+ 'Error decoding JSON output from de.py: {0!s} {1!s}'.format(\n+ e, json_string))\n except subprocess.CalledProcessError as e:\n raise TurbiniaException('de.py returned an error: {0!s}'.format(e))\n \n- containers_info = json.loads(json_string)\n-\n return containers_info\n \n def run(self, evidence, result):\n", "issue": "DockerContainersEnumerationTask JSON decode failure\n```\r\nDockerContainersEnumerationTask Task failed with exception: [Expecting value: line 1 column 1 (char 0)]\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/__init__.py\", line 895, in run_wrapper\r\n self.result = self.run(evidence, self.result)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/docker.py\", line 107, in run\r\n containers_info = self.GetContainers(evidence)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210423.dev2+g1f137be-py3.6.egg/turbinia/workers/docker.py\", line 84, in GetContainers\r\n containers_info = json.loads(json_string)\r\n File \"/usr/lib/python3.6/json/__init__.py\", line 354, in loads\r\n return _default_decoder.decode(s)\r\n File \"/usr/lib/python3.6/json/decoder.py\", line 339, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"/usr/lib/python3.6/json/decoder.py\", line 357, in raw_decode\r\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\r\njson.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\r\n```\r\n\r\ne2e test -> https://plaso-ci.log2timeline.net/jenkins/view/Turbinia/job/turbinia-e2e-googleclouddisk-dev/59/\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for running docker-explorer.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport subprocess\n\nfrom turbinia import TurbiniaException\nfrom turbinia.evidence import DockerContainer\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.lib import utils\nfrom turbinia.workers import Priority\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.lib.docker_manager import GetDockerPath\nfrom turbinia import config\n\nlog = logging.getLogger('turbinia')\n\n\nclass DockerContainersEnumerationTask(TurbiniaTask):\n \"\"\"Enumerates Docker containers on Linux\"\"\"\n\n REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]\n\n def GetContainers(self, evidence):\n \"\"\"Lists the containers from an input Evidence.\n\n We use subprocess to run the 
DockerExplorer script, instead of using the\n Python module, because we need to make sure all DockerExplorer code runs\n as root.\n\n Args:\n evidence (Evidence): the input Evidence.\n\n Returns:\n a list(dict) containing information about the containers found.\n\n Raises:\n TurbiniaException: when the docker-explorer tool cannot be found or failed\n to run.\n \"\"\"\n config.LoadConfig()\n docker_dir = GetDockerPath(evidence.mount_path)\n\n containers_info = None\n\n # TODO(rgayon): use docker-explorer exposed constant when\n # https://github.com/google/docker-explorer/issues/80 is in.\n de_binary = utils.get_exe_path('de.py')\n if not de_binary:\n raise TurbiniaException('Cannot find de.py in path')\n\n docker_explorer_command = ['sudo', de_binary]\n\n if config.DEBUG_TASKS or evidence.config.get('debug_tasks'):\n docker_explorer_command.append('-d')\n\n docker_explorer_command.extend(['-r', docker_dir, 'list', 'all_containers'])\n\n log.info('Running {0:s}'.format(' '.join(docker_explorer_command)))\n try:\n json_string = subprocess.check_output(docker_explorer_command).decode(\n 'utf-8')\n except json.JSONDecodeError as e:\n raise TurbiniaException(\n 'Error decoding JSON output from de.py: {0!s}'.format(e))\n except subprocess.CalledProcessError as e:\n raise TurbiniaException('de.py returned an error: {0!s}'.format(e))\n\n containers_info = json.loads(json_string)\n\n return containers_info\n\n def run(self, evidence, result):\n \"\"\"Run the docker-explorer tool to list containerss.\n\n Args:\n evidence (Evidence object): The evidence to process\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n status_report = ''\n success = False\n\n status_report = (\n 'Error enumerating Docker containers, evidence has no mounted '\n 'filesystem')\n found_containers = []\n try:\n containers_info = self.GetContainers(evidence)\n for container_info in containers_info:\n container_id = container_info.get('container_id')\n found_containers.append(container_id)\n container_evidence = DockerContainer(container_id=container_id)\n result.add_evidence(container_evidence, evidence.config)\n success = True\n status_report = 'Found {0!s} containers: {1:s}'.format(\n len(found_containers), ' '.join(found_containers))\n except TurbiniaException as e:\n status_report = 'Error enumerating Docker containers: {0!s}'.format(e)\n\n result.report_priority = Priority.LOW\n result.report_data = status_report\n result.close(self, success=success, status=status_report)\n return result\n", "path": "turbinia/workers/docker.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for running docker-explorer.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport subprocess\n\nfrom turbinia import TurbiniaException\nfrom turbinia.evidence import DockerContainer\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.lib import utils\nfrom 
turbinia.workers import Priority\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.lib.docker_manager import GetDockerPath\nfrom turbinia import config\n\nlog = logging.getLogger('turbinia')\n\n\nclass DockerContainersEnumerationTask(TurbiniaTask):\n \"\"\"Enumerates Docker containers on Linux\"\"\"\n\n REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]\n\n def GetContainers(self, evidence):\n \"\"\"Lists the containers from an input Evidence.\n\n We use subprocess to run the DockerExplorer script, instead of using the\n Python module, because we need to make sure all DockerExplorer code runs\n as root.\n\n Args:\n evidence (Evidence): the input Evidence.\n\n Returns:\n a list(dict) containing information about the containers found.\n\n Raises:\n TurbiniaException: when the docker-explorer tool cannot be found or failed\n to run.\n \"\"\"\n config.LoadConfig()\n docker_dir = GetDockerPath(evidence.mount_path)\n\n containers_info = None\n\n # TODO(rgayon): use docker-explorer exposed constant when\n # https://github.com/google/docker-explorer/issues/80 is in.\n de_binary = utils.get_exe_path('de.py')\n if not de_binary:\n raise TurbiniaException('Cannot find de.py in path')\n\n docker_explorer_command = ['sudo', de_binary]\n\n if config.DEBUG_TASKS or evidence.config.get('debug_tasks'):\n docker_explorer_command.append('-d')\n\n docker_explorer_command.extend(['-r', docker_dir, 'list', 'all_containers'])\n\n log.info('Running {0:s}'.format(' '.join(docker_explorer_command)))\n try:\n json_string = subprocess.check_output(docker_explorer_command).decode(\n 'utf-8')\n containers_info = json.loads(json_string)\n except json.JSONDecodeError as e:\n raise TurbiniaException(\n 'Error decoding JSON output from de.py: {0!s} {1!s}'.format(\n e, json_string))\n except subprocess.CalledProcessError as e:\n raise TurbiniaException('de.py returned an error: {0!s}'.format(e))\n\n return containers_info\n\n def run(self, evidence, result):\n \"\"\"Run the docker-explorer tool to list containerss.\n\n Args:\n evidence (Evidence object): The evidence to process\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n status_report = ''\n success = False\n\n status_report = (\n 'Error enumerating Docker containers, evidence has no mounted '\n 'filesystem')\n found_containers = []\n try:\n containers_info = self.GetContainers(evidence)\n for container_info in containers_info:\n container_id = container_info.get('container_id')\n found_containers.append(container_id)\n container_evidence = DockerContainer(container_id=container_id)\n result.add_evidence(container_evidence, evidence.config)\n success = True\n status_report = 'Found {0!s} containers: {1:s}'.format(\n len(found_containers), ' '.join(found_containers))\n except TurbiniaException as e:\n status_report = 'Error enumerating Docker containers: {0!s}'.format(e)\n\n result.report_priority = Priority.LOW\n result.report_data = status_report\n result.close(self, success=success, status=status_report)\n return result\n", "path": "turbinia/workers/docker.py"}]} | 1,910 | 214 |
gh_patches_debug_12726 | rasdani/github-patches | git_diff | enthought__chaco-678 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove chaco.chaco_version file
https://github.com/enthought/chaco/blob/fdd858aa6dbc76addb50d011fb81e879ce8e0355/chaco/chaco_version.py
We now create the `chaco._version` file when installing the package (in `setup.py`), so we don't need this additional `chaco.chaco_version` file anymore.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chaco/chaco_version.py`
Content:
```
1 # ------------------------------------------------------------------------------
2 # Copyright (c) 2005, Enthought, Inc.
3 # All rights reserved.
4 #
5 # This software is provided without warranty under the terms of the BSD
6 # license included in LICENSE.txt and may be redistributed only
7 # under the conditions described in the aforementioned license. The license
8 # is also available online at http://www.enthought.com/licenses/BSD.txt
9 # Thanks for using Enthought open source!
10 #
11 # Author: Enthought, Inc.
12 # Description: <Enthought library component>
13 # ------------------------------------------------------------------------------
14 """ Defines version numbering for the Chaco package.
15 """
16 major = 2
17 minor = 0
18 micro = 9
19
20 version = "%s.%s.%s" % (major, minor, micro)
21
22
23 release_level = "beta"
24 branch = ""
25 revision = version
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chaco/chaco_version.py b/chaco/chaco_version.py
deleted file mode 100644
--- a/chaco/chaco_version.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# ------------------------------------------------------------------------------
-# Copyright (c) 2005, Enthought, Inc.
-# All rights reserved.
-#
-# This software is provided without warranty under the terms of the BSD
-# license included in LICENSE.txt and may be redistributed only
-# under the conditions described in the aforementioned license. The license
-# is also available online at http://www.enthought.com/licenses/BSD.txt
-# Thanks for using Enthought open source!
-#
-# Author: Enthought, Inc.
-# Description: <Enthought library component>
-# ------------------------------------------------------------------------------
-""" Defines version numbering for the Chaco package.
-"""
-major = 2
-minor = 0
-micro = 9
-
-version = "%s.%s.%s" % (major, minor, micro)
-
-
-release_level = "beta"
-branch = ""
-revision = version
| {"golden_diff": "diff --git a/chaco/chaco_version.py b/chaco/chaco_version.py\ndeleted file mode 100644\n--- a/chaco/chaco_version.py\n+++ /dev/null\n@@ -1,25 +0,0 @@\n-# ------------------------------------------------------------------------------\n-# Copyright (c) 2005, Enthought, Inc.\n-# All rights reserved.\n-#\n-# This software is provided without warranty under the terms of the BSD\n-# license included in LICENSE.txt and may be redistributed only\n-# under the conditions described in the aforementioned license. The license\n-# is also available online at http://www.enthought.com/licenses/BSD.txt\n-# Thanks for using Enthought open source!\n-#\n-# Author: Enthought, Inc.\n-# Description: <Enthought library component>\n-# ------------------------------------------------------------------------------\n-\"\"\" Defines version numbering for the Chaco package.\n-\"\"\"\n-major = 2\n-minor = 0\n-micro = 9\n-\n-version = \"%s.%s.%s\" % (major, minor, micro)\n-\n-\n-release_level = \"beta\"\n-branch = \"\"\n-revision = version\n", "issue": "Remove chaco.chaco_version file\nhttps://github.com/enthought/chaco/blob/fdd858aa6dbc76addb50d011fb81e879ce8e0355/chaco/chaco_version.py\r\n\r\nWe now create `chaco._version` file when installing the package (in `setup.py`) so we don't need this additional `chaco.chaco_version` file anymore.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------\n# Copyright (c) 2005, Enthought, Inc.\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in LICENSE.txt and may be redistributed only\n# under the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n# Thanks for using Enthought open source!\n#\n# Author: Enthought, Inc.\n# Description: <Enthought library component>\n# ------------------------------------------------------------------------------\n\"\"\" Defines version numbering for the Chaco package.\n\"\"\"\nmajor = 2\nminor = 0\nmicro = 9\n\nversion = \"%s.%s.%s\" % (major, minor, micro)\n\n\nrelease_level = \"beta\"\nbranch = \"\"\nrevision = version\n", "path": "chaco/chaco_version.py"}], "after_files": [{"content": null, "path": "chaco/chaco_version.py"}]} | 569 | 245 |
gh_patches_debug_41891 | rasdani/github-patches | git_diff | streamlink__streamlink-2134 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Huomao plugin does not work
<!--
Thanks for reporting a plugin issue!
USE THE TEMPLATE. Otherwise your plugin issue may be rejected.
First, see the contribution guidelines:
https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink
Also check the list of open and closed plugin issues:
https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22
Please see the text preview to avoid unnecessary formatting errors.
-->
## Plugin Issue
<!-- Replace [ ] with [x] in order to check the box -->
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
I found that the huomao plugin does not seem to work: I can use a browser to watch the stream, but
streamlink says there is no playable stream.
<!-- Explain the plugin issue as thoroughly as you can. -->
### Reproduction steps / Explicit stream URLs to test
<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->
1. https://www.huomao.com/9755
2. https://www.huomao.com/777777
3. https://www.huomao.com/888
### Log output
<!--
TEXT LOG OUTPUT IS REQUIRED for a plugin issue!
Use the `--loglevel debug` parameter and avoid using parameters which suppress log output.
https://streamlink.github.io/cli.html#cmdoption-l
Make sure to **remove usernames and passwords**
You can copy the output to https://gist.github.com/ or paste it below.
-->
```
[cli][info] Found matching plugin huomao for URL https://www.huomao.com/888
[plugin.huomao][error] Failed to extract stream_info.
error: No playable streams found on this URL: https://www.huomao.com/888
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/huomao.py`
Content:
```
1 """
2 NOTE: Since a documented API is nowhere to be found for Huomao; this plugin
3 simply extracts the videos stream_id, stream_url and stream_quality by
4 scraping the HTML and JS of one of Huomaos mobile webpages.
5
6 When viewing a stream on huomao.com, the base URL references a room_id. This
7 room_id is mapped one-to-one to a stream_id which references the actual .flv
8 video. Both stream_id, stream_url and stream_quality can be found in the
9 HTML and JS source of the mobile_page. Since one stream can occur in many
10 different qualities, we scrape all stream_url and stream_quality occurrences
11 and return each option to the user.
12 """
13
14 import re
15
16 from streamlink.plugin import Plugin
17 from streamlink.stream import HTTPStream
18
19 # URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.
20 url_re = re.compile(r"""
21 (http(s)?://)?
22 (www\.)?
23 huomao
24 (\.tv|\.com)
25 /(?P<room_id>\d+)
26 """, re.VERBOSE)
27
28 # URL used to retrive the stream_id, stream_url and stream_quality based of
29 # a room_id.
30 mobile_url = "http://www.huomao.com/mobile/mob_live/{0}"
31
32 # Pattern for extracting the stream_id from the mobile_url HTML.
33 #
34 # Example from HTML:
35 # <input id="html_stream" value="efmrCH" type="hidden">
36 stream_id_pattern = re.compile(r'id=\"html_stream\" value=\"(?P<stream_id>\w+)\"')
37
38 # Pattern for extracting each stream_url, stream_quality_url and a prettified
39 # stream_quality_name used for quality naming.
40 #
41 # Example from HTML:
42 # "2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'"
43 stream_info_pattern = re.compile(r"""
44 [1-9]:
45 \s+
46 '(?P<stream_url>(?:\w|\.|:|-|/)+)
47 '\+stream\+'
48 (?P<stream_quality_url>_?(?P<stream_quality_name>\d*))
49 /playlist.m3u8'
50 """, re.VERBOSE)
51
52
53 class Huomao(Plugin):
54 @classmethod
55 def can_handle_url(self, url):
56 return url_re.match(url)
57
58 def get_stream_id(self, html):
59 """Returns the stream_id contained in the HTML."""
60 stream_id = stream_id_pattern.search(html)
61
62 if not stream_id:
63 self.logger.error("Failed to extract stream_id.")
64
65 return stream_id.group("stream_id")
66
67 def get_stream_info(self, html):
68 """Returns a nested list of different stream options.
69
70 Each entry in the list will contain a stream_url, stream_quality_url
71 and stream_quality_name for each stream occurrence that was found in
72 the JS.
73 """
74 stream_info = stream_info_pattern.findall(html)
75
76 if not stream_info:
77 self.logger.error("Failed to extract stream_info.")
78
79 # Rename the "" quality to "source" by transforming the tuples to a
80 # list and reassigning.
81 stream_info_list = []
82 for info in stream_info:
83 if not info[2]:
84 stream_info_list.append([info[0], info[1], "source"])
85 else:
86 stream_info_list.append(list(info))
87
88 return stream_info_list
89
90 def _get_streams(self):
91 room_id = url_re.search(self.url).group("room_id")
92 html = self.session.http.get(mobile_url.format(room_id))
93 stream_id = self.get_stream_id(html.text)
94 stream_info = self.get_stream_info(html.text)
95
96 streams = {}
97 for info in stream_info:
98 streams[info[2]] = HTTPStream(self.session,
99 info[0] + stream_id + info[1] + ".flv")
100
101 return streams
102
103
104 __plugin__ = Huomao
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/huomao.py b/src/streamlink/plugins/huomao.py
--- a/src/streamlink/plugins/huomao.py
+++ b/src/streamlink/plugins/huomao.py
@@ -4,8 +4,8 @@
scraping the HTML and JS of one of Huomaos mobile webpages.
When viewing a stream on huomao.com, the base URL references a room_id. This
-room_id is mapped one-to-one to a stream_id which references the actual .flv
-video. Both stream_id, stream_url and stream_quality can be found in the
+room_id is mapped one-to-one to a stream_id which references the actual .m3u8
+file. Both stream_id, stream_url and stream_quality can be found in the
HTML and JS source of the mobile_page. Since one stream can occur in many
different qualities, we scrape all stream_url and stream_quality occurrences
and return each option to the user.
@@ -14,7 +14,7 @@
import re
from streamlink.plugin import Plugin
-from streamlink.stream import HTTPStream
+from streamlink.stream import HLSStream
# URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.
url_re = re.compile(r"""
@@ -35,18 +35,15 @@
# <input id="html_stream" value="efmrCH" type="hidden">
stream_id_pattern = re.compile(r'id=\"html_stream\" value=\"(?P<stream_id>\w+)\"')
-# Pattern for extracting each stream_url, stream_quality_url and a prettified
+# Pattern for extracting each stream_url and
# stream_quality_name used for quality naming.
#
# Example from HTML:
-# "2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'"
+# src="http://live-ws-hls.huomaotv.cn/live/<stream_id>_720/playlist.m3u8"
stream_info_pattern = re.compile(r"""
- [1-9]:
- \s+
- '(?P<stream_url>(?:\w|\.|:|-|/)+)
- '\+stream\+'
- (?P<stream_quality_url>_?(?P<stream_quality_name>\d*))
- /playlist.m3u8'
+ (?P<stream_url>(?:[\w\/\.\-:]+)
+ \/[^_\"]+(?:_(?P<stream_quality_name>\d+))
+ ?/playlist.m3u8)
""", re.VERBOSE)
@@ -65,11 +62,11 @@
return stream_id.group("stream_id")
def get_stream_info(self, html):
- """Returns a nested list of different stream options.
+ """
+ Returns a nested list of different stream options.
- Each entry in the list will contain a stream_url, stream_quality_url
- and stream_quality_name for each stream occurrence that was found in
- the JS.
+ Each entry in the list will contain a stream_url and stream_quality_name
+ for each stream occurrence that was found in the JS.
"""
stream_info = stream_info_pattern.findall(html)
@@ -80,8 +77,8 @@
# list and reassigning.
stream_info_list = []
for info in stream_info:
- if not info[2]:
- stream_info_list.append([info[0], info[1], "source"])
+ if not info[1]:
+ stream_info_list.append([info[0], "source"])
else:
stream_info_list.append(list(info))
@@ -95,8 +92,8 @@
streams = {}
for info in stream_info:
- streams[info[2]] = HTTPStream(self.session,
- info[0] + stream_id + info[1] + ".flv")
+ if stream_id in info[0]:
+ streams[info[1]] = HLSStream(self.session, info[0])
return streams
| {"golden_diff": "diff --git a/src/streamlink/plugins/huomao.py b/src/streamlink/plugins/huomao.py\n--- a/src/streamlink/plugins/huomao.py\n+++ b/src/streamlink/plugins/huomao.py\n@@ -4,8 +4,8 @@\n scraping the HTML and JS of one of Huomaos mobile webpages.\n \n When viewing a stream on huomao.com, the base URL references a room_id. This\n-room_id is mapped one-to-one to a stream_id which references the actual .flv\n-video. Both stream_id, stream_url and stream_quality can be found in the\n+room_id is mapped one-to-one to a stream_id which references the actual .m3u8\n+file. Both stream_id, stream_url and stream_quality can be found in the\n HTML and JS source of the mobile_page. Since one stream can occur in many\n different qualities, we scrape all stream_url and stream_quality occurrences\n and return each option to the user.\n@@ -14,7 +14,7 @@\n import re\n \n from streamlink.plugin import Plugin\n-from streamlink.stream import HTTPStream\n+from streamlink.stream import HLSStream\n \n # URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.\n url_re = re.compile(r\"\"\"\n@@ -35,18 +35,15 @@\n # <input id=\"html_stream\" value=\"efmrCH\" type=\"hidden\">\n stream_id_pattern = re.compile(r'id=\\\"html_stream\\\" value=\\\"(?P<stream_id>\\w+)\\\"')\n \n-# Pattern for extracting each stream_url, stream_quality_url and a prettified\n+# Pattern for extracting each stream_url and\n # stream_quality_name used for quality naming.\n #\n # Example from HTML:\n-# \"2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'\"\n+# src=\"http://live-ws-hls.huomaotv.cn/live/<stream_id>_720/playlist.m3u8\"\n stream_info_pattern = re.compile(r\"\"\"\n- [1-9]:\n- \\s+\n- '(?P<stream_url>(?:\\w|\\.|:|-|/)+)\n- '\\+stream\\+'\n- (?P<stream_quality_url>_?(?P<stream_quality_name>\\d*))\n- /playlist.m3u8'\n+ (?P<stream_url>(?:[\\w\\/\\.\\-:]+)\n+ \\/[^_\\\"]+(?:_(?P<stream_quality_name>\\d+))\n+ ?/playlist.m3u8)\n \"\"\", re.VERBOSE)\n \n \n@@ -65,11 +62,11 @@\n return stream_id.group(\"stream_id\")\n \n def get_stream_info(self, html):\n- \"\"\"Returns a nested list of different stream options.\n+ \"\"\"\n+ Returns a nested list of different stream options.\n \n- Each entry in the list will contain a stream_url, stream_quality_url\n- and stream_quality_name for each stream occurrence that was found in\n- the JS.\n+ Each entry in the list will contain a stream_url and stream_quality_name\n+ for each stream occurrence that was found in the JS.\n \"\"\"\n stream_info = stream_info_pattern.findall(html)\n \n@@ -80,8 +77,8 @@\n # list and reassigning.\n stream_info_list = []\n for info in stream_info:\n- if not info[2]:\n- stream_info_list.append([info[0], info[1], \"source\"])\n+ if not info[1]:\n+ stream_info_list.append([info[0], \"source\"])\n else:\n stream_info_list.append(list(info))\n \n@@ -95,8 +92,8 @@\n \n streams = {}\n for info in stream_info:\n- streams[info[2]] = HTTPStream(self.session,\n- info[0] + stream_id + info[1] + \".flv\")\n+ if stream_id in info[0]:\n+ streams[info[1]] = HLSStream(self.session, info[0])\n \n return streams\n", "issue": "Huomao plugin not work\n<!--\r\nThanks for reporting a plugin issue!\r\nUSE THE TEMPLATE. 
Otherwise your plugin issue may be rejected.\r\n\r\nFirst, see the contribution guidelines:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nAlso check the list of open and closed plugin issues:\r\nhttps://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22\r\n\r\nPlease see the text preview to avoid unnecessary formatting errors.\r\n-->\r\n\r\n\r\n## Plugin Issue\r\n\r\n<!-- Replace [ ] with [x] in order to check the box -->\r\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\nI found huomao plugin seems not work, i can use browser to watch stream but\r\nstreamlink says no playable stream\r\n\r\n\r\n<!-- Explain the plugin issue as thoroughly as you can. -->\r\n\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->\r\n\r\n1. https://www.huomao.com/9755\r\n2. https://www.huomao.com/777777\r\n3. https://www.huomao.com/888\r\n\r\n\r\n### Log output\r\n\r\n<!--\r\nTEXT LOG OUTPUT IS REQUIRED for a plugin issue!\r\nUse the `--loglevel debug` parameter and avoid using parameters which suppress log output.\r\nhttps://streamlink.github.io/cli.html#cmdoption-l\r\n\r\nMake sure to **remove usernames and passwords**\r\nYou can copy the output to https://gist.github.com/ or paste it below.\r\n-->\r\n\r\n```\r\n[cli][info] Found matching plugin huomao for URL https://www.huomao.com/888\r\n[plugin.huomao][error] Failed to extract stream_info.\r\nerror: No playable streams found on this URL: https://www.huomao.com/888\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nNOTE: Since a documented API is nowhere to be found for Huomao; this plugin\nsimply extracts the videos stream_id, stream_url and stream_quality by\nscraping the HTML and JS of one of Huomaos mobile webpages.\n\nWhen viewing a stream on huomao.com, the base URL references a room_id. This\nroom_id is mapped one-to-one to a stream_id which references the actual .flv\nvideo. Both stream_id, stream_url and stream_quality can be found in the\nHTML and JS source of the mobile_page. 
Since one stream can occur in many\ndifferent qualities, we scrape all stream_url and stream_quality occurrences\nand return each option to the user.\n\"\"\"\n\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import HTTPStream\n\n# URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.\nurl_re = re.compile(r\"\"\"\n (http(s)?://)?\n (www\\.)?\n huomao\n (\\.tv|\\.com)\n /(?P<room_id>\\d+)\n\"\"\", re.VERBOSE)\n\n# URL used to retrive the stream_id, stream_url and stream_quality based of\n# a room_id.\nmobile_url = \"http://www.huomao.com/mobile/mob_live/{0}\"\n\n# Pattern for extracting the stream_id from the mobile_url HTML.\n#\n# Example from HTML:\n# <input id=\"html_stream\" value=\"efmrCH\" type=\"hidden\">\nstream_id_pattern = re.compile(r'id=\\\"html_stream\\\" value=\\\"(?P<stream_id>\\w+)\\\"')\n\n# Pattern for extracting each stream_url, stream_quality_url and a prettified\n# stream_quality_name used for quality naming.\n#\n# Example from HTML:\n# \"2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'\"\nstream_info_pattern = re.compile(r\"\"\"\n [1-9]:\n \\s+\n '(?P<stream_url>(?:\\w|\\.|:|-|/)+)\n '\\+stream\\+'\n (?P<stream_quality_url>_?(?P<stream_quality_name>\\d*))\n /playlist.m3u8'\n\"\"\", re.VERBOSE)\n\n\nclass Huomao(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return url_re.match(url)\n\n def get_stream_id(self, html):\n \"\"\"Returns the stream_id contained in the HTML.\"\"\"\n stream_id = stream_id_pattern.search(html)\n\n if not stream_id:\n self.logger.error(\"Failed to extract stream_id.\")\n\n return stream_id.group(\"stream_id\")\n\n def get_stream_info(self, html):\n \"\"\"Returns a nested list of different stream options.\n\n Each entry in the list will contain a stream_url, stream_quality_url\n and stream_quality_name for each stream occurrence that was found in\n the JS.\n \"\"\"\n stream_info = stream_info_pattern.findall(html)\n\n if not stream_info:\n self.logger.error(\"Failed to extract stream_info.\")\n\n # Rename the \"\" quality to \"source\" by transforming the tuples to a\n # list and reassigning.\n stream_info_list = []\n for info in stream_info:\n if not info[2]:\n stream_info_list.append([info[0], info[1], \"source\"])\n else:\n stream_info_list.append(list(info))\n\n return stream_info_list\n\n def _get_streams(self):\n room_id = url_re.search(self.url).group(\"room_id\")\n html = self.session.http.get(mobile_url.format(room_id))\n stream_id = self.get_stream_id(html.text)\n stream_info = self.get_stream_info(html.text)\n\n streams = {}\n for info in stream_info:\n streams[info[2]] = HTTPStream(self.session,\n info[0] + stream_id + info[1] + \".flv\")\n\n return streams\n\n\n__plugin__ = Huomao\n", "path": "src/streamlink/plugins/huomao.py"}], "after_files": [{"content": "\"\"\"\nNOTE: Since a documented API is nowhere to be found for Huomao; this plugin\nsimply extracts the videos stream_id, stream_url and stream_quality by\nscraping the HTML and JS of one of Huomaos mobile webpages.\n\nWhen viewing a stream on huomao.com, the base URL references a room_id. This\nroom_id is mapped one-to-one to a stream_id which references the actual .m3u8\nfile. Both stream_id, stream_url and stream_quality can be found in the\nHTML and JS source of the mobile_page. 
Since one stream can occur in many\ndifferent qualities, we scrape all stream_url and stream_quality occurrences\nand return each option to the user.\n\"\"\"\n\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import HLSStream\n\n# URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.\nurl_re = re.compile(r\"\"\"\n (http(s)?://)?\n (www\\.)?\n huomao\n (\\.tv|\\.com)\n /(?P<room_id>\\d+)\n\"\"\", re.VERBOSE)\n\n# URL used to retrive the stream_id, stream_url and stream_quality based of\n# a room_id.\nmobile_url = \"http://www.huomao.com/mobile/mob_live/{0}\"\n\n# Pattern for extracting the stream_id from the mobile_url HTML.\n#\n# Example from HTML:\n# <input id=\"html_stream\" value=\"efmrCH\" type=\"hidden\">\nstream_id_pattern = re.compile(r'id=\\\"html_stream\\\" value=\\\"(?P<stream_id>\\w+)\\\"')\n\n# Pattern for extracting each stream_url and\n# stream_quality_name used for quality naming.\n#\n# Example from HTML:\n# src=\"http://live-ws-hls.huomaotv.cn/live/<stream_id>_720/playlist.m3u8\"\nstream_info_pattern = re.compile(r\"\"\"\n (?P<stream_url>(?:[\\w\\/\\.\\-:]+)\n \\/[^_\\\"]+(?:_(?P<stream_quality_name>\\d+))\n ?/playlist.m3u8)\n\"\"\", re.VERBOSE)\n\n\nclass Huomao(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return url_re.match(url)\n\n def get_stream_id(self, html):\n \"\"\"Returns the stream_id contained in the HTML.\"\"\"\n stream_id = stream_id_pattern.search(html)\n\n if not stream_id:\n self.logger.error(\"Failed to extract stream_id.\")\n\n return stream_id.group(\"stream_id\")\n\n def get_stream_info(self, html):\n \"\"\"\n Returns a nested list of different stream options.\n\n Each entry in the list will contain a stream_url and stream_quality_name\n for each stream occurrence that was found in the JS.\n \"\"\"\n stream_info = stream_info_pattern.findall(html)\n\n if not stream_info:\n self.logger.error(\"Failed to extract stream_info.\")\n\n # Rename the \"\" quality to \"source\" by transforming the tuples to a\n # list and reassigning.\n stream_info_list = []\n for info in stream_info:\n if not info[1]:\n stream_info_list.append([info[0], \"source\"])\n else:\n stream_info_list.append(list(info))\n\n return stream_info_list\n\n def _get_streams(self):\n room_id = url_re.search(self.url).group(\"room_id\")\n html = self.session.http.get(mobile_url.format(room_id))\n stream_id = self.get_stream_id(html.text)\n stream_info = self.get_stream_info(html.text)\n\n streams = {}\n for info in stream_info:\n if stream_id in info[0]:\n streams[info[1]] = HLSStream(self.session, info[0])\n\n return streams\n\n\n__plugin__ = Huomao\n", "path": "src/streamlink/plugins/huomao.py"}]} | 1,741 | 889 |
gh_patches_debug_1500 | rasdani/github-patches | git_diff | lutris__lutris-2885 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Runners list is not updated on a second install/uninstall
**Describe the bug**
The second time you try to install a runner in a Lutris session, the runner list is no longer updated.
**Current behavior**
The runner list is updated only once per install/uninstall for each runner
**Steps to reproduce**
1. For the sake of this testing, make sure you don't have ZDoom installed.
2. Open Lutris.
3. Open the 'Manage runners' window.
4. Install ZDoom runner.
4.1. As expected, the runner list from the main Lutris window has added the ZDoom entry.
5. Uninstall ZDoom runner.
5.1. As expected, the runner list from the main Lutris window removed the ZDoom entry.
6. Reinstall ZDoom runner.
6.1. The runner list from the main Lutris window hasn't added the ZDoom entry!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/gui/widgets/sidebar.py`
Content:
```
1 """Sidebar for the main window"""
2 # Standard Library
3 import os
4
5 # Third Party Libraries
6 from gi.repository import GObject, Gtk, Pango
7
8 # Lutris Modules
9 from lutris import pga, platforms, runners
10 from lutris.game import Game
11 from lutris.gui.config.runner import RunnerConfigDialog
12 from lutris.gui.dialogs.runner_install import RunnerInstallDialog
13 from lutris.gui.dialogs.runners import RunnersDialog
14 from lutris.util import datapath
15
16 TYPE = 0
17 SLUG = 1
18 ICON = 2
19 LABEL = 3
20 GAMECOUNT = 4
21
22
23 class SidebarRow(Gtk.ListBoxRow):
24
25 def __init__(self, id_, type_, name, icon):
26 super().__init__()
27 self.type = type_
28 self.id = id_
29 self.btn_box = None
30 self.runner = None
31
32 self.box = Gtk.Box(spacing=6, margin_start=9, margin_end=9)
33
34 # Construct the left column icon space.
35 if icon:
36 self.box.add(icon)
37 else:
38 # Place a spacer if there is no loaded icon.
39 icon = Gtk.Box(spacing=6, margin_start=9, margin_end=9)
40 self.box.add(icon)
41
42 label = Gtk.Label(
43 label=name,
44 halign=Gtk.Align.START,
45 hexpand=True,
46 margin_top=6,
47 margin_bottom=6,
48 ellipsize=Pango.EllipsizeMode.END,
49 )
50 self.box.add(label)
51
52 self.add(self.box)
53
54 def _create_button_box(self):
55 self.btn_box = Gtk.Box(spacing=3, no_show_all=True, valign=Gtk.Align.CENTER, homogeneous=True)
56
57 # Creation is delayed because only installed runners can be imported
58 # and all visible boxes should be installed.
59 self.runner = runners.import_runner(self.id)()
60 entries = []
61 if self.runner.multiple_versions:
62 entries.append((
63 "system-software-install-symbolic",
64 "Manage Versions",
65 self.on_manage_versions,
66 ))
67 if self.runner.runnable_alone:
68 entries.append(("media-playback-start-symbolic", "Run", self.runner.run))
69 entries.append(("emblem-system-symbolic", "Configure", self.on_configure_runner))
70 for entry in entries:
71 btn = Gtk.Button(tooltip_text=entry[1], relief=Gtk.ReliefStyle.NONE, visible=True)
72 image = Gtk.Image.new_from_icon_name(entry[0], Gtk.IconSize.MENU)
73 image.show()
74 btn.add(image)
75 btn.connect("clicked", entry[2])
76 self.btn_box.add(btn)
77
78 self.box.add(self.btn_box)
79
80 def on_configure_runner(self, *args): # pylint: disable=unused-argument
81 RunnerConfigDialog(self.runner, parent=self.get_toplevel())
82
83 def on_manage_versions(self, *args): # pylint: disable=unused-argument
84 dlg_title = "Manage %s versions" % self.runner.name
85 RunnerInstallDialog(dlg_title, self.get_toplevel(), self.runner.name)
86
87 def do_state_flags_changed(self, previous_flags): # pylint: disable=arguments-differ
88 if self.id is not None and self.type == "runner":
89 flags = self.get_state_flags()
90 if flags & Gtk.StateFlags.PRELIGHT or flags & Gtk.StateFlags.SELECTED:
91 if self.btn_box is None:
92 self._create_button_box()
93 self.btn_box.show()
94 elif self.btn_box is not None and self.btn_box.get_visible():
95 self.btn_box.hide()
96 Gtk.ListBoxRow.do_state_flags_changed(self, previous_flags)
97
98
99 class SidebarHeader(Gtk.Box):
100
101 def __init__(self, name):
102 super().__init__(orientation=Gtk.Orientation.VERTICAL)
103 self.get_style_context().add_class("sidebar-header")
104 label = Gtk.Label(
105 halign=Gtk.Align.START,
106 hexpand=True,
107 use_markup=True,
108 label="<b>{}</b>".format(name),
109 )
110 label.get_style_context().add_class("dim-label")
111 box = Gtk.Box(margin_start=9, margin_top=6, margin_bottom=6, margin_right=9)
112 box.add(label)
113 self.add(box)
114 if name == "Runners":
115 manage_runners_button = Gtk.Button.new_from_icon_name("emblem-system-symbolic", Gtk.IconSize.MENU)
116 manage_runners_button.props.action_name = "win.manage-runners"
117 manage_runners_button.props.relief = Gtk.ReliefStyle.NONE
118 manage_runners_button.set_margin_right(16)
119 manage_runners_button.get_style_context().add_class("sidebar-button")
120 box.add(manage_runners_button)
121 self.add(Gtk.Separator())
122 self.show_all()
123
124
125 class SidebarListBox(Gtk.ListBox):
126 __gtype_name__ = "LutrisSidebar"
127
128 def __init__(self):
129 super().__init__()
130 self.get_style_context().add_class("sidebar")
131 self.installed_runners = []
132 self.active_platforms = pga.get_used_platforms()
133 self.runners = sorted(runners.__all__)
134 self.platforms = sorted(platforms.__all__)
135
136 GObject.add_emission_hook(RunnersDialog, "runner-installed", self.update)
137 GObject.add_emission_hook(RunnersDialog, "runner-removed", self.update)
138 GObject.add_emission_hook(Game, "game-updated", self.update)
139 GObject.add_emission_hook(Game, "game-removed", self.update)
140
141 # TODO: This should be in a more logical location
142 icon_theme = Gtk.IconTheme.get_default()
143 local_theme_path = os.path.join(datapath.get(), "icons")
144 if local_theme_path not in icon_theme.get_search_path():
145 icon_theme.prepend_search_path(local_theme_path)
146
147 all_row = SidebarRow(None, "runner", "All", None)
148 self.add(all_row)
149 self.select_row(all_row)
150 for runner in self.runners:
151 icon_name = runner.lower().replace(" ", "") + "-symbolic"
152 icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)
153 name = runners.import_runner(runner).human_name
154 self.add(SidebarRow(runner, "runner", name, icon))
155
156 self.add(SidebarRow(None, "platform", "All", None))
157 for platform in self.platforms:
158 icon_name = (platform.lower().replace(" ", "").replace("/", "_") + "-symbolic")
159 icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)
160 self.add(SidebarRow(platform, "platform", platform, icon))
161
162 self.set_filter_func(self._filter_func)
163 self.set_header_func(self._header_func)
164 self.update()
165 self.show_all()
166
167 def _filter_func(self, row):
168 if row is None:
169 return True
170 if row.type == "runner":
171 if row.id is None:
172 return True # 'All'
173 return row.id in self.installed_runners
174 if len(self.active_platforms) <= 1:
175 return False # Hide useless filter
176 if row.id is None: # 'All'
177 return True
178 return row.id in self.active_platforms
179
180 def _header_func(self, row, before):
181 if row.get_header():
182 return
183
184 if not before:
185 row.set_header(SidebarHeader("Runners"))
186 elif before.type == "runner" and row.type == "platform":
187 row.set_header(SidebarHeader("Platforms"))
188
189 def update(self, *args): # pylint: disable=unused-argument
190 self.installed_runners = [runner.name for runner in runners.get_installed()]
191 self.active_platforms = pga.get_used_platforms()
192 self.invalidate_filter()
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/gui/widgets/sidebar.py b/lutris/gui/widgets/sidebar.py
--- a/lutris/gui/widgets/sidebar.py
+++ b/lutris/gui/widgets/sidebar.py
@@ -190,3 +190,4 @@
self.installed_runners = [runner.name for runner in runners.get_installed()]
self.active_platforms = pga.get_used_platforms()
self.invalidate_filter()
+ return True
| {"golden_diff": "diff --git a/lutris/gui/widgets/sidebar.py b/lutris/gui/widgets/sidebar.py\n--- a/lutris/gui/widgets/sidebar.py\n+++ b/lutris/gui/widgets/sidebar.py\n@@ -190,3 +190,4 @@\n self.installed_runners = [runner.name for runner in runners.get_installed()]\n self.active_platforms = pga.get_used_platforms()\n self.invalidate_filter()\n+ return True\n", "issue": "Runners list is not updated on a second install/uninstall\n**Describe the bug**\r\nThe second time you are trying to install a runner on a Lutris session, the runner list won't be updated anymore.\r\n\r\n**Current behavior**\r\nThe runner list is updated only once per install/uninstall for each runner\r\n\r\n**Steps to reproduce**\r\n1. For the sake of this testing, make sure you don't have ZDoom installed.\r\n2. Open Lutris.\r\n3. Open the 'Manage runners' window.\r\n4. Install ZDoom runner.\r\n4.1. As expected, the runner list from the main Lutris window has added the ZDoom entry.\r\n5. Uninstall ZDoom runner.\r\n5.1. As expected, the runner list from the main Lutris window removed the ZDoom entry.\r\n6. Reinstall ZDoom runner.\r\n6.1. The runner list from the main Lutris window haven't added the ZDoom entry!\n", "before_files": [{"content": "\"\"\"Sidebar for the main window\"\"\"\n# Standard Library\nimport os\n\n# Third Party Libraries\nfrom gi.repository import GObject, Gtk, Pango\n\n# Lutris Modules\nfrom lutris import pga, platforms, runners\nfrom lutris.game import Game\nfrom lutris.gui.config.runner import RunnerConfigDialog\nfrom lutris.gui.dialogs.runner_install import RunnerInstallDialog\nfrom lutris.gui.dialogs.runners import RunnersDialog\nfrom lutris.util import datapath\n\nTYPE = 0\nSLUG = 1\nICON = 2\nLABEL = 3\nGAMECOUNT = 4\n\n\nclass SidebarRow(Gtk.ListBoxRow):\n\n def __init__(self, id_, type_, name, icon):\n super().__init__()\n self.type = type_\n self.id = id_\n self.btn_box = None\n self.runner = None\n\n self.box = Gtk.Box(spacing=6, margin_start=9, margin_end=9)\n\n # Construct the left column icon space.\n if icon:\n self.box.add(icon)\n else:\n # Place a spacer if there is no loaded icon.\n icon = Gtk.Box(spacing=6, margin_start=9, margin_end=9)\n self.box.add(icon)\n\n label = Gtk.Label(\n label=name,\n halign=Gtk.Align.START,\n hexpand=True,\n margin_top=6,\n margin_bottom=6,\n ellipsize=Pango.EllipsizeMode.END,\n )\n self.box.add(label)\n\n self.add(self.box)\n\n def _create_button_box(self):\n self.btn_box = Gtk.Box(spacing=3, no_show_all=True, valign=Gtk.Align.CENTER, homogeneous=True)\n\n # Creation is delayed because only installed runners can be imported\n # and all visible boxes should be installed.\n self.runner = runners.import_runner(self.id)()\n entries = []\n if self.runner.multiple_versions:\n entries.append((\n \"system-software-install-symbolic\",\n \"Manage Versions\",\n self.on_manage_versions,\n ))\n if self.runner.runnable_alone:\n entries.append((\"media-playback-start-symbolic\", \"Run\", self.runner.run))\n entries.append((\"emblem-system-symbolic\", \"Configure\", self.on_configure_runner))\n for entry in entries:\n btn = Gtk.Button(tooltip_text=entry[1], relief=Gtk.ReliefStyle.NONE, visible=True)\n image = Gtk.Image.new_from_icon_name(entry[0], Gtk.IconSize.MENU)\n image.show()\n btn.add(image)\n btn.connect(\"clicked\", entry[2])\n self.btn_box.add(btn)\n\n self.box.add(self.btn_box)\n\n def on_configure_runner(self, *args): # pylint: disable=unused-argument\n RunnerConfigDialog(self.runner, parent=self.get_toplevel())\n\n def on_manage_versions(self, *args): # 
pylint: disable=unused-argument\n dlg_title = \"Manage %s versions\" % self.runner.name\n RunnerInstallDialog(dlg_title, self.get_toplevel(), self.runner.name)\n\n def do_state_flags_changed(self, previous_flags): # pylint: disable=arguments-differ\n if self.id is not None and self.type == \"runner\":\n flags = self.get_state_flags()\n if flags & Gtk.StateFlags.PRELIGHT or flags & Gtk.StateFlags.SELECTED:\n if self.btn_box is None:\n self._create_button_box()\n self.btn_box.show()\n elif self.btn_box is not None and self.btn_box.get_visible():\n self.btn_box.hide()\n Gtk.ListBoxRow.do_state_flags_changed(self, previous_flags)\n\n\nclass SidebarHeader(Gtk.Box):\n\n def __init__(self, name):\n super().__init__(orientation=Gtk.Orientation.VERTICAL)\n self.get_style_context().add_class(\"sidebar-header\")\n label = Gtk.Label(\n halign=Gtk.Align.START,\n hexpand=True,\n use_markup=True,\n label=\"<b>{}</b>\".format(name),\n )\n label.get_style_context().add_class(\"dim-label\")\n box = Gtk.Box(margin_start=9, margin_top=6, margin_bottom=6, margin_right=9)\n box.add(label)\n self.add(box)\n if name == \"Runners\":\n manage_runners_button = Gtk.Button.new_from_icon_name(\"emblem-system-symbolic\", Gtk.IconSize.MENU)\n manage_runners_button.props.action_name = \"win.manage-runners\"\n manage_runners_button.props.relief = Gtk.ReliefStyle.NONE\n manage_runners_button.set_margin_right(16)\n manage_runners_button.get_style_context().add_class(\"sidebar-button\")\n box.add(manage_runners_button)\n self.add(Gtk.Separator())\n self.show_all()\n\n\nclass SidebarListBox(Gtk.ListBox):\n __gtype_name__ = \"LutrisSidebar\"\n\n def __init__(self):\n super().__init__()\n self.get_style_context().add_class(\"sidebar\")\n self.installed_runners = []\n self.active_platforms = pga.get_used_platforms()\n self.runners = sorted(runners.__all__)\n self.platforms = sorted(platforms.__all__)\n\n GObject.add_emission_hook(RunnersDialog, \"runner-installed\", self.update)\n GObject.add_emission_hook(RunnersDialog, \"runner-removed\", self.update)\n GObject.add_emission_hook(Game, \"game-updated\", self.update)\n GObject.add_emission_hook(Game, \"game-removed\", self.update)\n\n # TODO: This should be in a more logical location\n icon_theme = Gtk.IconTheme.get_default()\n local_theme_path = os.path.join(datapath.get(), \"icons\")\n if local_theme_path not in icon_theme.get_search_path():\n icon_theme.prepend_search_path(local_theme_path)\n\n all_row = SidebarRow(None, \"runner\", \"All\", None)\n self.add(all_row)\n self.select_row(all_row)\n for runner in self.runners:\n icon_name = runner.lower().replace(\" \", \"\") + \"-symbolic\"\n icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n name = runners.import_runner(runner).human_name\n self.add(SidebarRow(runner, \"runner\", name, icon))\n\n self.add(SidebarRow(None, \"platform\", \"All\", None))\n for platform in self.platforms:\n icon_name = (platform.lower().replace(\" \", \"\").replace(\"/\", \"_\") + \"-symbolic\")\n icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n self.add(SidebarRow(platform, \"platform\", platform, icon))\n\n self.set_filter_func(self._filter_func)\n self.set_header_func(self._header_func)\n self.update()\n self.show_all()\n\n def _filter_func(self, row):\n if row is None:\n return True\n if row.type == \"runner\":\n if row.id is None:\n return True # 'All'\n return row.id in self.installed_runners\n if len(self.active_platforms) <= 1:\n return False # Hide useless filter\n if row.id is None: # 'All'\n return 
True\n return row.id in self.active_platforms\n\n def _header_func(self, row, before):\n if row.get_header():\n return\n\n if not before:\n row.set_header(SidebarHeader(\"Runners\"))\n elif before.type == \"runner\" and row.type == \"platform\":\n row.set_header(SidebarHeader(\"Platforms\"))\n\n def update(self, *args): # pylint: disable=unused-argument\n self.installed_runners = [runner.name for runner in runners.get_installed()]\n self.active_platforms = pga.get_used_platforms()\n self.invalidate_filter()\n", "path": "lutris/gui/widgets/sidebar.py"}], "after_files": [{"content": "\"\"\"Sidebar for the main window\"\"\"\n# Standard Library\nimport os\n\n# Third Party Libraries\nfrom gi.repository import GObject, Gtk, Pango\n\n# Lutris Modules\nfrom lutris import pga, platforms, runners\nfrom lutris.game import Game\nfrom lutris.gui.config.runner import RunnerConfigDialog\nfrom lutris.gui.dialogs.runner_install import RunnerInstallDialog\nfrom lutris.gui.dialogs.runners import RunnersDialog\nfrom lutris.util import datapath\n\nTYPE = 0\nSLUG = 1\nICON = 2\nLABEL = 3\nGAMECOUNT = 4\n\n\nclass SidebarRow(Gtk.ListBoxRow):\n\n def __init__(self, id_, type_, name, icon):\n super().__init__()\n self.type = type_\n self.id = id_\n self.btn_box = None\n self.runner = None\n\n self.box = Gtk.Box(spacing=6, margin_start=9, margin_end=9)\n\n # Construct the left column icon space.\n if icon:\n self.box.add(icon)\n else:\n # Place a spacer if there is no loaded icon.\n icon = Gtk.Box(spacing=6, margin_start=9, margin_end=9)\n self.box.add(icon)\n\n label = Gtk.Label(\n label=name,\n halign=Gtk.Align.START,\n hexpand=True,\n margin_top=6,\n margin_bottom=6,\n ellipsize=Pango.EllipsizeMode.END,\n )\n self.box.add(label)\n\n self.add(self.box)\n\n def _create_button_box(self):\n self.btn_box = Gtk.Box(spacing=3, no_show_all=True, valign=Gtk.Align.CENTER, homogeneous=True)\n\n # Creation is delayed because only installed runners can be imported\n # and all visible boxes should be installed.\n self.runner = runners.import_runner(self.id)()\n entries = []\n if self.runner.multiple_versions:\n entries.append((\n \"system-software-install-symbolic\",\n \"Manage Versions\",\n self.on_manage_versions,\n ))\n if self.runner.runnable_alone:\n entries.append((\"media-playback-start-symbolic\", \"Run\", self.runner.run))\n entries.append((\"emblem-system-symbolic\", \"Configure\", self.on_configure_runner))\n for entry in entries:\n btn = Gtk.Button(tooltip_text=entry[1], relief=Gtk.ReliefStyle.NONE, visible=True)\n image = Gtk.Image.new_from_icon_name(entry[0], Gtk.IconSize.MENU)\n image.show()\n btn.add(image)\n btn.connect(\"clicked\", entry[2])\n self.btn_box.add(btn)\n\n self.box.add(self.btn_box)\n\n def on_configure_runner(self, *args): # pylint: disable=unused-argument\n RunnerConfigDialog(self.runner, parent=self.get_toplevel())\n\n def on_manage_versions(self, *args): # pylint: disable=unused-argument\n dlg_title = \"Manage %s versions\" % self.runner.name\n RunnerInstallDialog(dlg_title, self.get_toplevel(), self.runner.name)\n\n def do_state_flags_changed(self, previous_flags): # pylint: disable=arguments-differ\n if self.id is not None and self.type == \"runner\":\n flags = self.get_state_flags()\n if flags & Gtk.StateFlags.PRELIGHT or flags & Gtk.StateFlags.SELECTED:\n if self.btn_box is None:\n self._create_button_box()\n self.btn_box.show()\n elif self.btn_box is not None and self.btn_box.get_visible():\n self.btn_box.hide()\n Gtk.ListBoxRow.do_state_flags_changed(self, previous_flags)\n\n\nclass 
SidebarHeader(Gtk.Box):\n\n def __init__(self, name):\n super().__init__(orientation=Gtk.Orientation.VERTICAL)\n self.get_style_context().add_class(\"sidebar-header\")\n label = Gtk.Label(\n halign=Gtk.Align.START,\n hexpand=True,\n use_markup=True,\n label=\"<b>{}</b>\".format(name),\n )\n label.get_style_context().add_class(\"dim-label\")\n box = Gtk.Box(margin_start=9, margin_top=6, margin_bottom=6, margin_right=9)\n box.add(label)\n self.add(box)\n if name == \"Runners\":\n manage_runners_button = Gtk.Button.new_from_icon_name(\"emblem-system-symbolic\", Gtk.IconSize.MENU)\n manage_runners_button.props.action_name = \"win.manage-runners\"\n manage_runners_button.props.relief = Gtk.ReliefStyle.NONE\n manage_runners_button.set_margin_right(16)\n manage_runners_button.get_style_context().add_class(\"sidebar-button\")\n box.add(manage_runners_button)\n self.add(Gtk.Separator())\n self.show_all()\n\n\nclass SidebarListBox(Gtk.ListBox):\n __gtype_name__ = \"LutrisSidebar\"\n\n def __init__(self):\n super().__init__()\n self.get_style_context().add_class(\"sidebar\")\n self.installed_runners = []\n self.active_platforms = pga.get_used_platforms()\n self.runners = sorted(runners.__all__)\n self.platforms = sorted(platforms.__all__)\n\n GObject.add_emission_hook(RunnersDialog, \"runner-installed\", self.update)\n GObject.add_emission_hook(RunnersDialog, \"runner-removed\", self.update)\n GObject.add_emission_hook(Game, \"game-updated\", self.update)\n GObject.add_emission_hook(Game, \"game-removed\", self.update)\n\n # TODO: This should be in a more logical location\n icon_theme = Gtk.IconTheme.get_default()\n local_theme_path = os.path.join(datapath.get(), \"icons\")\n if local_theme_path not in icon_theme.get_search_path():\n icon_theme.prepend_search_path(local_theme_path)\n\n all_row = SidebarRow(None, \"runner\", \"All\", None)\n self.add(all_row)\n self.select_row(all_row)\n for runner in self.runners:\n icon_name = runner.lower().replace(\" \", \"\") + \"-symbolic\"\n icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n name = runners.import_runner(runner).human_name\n self.add(SidebarRow(runner, \"runner\", name, icon))\n\n self.add(SidebarRow(None, \"platform\", \"All\", None))\n for platform in self.platforms:\n icon_name = (platform.lower().replace(\" \", \"\").replace(\"/\", \"_\") + \"-symbolic\")\n icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n self.add(SidebarRow(platform, \"platform\", platform, icon))\n\n self.set_filter_func(self._filter_func)\n self.set_header_func(self._header_func)\n self.update()\n self.show_all()\n\n def _filter_func(self, row):\n if row is None:\n return True\n if row.type == \"runner\":\n if row.id is None:\n return True # 'All'\n return row.id in self.installed_runners\n if len(self.active_platforms) <= 1:\n return False # Hide useless filter\n if row.id is None: # 'All'\n return True\n return row.id in self.active_platforms\n\n def _header_func(self, row, before):\n if row.get_header():\n return\n\n if not before:\n row.set_header(SidebarHeader(\"Runners\"))\n elif before.type == \"runner\" and row.type == \"platform\":\n row.set_header(SidebarHeader(\"Platforms\"))\n\n def update(self, *args): # pylint: disable=unused-argument\n self.installed_runners = [runner.name for runner in runners.get_installed()]\n self.active_platforms = pga.get_used_platforms()\n self.invalidate_filter()\n return True\n", "path": "lutris/gui/widgets/sidebar.py"}]} | 2,590 | 94 |
gh_patches_debug_9747 | rasdani/github-patches | git_diff | getredash__redash-543 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failed to run mongodb query
When I try to run this MongoDB query on either a local install or the redash demo, an error is raised: "Error running query: string indices must be integers"

Below is the query; I think this is an error in the Python code, since I can run other simple queries just fine.
``` json
{
"collection": "Email",
"aggregate": [
{
"$group": {
"_id": {
"$dateToString": {
"format": "%Y-%m-%d",
"date": "$sendTime"
}
},
"sent": {
"$sum": {
"$cond": {
"if": {
"$gte": [
"$sent",
1
]
},
"then": 1,
"else": 0
}
}
},
"opened": {
"$sum": {
"$cond": {
"if": {
"$gte": [
"$opened",
1
]
},
"then": 1,
"else": 0
}
}
},
"clicked": {
"$sum": {
"$cond": {
"if": {
"$gte": [
"$clicked",
1
]
},
"then": 1,
"else": 0
}
}
}
}
},
{
"$limit": 10
}
]
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/query_runner/mongodb.py`
Content:
```
1 import json
2 import datetime
3 import logging
4 import re
5 import time
6 from dateutil.parser import parse
7
8 from redash.utils import JSONEncoder
9 from redash.query_runner import *
10
11 logger = logging.getLogger(__name__)
12
13 try:
14 import pymongo
15 from bson.objectid import ObjectId
16 from bson.son import SON
17 enabled = True
18
19 except ImportError:
20 logger.warning("Missing dependencies. Please install pymongo.")
21 logger.warning("You can use pip: pip install pymongo")
22 enabled = False
23
24
25 TYPES_MAP = {
26 str: TYPE_STRING,
27 unicode: TYPE_STRING,
28 int: TYPE_INTEGER,
29 long: TYPE_INTEGER,
30 float: TYPE_FLOAT,
31 bool: TYPE_BOOLEAN,
32 datetime.datetime: TYPE_DATETIME,
33 }
34
35 date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE)
36
37 class MongoDBJSONEncoder(JSONEncoder):
38 def default(self, o):
39 if isinstance(o, ObjectId):
40 return str(o)
41
42 return super(MongoDBJSONEncoder, self).default(o)
43
44 # Simple query example:
45 #
46 # {
47 # "collection" : "my_collection",
48 # "query" : {
49 # "date" : {
50 # "$gt" : "ISODate(\"2015-01-15 11:41\")",
51 # },
52 # "type" : 1
53 # },
54 # "fields" : {
55 # "_id" : 1,
56 # "name" : 2
57 # },
58 # "sort" : [
59 # {
60 # "name" : "date",
61 # "direction" : -1
62 # }
63 # ]
64 #
65 # }
66 #
67 #
68 # Aggregation
69 # ===========
70 # Uses a syntax similar to the one used in PyMongo, however to support the
71 # correct order of sorting, it uses a regular list for the "$sort" operation
72 # that converts into a SON (sorted dictionary) object before execution.
73 #
74 # Aggregation query example:
75 #
76 # {
77 # "collection" : "things",
78 # "aggregate" : [
79 # {
80 # "$unwind" : "$tags"
81 # },
82 # {
83 # "$group" : {
84 # "_id" : "$tags",
85 # "count" : { "$sum" : 1 }
86 # }
87 # },
88 # {
89 # "$sort" : [
90 # {
91 # "name" : "count",
92 # "direction" : -1
93 # },
94 # {
95 # "name" : "_id",
96 # "direction" : -1
97 # }
98 # ]
99 # }
100 # ]
101 # }
102 #
103 #
104 class MongoDB(BaseQueryRunner):
105 @classmethod
106 def configuration_schema(cls):
107 return {
108 'type': 'object',
109 'properties': {
110 'connectionString': {
111 'type': 'string',
112 'title': 'Connection String'
113 },
114 'dbName': {
115 'type': 'string',
116 'title': "Database Name"
117 },
118 'replicaSetName': {
119 'type': 'string',
120 'title': 'Replica Set Name'
121 },
122 },
123 'required': ['connectionString']
124 }
125
126 @classmethod
127 def enabled(cls):
128 return enabled
129
130 @classmethod
131 def annotate_query(cls):
132 return False
133
134 def __init__(self, configuration_json):
135 super(MongoDB, self).__init__(configuration_json)
136
137 self.syntax = 'json'
138
139 self.db_name = self.configuration["dbName"]
140
141 self.is_replica_set = True if "replicaSetName" in self.configuration and self.configuration["replicaSetName"] else False
142
143 def _get_column_by_name(self, columns, column_name):
144 for c in columns:
145 if "name" in c and c["name"] == column_name:
146 return c
147
148 return None
149
150 def _fix_dates(self, data):
151 for k in data:
152 if isinstance(data[k], list):
153 for i in range(0, len(data[k])):
154 self._fix_dates(data[k][i])
155 elif isinstance(data[k], dict):
156 self._fix_dates(data[k])
157 else:
158 if isinstance(data[k], (str, unicode)):
159 self._convert_date(data, k)
160
161 def _convert_date(self, q, field_name):
162 m = date_regex.findall(q[field_name])
163 if len(m) > 0:
164 q[field_name] = parse(m[0], yearfirst=True)
165
166 def run_query(self, query):
167 if self.is_replica_set:
168 db_connection = pymongo.MongoReplicaSetClient(self.configuration["connectionString"], replicaSet=self.configuration["replicaSetName"])
169 else:
170 db_connection = pymongo.MongoClient(self.configuration["connectionString"])
171
172 db = db_connection[self.db_name]
173
174 logger.debug("mongodb connection string: %s", self.configuration['connectionString'])
175 logger.debug("mongodb got query: %s", query)
176
177 try:
178 query_data = json.loads(query)
179 self._fix_dates(query_data)
180 except ValueError:
181 return None, "Invalid query format. The query is not a valid JSON."
182
183 if "collection" not in query_data:
184 return None, "'collection' must have a value to run a query"
185 else:
186 collection = query_data["collection"]
187
188 q = query_data.get("query", None)
189 f = None
190
191 aggregate = query_data.get("aggregate", None)
192 if aggregate:
193 for step in aggregate:
194 if "$sort" in step:
195 sort_list = []
196 for sort_item in step["$sort"]:
197 sort_list.append((sort_item["name"], sort_item["direction"]))
198
199 step["$sort"] = SON(sort_list)
200
201 if not aggregate:
202 s = None
203 if "sort" in query_data and query_data["sort"]:
204 s = []
205 for field in query_data["sort"]:
206 s.append((field["name"], field["direction"]))
207
208 if "fields" in query_data:
209 f = query_data["fields"]
210
211 s = None
212 if "sort" in query_data and query_data["sort"]:
213 s = []
214 for field_data in query_data["sort"]:
215 s.append((field_data["name"], field_data["direction"]))
216
217 columns = []
218 rows = []
219
220 cursor = None
221 if q or (not q and not aggregate):
222 if s:
223 cursor = db[collection].find(q, f).sort(s)
224 else:
225 cursor = db[collection].find(q, f)
226
227 if "skip" in query_data:
228 cursor = cursor.skip(query_data["skip"])
229
230 if "limit" in query_data:
231 cursor = cursor.limit(query_data["limit"])
232
233 elif aggregate:
234 r = db[collection].aggregate(aggregate)
235
236 # Backwards compatibility with older pymongo versions.
237 #
238 # Older pymongo version would return a dictionary from an aggregate command.
239 # The dict would contain a "result" key which would hold the cursor.
240 # Newer ones return pymongo.command_cursor.CommandCursor.
241 if isinstance(r, dict):
242 cursor = r["result"]
243 else:
244 cursor = r
245
246 for r in cursor:
247 for k in r:
248 if self._get_column_by_name(columns, k) is None:
249 columns.append({
250 "name": k,
251 "friendly_name": k,
252 "type": TYPES_MAP.get(type(r[k]), TYPE_STRING)
253 })
254
255 rows.append(r)
256
257 if f:
258 ordered_columns = []
259 for k in sorted(f, key=f.get):
260 ordered_columns.append(self._get_column_by_name(columns, k))
261
262 columns = ordered_columns
263
264 data = {
265 "columns": columns,
266 "rows": rows
267 }
268 error = None
269 json_data = json.dumps(data, cls=MongoDBJSONEncoder)
270
271 return json_data, error
272
273 register(MongoDB)
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py
--- a/redash/query_runner/mongodb.py
+++ b/redash/query_runner/mongodb.py
@@ -151,7 +151,11 @@
for k in data:
if isinstance(data[k], list):
for i in range(0, len(data[k])):
- self._fix_dates(data[k][i])
+ if isinstance(data[k][i], (str, unicode)):
+ self._convert_date(data[k], i)
+ elif not isinstance(data[k][i], (int)):
+ self._fix_dates(data[k][i])
+
elif isinstance(data[k], dict):
self._fix_dates(data[k])
else:
| {"golden_diff": "diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py\n--- a/redash/query_runner/mongodb.py\n+++ b/redash/query_runner/mongodb.py\n@@ -151,7 +151,11 @@\n for k in data:\n if isinstance(data[k], list):\n for i in range(0, len(data[k])):\n- self._fix_dates(data[k][i])\n+ if isinstance(data[k][i], (str, unicode)):\n+ self._convert_date(data[k], i)\n+ elif not isinstance(data[k][i], (int)):\n+ self._fix_dates(data[k][i])\n+\n elif isinstance(data[k], dict):\n self._fix_dates(data[k])\n else:\n", "issue": "Failed to run mongodb query\nWhen I try to run this mongodb on both local install or redash demo, a error raised \"Error running query: string indices must be integers\"\n\nBelow is the query, I think this is an error of python code since I can run other simple query just fine.\n\n``` json\n{\n \"collection\": \"Email\",\n \"aggregate\": [\n {\n \"$group\": {\n \"_id\": {\n \"$dateToString\": {\n \"format\": \"%Y-%m-%d\",\n \"date\": \"$sendTime\"\n }\n },\n \"sent\": {\n \"$sum\": {\n \"$cond\": {\n \"if\": {\n \"$gte\": [\n \"$sent\",\n 1\n ]\n },\n \"then\": 1,\n \"else\": 0\n }\n }\n },\n \"opened\": {\n \"$sum\": {\n \"$cond\": {\n \"if\": {\n \"$gte\": [\n \"$opened\",\n 1\n ]\n },\n \"then\": 1,\n \"else\": 0\n }\n }\n },\n \"clicked\": {\n \"$sum\": {\n \"$cond\": {\n \"if\": {\n \"$gte\": [\n \"$clicked\",\n 1\n ]\n },\n \"then\": 1,\n \"else\": 0\n }\n }\n }\n }\n },\n {\n \"$limit\": 10\n }\n ]\n}\n```\n\n", "before_files": [{"content": "import json\nimport datetime\nimport logging\nimport re\nimport time\nfrom dateutil.parser import parse\n\nfrom redash.utils import JSONEncoder\nfrom redash.query_runner import *\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import pymongo\n from bson.objectid import ObjectId\n from bson.son import SON\n enabled = True\n\nexcept ImportError:\n logger.warning(\"Missing dependencies. 
Please install pymongo.\")\n logger.warning(\"You can use pip: pip install pymongo\")\n enabled = False\n\n\nTYPES_MAP = {\n str: TYPE_STRING,\n unicode: TYPE_STRING,\n int: TYPE_INTEGER,\n long: TYPE_INTEGER,\n float: TYPE_FLOAT,\n bool: TYPE_BOOLEAN,\n datetime.datetime: TYPE_DATETIME,\n}\n\ndate_regex = re.compile(\"ISODate\\(\\\"(.*)\\\"\\)\", re.IGNORECASE)\n\nclass MongoDBJSONEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, ObjectId):\n return str(o)\n\n return super(MongoDBJSONEncoder, self).default(o)\n\n# Simple query example:\n#\n# {\n# \"collection\" : \"my_collection\",\n# \"query\" : {\n# \"date\" : {\n# \"$gt\" : \"ISODate(\\\"2015-01-15 11:41\\\")\",\n# },\n# \"type\" : 1\n# },\n# \"fields\" : {\n# \"_id\" : 1,\n# \"name\" : 2\n# },\n# \"sort\" : [\n# {\n# \"name\" : \"date\",\n# \"direction\" : -1\n# }\n# ]\n#\n# }\n#\n#\n# Aggregation\n# ===========\n# Uses a syntax similar to the one used in PyMongo, however to support the\n# correct order of sorting, it uses a regular list for the \"$sort\" operation\n# that converts into a SON (sorted dictionary) object before execution.\n#\n# Aggregation query example:\n#\n# {\n# \"collection\" : \"things\",\n# \"aggregate\" : [\n# {\n# \"$unwind\" : \"$tags\"\n# },\n# {\n# \"$group\" : {\n# \"_id\" : \"$tags\",\n# \"count\" : { \"$sum\" : 1 }\n# }\n# },\n# {\n# \"$sort\" : [\n# {\n# \"name\" : \"count\",\n# \"direction\" : -1\n# },\n# {\n# \"name\" : \"_id\",\n# \"direction\" : -1\n# }\n# ]\n# }\n# ]\n# }\n#\n#\nclass MongoDB(BaseQueryRunner):\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {\n 'connectionString': {\n 'type': 'string',\n 'title': 'Connection String'\n },\n 'dbName': {\n 'type': 'string',\n 'title': \"Database Name\"\n },\n 'replicaSetName': {\n 'type': 'string',\n 'title': 'Replica Set Name'\n },\n },\n 'required': ['connectionString']\n }\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def annotate_query(cls):\n return False\n\n def __init__(self, configuration_json):\n super(MongoDB, self).__init__(configuration_json)\n\n self.syntax = 'json'\n\n self.db_name = self.configuration[\"dbName\"]\n\n self.is_replica_set = True if \"replicaSetName\" in self.configuration and self.configuration[\"replicaSetName\"] else False\n\n def _get_column_by_name(self, columns, column_name):\n for c in columns:\n if \"name\" in c and c[\"name\"] == column_name:\n return c\n\n return None\n\n def _fix_dates(self, data):\n for k in data:\n if isinstance(data[k], list):\n for i in range(0, len(data[k])):\n self._fix_dates(data[k][i])\n elif isinstance(data[k], dict):\n self._fix_dates(data[k])\n else:\n if isinstance(data[k], (str, unicode)):\n self._convert_date(data, k)\n\n def _convert_date(self, q, field_name):\n m = date_regex.findall(q[field_name])\n if len(m) > 0:\n q[field_name] = parse(m[0], yearfirst=True)\n\n def run_query(self, query):\n if self.is_replica_set:\n db_connection = pymongo.MongoReplicaSetClient(self.configuration[\"connectionString\"], replicaSet=self.configuration[\"replicaSetName\"])\n else:\n db_connection = pymongo.MongoClient(self.configuration[\"connectionString\"])\n\n db = db_connection[self.db_name]\n\n logger.debug(\"mongodb connection string: %s\", self.configuration['connectionString'])\n logger.debug(\"mongodb got query: %s\", query)\n\n try:\n query_data = json.loads(query)\n self._fix_dates(query_data)\n except ValueError:\n return None, \"Invalid query format. 
The query is not a valid JSON.\"\n\n if \"collection\" not in query_data:\n return None, \"'collection' must have a value to run a query\"\n else:\n collection = query_data[\"collection\"]\n\n q = query_data.get(\"query\", None)\n f = None\n\n aggregate = query_data.get(\"aggregate\", None)\n if aggregate:\n for step in aggregate:\n if \"$sort\" in step:\n sort_list = []\n for sort_item in step[\"$sort\"]:\n sort_list.append((sort_item[\"name\"], sort_item[\"direction\"]))\n\n step[\"$sort\"] = SON(sort_list)\n\n if not aggregate:\n s = None\n if \"sort\" in query_data and query_data[\"sort\"]:\n s = []\n for field in query_data[\"sort\"]:\n s.append((field[\"name\"], field[\"direction\"]))\n\n if \"fields\" in query_data:\n f = query_data[\"fields\"]\n\n s = None\n if \"sort\" in query_data and query_data[\"sort\"]:\n s = []\n for field_data in query_data[\"sort\"]:\n s.append((field_data[\"name\"], field_data[\"direction\"]))\n\n columns = []\n rows = []\n\n cursor = None\n if q or (not q and not aggregate):\n if s:\n cursor = db[collection].find(q, f).sort(s)\n else:\n cursor = db[collection].find(q, f)\n\n if \"skip\" in query_data:\n cursor = cursor.skip(query_data[\"skip\"])\n\n if \"limit\" in query_data:\n cursor = cursor.limit(query_data[\"limit\"])\n\n elif aggregate:\n r = db[collection].aggregate(aggregate)\n\n # Backwards compatibility with older pymongo versions.\n #\n # Older pymongo version would return a dictionary from an aggregate command.\n # The dict would contain a \"result\" key which would hold the cursor.\n # Newer ones return pymongo.command_cursor.CommandCursor.\n if isinstance(r, dict):\n cursor = r[\"result\"]\n else:\n cursor = r\n\n for r in cursor:\n for k in r:\n if self._get_column_by_name(columns, k) is None:\n columns.append({\n \"name\": k,\n \"friendly_name\": k,\n \"type\": TYPES_MAP.get(type(r[k]), TYPE_STRING)\n })\n\n rows.append(r)\n\n if f:\n ordered_columns = []\n for k in sorted(f, key=f.get):\n ordered_columns.append(self._get_column_by_name(columns, k))\n\n columns = ordered_columns\n\n data = {\n \"columns\": columns,\n \"rows\": rows\n }\n error = None\n json_data = json.dumps(data, cls=MongoDBJSONEncoder)\n\n return json_data, error\n\nregister(MongoDB)\n", "path": "redash/query_runner/mongodb.py"}], "after_files": [{"content": "import json\nimport datetime\nimport logging\nimport re\nimport time\nfrom dateutil.parser import parse\n\nfrom redash.utils import JSONEncoder\nfrom redash.query_runner import *\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import pymongo\n from bson.objectid import ObjectId\n from bson.son import SON\n enabled = True\n\nexcept ImportError:\n logger.warning(\"Missing dependencies. 
Please install pymongo.\")\n logger.warning(\"You can use pip: pip install pymongo\")\n enabled = False\n\n\nTYPES_MAP = {\n str: TYPE_STRING,\n unicode: TYPE_STRING,\n int: TYPE_INTEGER,\n long: TYPE_INTEGER,\n float: TYPE_FLOAT,\n bool: TYPE_BOOLEAN,\n datetime.datetime: TYPE_DATETIME,\n}\n\ndate_regex = re.compile(\"ISODate\\(\\\"(.*)\\\"\\)\", re.IGNORECASE)\n\nclass MongoDBJSONEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, ObjectId):\n return str(o)\n\n return super(MongoDBJSONEncoder, self).default(o)\n\n# Simple query example:\n#\n# {\n# \"collection\" : \"my_collection\",\n# \"query\" : {\n# \"date\" : {\n# \"$gt\" : \"ISODate(\\\"2015-01-15 11:41\\\")\",\n# },\n# \"type\" : 1\n# },\n# \"fields\" : {\n# \"_id\" : 1,\n# \"name\" : 2\n# },\n# \"sort\" : [\n# {\n# \"name\" : \"date\",\n# \"direction\" : -1\n# }\n# ]\n#\n# }\n#\n#\n# Aggregation\n# ===========\n# Uses a syntax similar to the one used in PyMongo, however to support the\n# correct order of sorting, it uses a regular list for the \"$sort\" operation\n# that converts into a SON (sorted dictionary) object before execution.\n#\n# Aggregation query example:\n#\n# {\n# \"collection\" : \"things\",\n# \"aggregate\" : [\n# {\n# \"$unwind\" : \"$tags\"\n# },\n# {\n# \"$group\" : {\n# \"_id\" : \"$tags\",\n# \"count\" : { \"$sum\" : 1 }\n# }\n# },\n# {\n# \"$sort\" : [\n# {\n# \"name\" : \"count\",\n# \"direction\" : -1\n# },\n# {\n# \"name\" : \"_id\",\n# \"direction\" : -1\n# }\n# ]\n# }\n# ]\n# }\n#\n#\nclass MongoDB(BaseQueryRunner):\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {\n 'connectionString': {\n 'type': 'string',\n 'title': 'Connection String'\n },\n 'dbName': {\n 'type': 'string',\n 'title': \"Database Name\"\n },\n 'replicaSetName': {\n 'type': 'string',\n 'title': 'Replica Set Name'\n },\n },\n 'required': ['connectionString']\n }\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def annotate_query(cls):\n return False\n\n def __init__(self, configuration_json):\n super(MongoDB, self).__init__(configuration_json)\n\n self.syntax = 'json'\n\n self.db_name = self.configuration[\"dbName\"]\n\n self.is_replica_set = True if \"replicaSetName\" in self.configuration and self.configuration[\"replicaSetName\"] else False\n\n def _get_column_by_name(self, columns, column_name):\n for c in columns:\n if \"name\" in c and c[\"name\"] == column_name:\n return c\n\n return None\n\n def _fix_dates(self, data):\n for k in data:\n if isinstance(data[k], list):\n for i in range(0, len(data[k])):\n if isinstance(data[k][i], (str, unicode)):\n self._convert_date(data[k], i)\n elif not isinstance(data[k][i], (int)):\n self._fix_dates(data[k][i])\n\n elif isinstance(data[k], dict):\n self._fix_dates(data[k])\n else:\n if isinstance(data[k], (str, unicode)):\n self._convert_date(data, k)\n\n def _convert_date(self, q, field_name):\n m = date_regex.findall(q[field_name])\n if len(m) > 0:\n q[field_name] = parse(m[0], yearfirst=True)\n\n def run_query(self, query):\n if self.is_replica_set:\n db_connection = pymongo.MongoReplicaSetClient(self.configuration[\"connectionString\"], replicaSet=self.configuration[\"replicaSetName\"])\n else:\n db_connection = pymongo.MongoClient(self.configuration[\"connectionString\"])\n\n db = db_connection[self.db_name]\n\n logger.debug(\"mongodb connection string: %s\", self.configuration['connectionString'])\n logger.debug(\"mongodb got query: %s\", query)\n\n try:\n query_data = json.loads(query)\n 
self._fix_dates(query_data)\n except ValueError:\n return None, \"Invalid query format. The query is not a valid JSON.\"\n\n if \"collection\" not in query_data:\n return None, \"'collection' must have a value to run a query\"\n else:\n collection = query_data[\"collection\"]\n\n q = query_data.get(\"query\", None)\n f = None\n\n aggregate = query_data.get(\"aggregate\", None)\n if aggregate:\n for step in aggregate:\n if \"$sort\" in step:\n sort_list = []\n for sort_item in step[\"$sort\"]:\n sort_list.append((sort_item[\"name\"], sort_item[\"direction\"]))\n\n step[\"$sort\"] = SON(sort_list)\n\n if not aggregate:\n s = None\n if \"sort\" in query_data and query_data[\"sort\"]:\n s = []\n for field in query_data[\"sort\"]:\n s.append((field[\"name\"], field[\"direction\"]))\n\n if \"fields\" in query_data:\n f = query_data[\"fields\"]\n\n s = None\n if \"sort\" in query_data and query_data[\"sort\"]:\n s = []\n for field_data in query_data[\"sort\"]:\n s.append((field_data[\"name\"], field_data[\"direction\"]))\n\n columns = []\n rows = []\n\n cursor = None\n if q or (not q and not aggregate):\n if s:\n cursor = db[collection].find(q, f).sort(s)\n else:\n cursor = db[collection].find(q, f)\n\n if \"skip\" in query_data:\n cursor = cursor.skip(query_data[\"skip\"])\n\n if \"limit\" in query_data:\n cursor = cursor.limit(query_data[\"limit\"])\n\n elif aggregate:\n r = db[collection].aggregate(aggregate)\n\n # Backwards compatibility with older pymongo versions.\n #\n # Older pymongo version would return a dictionary from an aggregate command.\n # The dict would contain a \"result\" key which would hold the cursor.\n # Newer ones return pymongo.command_cursor.CommandCursor.\n if isinstance(r, dict):\n cursor = r[\"result\"]\n else:\n cursor = r\n\n for r in cursor:\n for k in r:\n if self._get_column_by_name(columns, k) is None:\n columns.append({\n \"name\": k,\n \"friendly_name\": k,\n \"type\": TYPES_MAP.get(type(r[k]), TYPE_STRING)\n })\n\n rows.append(r)\n\n if f:\n ordered_columns = []\n for k in sorted(f, key=f.get):\n ordered_columns.append(self._get_column_by_name(columns, k))\n\n columns = ordered_columns\n\n data = {\n \"columns\": columns,\n \"rows\": rows\n }\n error = None\n json_data = json.dumps(data, cls=MongoDBJSONEncoder)\n\n return json_data, error\n\nregister(MongoDB)\n", "path": "redash/query_runner/mongodb.py"}]} | 2,985 | 161 |
gh_patches_debug_1908 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1660 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SSL Timeout Error immediately when switching to interactive
#### PoC
```
from pwn import *
r = remote('google.com', 443, ssl=True)
r.interactive()
r.close()
```
It immediately results in:
```
[+] Opening connection to google.com on port 443: Done
[*] Switching to interactive mode
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 754, in run
self.__target(*self.__args, **self.__kwargs)
File "/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py", line 784, in recv_thread
cur = self.recv(timeout = 0.05)
File "/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py", line 78, in recv
return self._recv(numb, timeout) or ''
File "/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py", line 156, in _recv
if not self.buffer and not self._fillbuffer(timeout):
File "/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py", line 126, in _fillbuffer
data = self.recv_raw(self.buffer.get_fill_size())
File "/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/sock.py", line 37, in recv_raw
data = self.sock.recv(numb, *a)
File "/usr/lib/python2.7/ssl.py", line 772, in recv
return self.read(buflen)
File "/usr/lib/python2.7/ssl.py", line 659, in read
v = self._sslobj.read(len)
SSLError: ('The read operation timed out',)
```
Note that doing so on a non-SSL server doesn't have this issue:
```
from pwn import *
r = remote('google.com', 80, ssl=False)
r.interactive()
r.close()
```
It allows you to type an HTTP request in interactive mode and returns the server response without any issues.
```
GET /
```
```
<HTTP Responses>
```
Is the SSL feature broken in pwntools?
--- END ISSUE ---
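
The traceback above shows the timeout surfacing as an `ssl.SSLError` rather than a plain `socket.timeout`, so a handler that only catches `socket.timeout` lets it propagate. A minimal sketch of the distinction, assuming Python 2.7-era behaviour for SSL-wrapped sockets (the helper below is a hypothetical illustration, not pwntools code):

```python
import socket
import ssl

def recv_or_none(sock, numb):
    # Sketch only: read from a (possibly SSL-wrapped) socket and treat a read
    # timeout as "no data yet" instead of letting the exception bubble up.
    try:
        return sock.recv(numb)
    except socket.timeout:
        # Plain TCP sockets report a timeout this way.
        return None
    except ssl.SSLError as e:
        # SSL-wrapped sockets may instead raise SSLError('The read operation timed out',),
        # so catching socket.timeout alone is not enough.
        if 'timed out' in str(e):
            return None
        raise
```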
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/tubes/sock.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3
4 import errno
5 import select
6 import six
7 import socket
8
9 from pwnlib.log import getLogger
10 from pwnlib.tubes.tube import tube
11
12 log = getLogger(__name__)
13
14 class sock(tube):
15 """Base type used for :class:`.tubes.remote` and :class:`.tubes.listen` classes"""
16
17 def __init__(self, *args, **kwargs):
18 super(sock, self).__init__(*args, **kwargs)
19 self.closed = {"recv": False, "send": False}
20
21 # Overwritten for better usability
22 def recvall(self, timeout = tube.forever):
23 """recvall() -> str
24
25 Receives data until the socket is closed.
26 """
27
28 if getattr(self, 'type', None) == socket.SOCK_DGRAM:
29 self.error("UDP sockets does not supports recvall")
30 else:
31 return super(sock, self).recvall(timeout)
32
33 def recv_raw(self, numb, *a):
34 if self.closed["recv"]:
35 raise EOFError
36
37 while True:
38 try:
39 data = self.sock.recv(numb, *a)
40 break
41 except socket.timeout:
42 return None
43 except IOError as e:
44 if e.errno == errno.EAGAIN:
45 return None
46 elif e.errno in (errno.ECONNREFUSED, errno.ECONNRESET):
47 self.shutdown("recv")
48 raise EOFError
49 elif e.errno == errno.EINTR:
50 continue
51 else:
52 raise
53
54 if not data:
55 self.shutdown("recv")
56 raise EOFError
57
58 return data
59
60 def send_raw(self, data):
61 if self.closed["send"]:
62 raise EOFError
63
64 try:
65 self.sock.sendall(data)
66 except IOError as e:
67 eof_numbers = (errno.EPIPE, errno.ECONNRESET, errno.ECONNREFUSED)
68 if e.errno in eof_numbers or 'Socket is closed' in e.args:
69 self.shutdown("send")
70 raise EOFError
71 else:
72 raise
73
74 def settimeout_raw(self, timeout):
75 if getattr(self, 'sock', None):
76 self.sock.settimeout(timeout)
77
78 def can_recv_raw(self, timeout):
79 """
80 Tests:
81
82 >>> l = listen()
83 >>> r = remote('localhost', l.lport)
84 >>> r.can_recv_raw(timeout=0)
85 False
86 >>> l.send(b'a')
87 >>> r.can_recv_raw(timeout=1)
88 True
89 >>> r.recv()
90 b'a'
91 >>> r.can_recv_raw(timeout=0)
92 False
93 >>> l.close()
94 >>> r.can_recv_raw(timeout=1)
95 False
96 >>> r.closed['recv']
97 True
98 """
99 if not self.sock or self.closed["recv"]:
100 return False
101
102 # select() will tell us data is available at EOF
103 can_recv = select.select([self.sock], [], [], timeout) == ([self.sock], [], [])
104
105 if not can_recv:
106 return False
107
108 # Ensure there's actually data, not just EOF
109 try:
110 self.recv_raw(1, socket.MSG_PEEK)
111 except EOFError:
112 return False
113
114 return True
115
116 def connected_raw(self, direction):
117 """
118 Tests:
119
120 >>> l = listen()
121 >>> r = remote('localhost', l.lport)
122 >>> r.connected()
123 True
124 >>> l.close()
125 >>> time.sleep(0.1) # Avoid race condition
126 >>> r.connected()
127 False
128 """
129 # If there's no socket, it's definitely closed
130 if not self.sock:
131 return False
132
133 # If we have noticed a connection close in a given direction before,
134 # return fast.
135 if self.closed.get(direction, False):
136 return False
137
138 # If a connection is closed in all manners, return fast
139 if all(self.closed.values()):
140 return False
141
142 # Use poll() to determine the connection state
143 want = {
144 'recv': select.POLLIN,
145 'send': select.POLLOUT,
146 'any': select.POLLIN | select.POLLOUT,
147 }[direction]
148
149 poll = select.poll()
150 poll.register(self, want | select.POLLHUP | select.POLLERR)
151
152 for fd, event in poll.poll(0):
153 if event & select.POLLHUP:
154 self.close()
155 return False
156 if event & select.POLLIN:
157 return True
158 if event & select.POLLOUT:
159 return True
160
161 return True
162
163 def close(self):
164 if not getattr(self, 'sock', None):
165 return
166
167 # Mark as closed in both directions
168 self.closed['send'] = True
169 self.closed['recv'] = True
170
171 self.sock.close()
172 self.sock = None
173 self._close_msg()
174
175 def _close_msg(self):
176 self.info('Closed connection to %s port %d' % (self.rhost, self.rport))
177
178 def fileno(self):
179 if not self.sock:
180 self.error("A closed socket does not have a file number")
181
182 return self.sock.fileno()
183
184 def shutdown_raw(self, direction):
185 if self.closed[direction]:
186 return
187
188 self.closed[direction] = True
189
190 if direction == "send":
191 try:
192 self.sock.shutdown(socket.SHUT_WR)
193 except IOError as e:
194 if e.errno == errno.ENOTCONN:
195 pass
196 else:
197 raise
198
199 if direction == "recv":
200 try:
201 self.sock.shutdown(socket.SHUT_RD)
202 except IOError as e:
203 if e.errno == errno.ENOTCONN:
204 pass
205 else:
206 raise
207
208 if False not in self.closed.values():
209 self.close()
210
211 @classmethod
212 def _get_family(cls, fam):
213 if isinstance(fam, six.integer_types):
214 pass
215 elif fam == 'any':
216 fam = socket.AF_UNSPEC
217 elif fam.lower() in ['ipv4', 'ip4', 'v4', '4']:
218 fam = socket.AF_INET
219 elif fam.lower() in ['ipv6', 'ip6', 'v6', '6']:
220 fam = socket.AF_INET6
221 else:
222 self.error("%s(): socket family %r is not supported",
223 cls.__name__,
224 fam)
225
226 return fam
227
228 @classmethod
229 def _get_type(cls, typ):
230 if isinstance(typ, six.integer_types):
231 pass
232 elif typ == "tcp":
233 typ = socket.SOCK_STREAM
234 elif typ == "udp":
235 typ = socket.SOCK_DGRAM
236 else:
237 self.error("%s(): socket type %r is not supported",
238 cls.__name__,
239 typ)
240
241 return typ
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/tubes/sock.py b/pwnlib/tubes/sock.py
--- a/pwnlib/tubes/sock.py
+++ b/pwnlib/tubes/sock.py
@@ -48,6 +48,8 @@
raise EOFError
elif e.errno == errno.EINTR:
continue
+ elif 'timed out' in e.message:
+ return None
else:
raise
| {"golden_diff": "diff --git a/pwnlib/tubes/sock.py b/pwnlib/tubes/sock.py\n--- a/pwnlib/tubes/sock.py\n+++ b/pwnlib/tubes/sock.py\n@@ -48,6 +48,8 @@\n raise EOFError\n elif e.errno == errno.EINTR:\n continue\n+ elif 'timed out' in e.message:\n+ return None\n else:\n raise\n", "issue": "SSL Timeout Error immediately when switching to interactive\n#### PoC\r\n```\r\nfrom pwn import *\r\nr = remote('google.com', 443, ssl=True)\r\nr.interactive()\r\nr.close()\r\n```\r\nIt immediately results in:\r\n```\r\n[+] Opening connection to google.com on port 443: Done\r\n[*] Switching to interactive mode\r\nException in thread Thread-2:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/threading.py\", line 801, in __bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python2.7/threading.py\", line 754, in run\r\n self.__target(*self.__args, **self.__kwargs)\r\n File \"/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py\", line 784, in recv_thread\r\n cur = self.recv(timeout = 0.05)\r\n File \"/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py\", line 78, in recv\r\n return self._recv(numb, timeout) or ''\r\n File \"/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py\", line 156, in _recv\r\n if not self.buffer and not self._fillbuffer(timeout):\r\n File \"/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/tube.py\", line 126, in _fillbuffer\r\n data = self.recv_raw(self.buffer.get_fill_size())\r\n File \"/home/hopkins/.local/lib/python2.7/site-packages/pwnlib/tubes/sock.py\", line 37, in recv_raw\r\n data = self.sock.recv(numb, *a)\r\n File \"/usr/lib/python2.7/ssl.py\", line 772, in recv\r\n return self.read(buflen)\r\n File \"/usr/lib/python2.7/ssl.py\", line 659, in read\r\n v = self._sslobj.read(len)\r\nSSLError: ('The read operation timed out',)\r\n\r\n```\r\n\r\nNote that doing so on a non-SSL server doesn't have this issue:\r\n```\r\nfrom pwn import *\r\nr = remote('google.com', 80, ssl=False)\r\nr.interactive()\r\nr.close()\r\n```\r\n\r\nIt allows you to type in HTTP Request in interactive mode, and return the server response without any issues.\r\n```\r\nGET /\r\n```\r\n```\r\n<HTTP Responses>\r\n```\r\n\r\nIs the SSL feature is broken in pwntools?\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport errno\nimport select\nimport six\nimport socket\n\nfrom pwnlib.log import getLogger\nfrom pwnlib.tubes.tube import tube\n\nlog = getLogger(__name__)\n\nclass sock(tube):\n \"\"\"Base type used for :class:`.tubes.remote` and :class:`.tubes.listen` classes\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(sock, self).__init__(*args, **kwargs)\n self.closed = {\"recv\": False, \"send\": False}\n\n # Overwritten for better usability\n def recvall(self, timeout = tube.forever):\n \"\"\"recvall() -> str\n\n Receives data until the socket is closed.\n \"\"\"\n\n if getattr(self, 'type', None) == socket.SOCK_DGRAM:\n self.error(\"UDP sockets does not supports recvall\")\n else:\n return super(sock, self).recvall(timeout)\n\n def recv_raw(self, numb, *a):\n if self.closed[\"recv\"]:\n raise EOFError\n\n while True:\n try:\n data = self.sock.recv(numb, *a)\n break\n except socket.timeout:\n return None\n except IOError as e:\n if e.errno == errno.EAGAIN:\n return None\n elif e.errno in (errno.ECONNREFUSED, errno.ECONNRESET):\n self.shutdown(\"recv\")\n raise EOFError\n elif e.errno == errno.EINTR:\n continue\n else:\n raise\n\n if not data:\n 
self.shutdown(\"recv\")\n raise EOFError\n\n return data\n\n def send_raw(self, data):\n if self.closed[\"send\"]:\n raise EOFError\n\n try:\n self.sock.sendall(data)\n except IOError as e:\n eof_numbers = (errno.EPIPE, errno.ECONNRESET, errno.ECONNREFUSED)\n if e.errno in eof_numbers or 'Socket is closed' in e.args:\n self.shutdown(\"send\")\n raise EOFError\n else:\n raise\n\n def settimeout_raw(self, timeout):\n if getattr(self, 'sock', None):\n self.sock.settimeout(timeout)\n\n def can_recv_raw(self, timeout):\n \"\"\"\n Tests:\n\n >>> l = listen()\n >>> r = remote('localhost', l.lport)\n >>> r.can_recv_raw(timeout=0)\n False\n >>> l.send(b'a')\n >>> r.can_recv_raw(timeout=1)\n True\n >>> r.recv()\n b'a'\n >>> r.can_recv_raw(timeout=0)\n False\n >>> l.close()\n >>> r.can_recv_raw(timeout=1)\n False\n >>> r.closed['recv']\n True\n \"\"\"\n if not self.sock or self.closed[\"recv\"]:\n return False\n\n # select() will tell us data is available at EOF\n can_recv = select.select([self.sock], [], [], timeout) == ([self.sock], [], [])\n\n if not can_recv:\n return False\n\n # Ensure there's actually data, not just EOF\n try:\n self.recv_raw(1, socket.MSG_PEEK)\n except EOFError:\n return False\n\n return True\n\n def connected_raw(self, direction):\n \"\"\"\n Tests:\n\n >>> l = listen()\n >>> r = remote('localhost', l.lport)\n >>> r.connected()\n True\n >>> l.close()\n >>> time.sleep(0.1) # Avoid race condition\n >>> r.connected()\n False\n \"\"\"\n # If there's no socket, it's definitely closed\n if not self.sock:\n return False\n\n # If we have noticed a connection close in a given direction before,\n # return fast.\n if self.closed.get(direction, False):\n return False\n\n # If a connection is closed in all manners, return fast\n if all(self.closed.values()):\n return False\n\n # Use poll() to determine the connection state\n want = {\n 'recv': select.POLLIN,\n 'send': select.POLLOUT,\n 'any': select.POLLIN | select.POLLOUT,\n }[direction]\n\n poll = select.poll()\n poll.register(self, want | select.POLLHUP | select.POLLERR)\n\n for fd, event in poll.poll(0):\n if event & select.POLLHUP:\n self.close()\n return False\n if event & select.POLLIN:\n return True\n if event & select.POLLOUT:\n return True\n\n return True\n\n def close(self):\n if not getattr(self, 'sock', None):\n return\n\n # Mark as closed in both directions\n self.closed['send'] = True\n self.closed['recv'] = True\n\n self.sock.close()\n self.sock = None\n self._close_msg()\n\n def _close_msg(self):\n self.info('Closed connection to %s port %d' % (self.rhost, self.rport))\n\n def fileno(self):\n if not self.sock:\n self.error(\"A closed socket does not have a file number\")\n\n return self.sock.fileno()\n\n def shutdown_raw(self, direction):\n if self.closed[direction]:\n return\n\n self.closed[direction] = True\n\n if direction == \"send\":\n try:\n self.sock.shutdown(socket.SHUT_WR)\n except IOError as e:\n if e.errno == errno.ENOTCONN:\n pass\n else:\n raise\n\n if direction == \"recv\":\n try:\n self.sock.shutdown(socket.SHUT_RD)\n except IOError as e:\n if e.errno == errno.ENOTCONN:\n pass\n else:\n raise\n\n if False not in self.closed.values():\n self.close()\n\n @classmethod\n def _get_family(cls, fam):\n if isinstance(fam, six.integer_types):\n pass\n elif fam == 'any':\n fam = socket.AF_UNSPEC\n elif fam.lower() in ['ipv4', 'ip4', 'v4', '4']:\n fam = socket.AF_INET\n elif fam.lower() in ['ipv6', 'ip6', 'v6', '6']:\n fam = socket.AF_INET6\n else:\n self.error(\"%s(): socket family %r is not supported\",\n 
cls.__name__,\n fam)\n\n return fam\n\n @classmethod\n def _get_type(cls, typ):\n if isinstance(typ, six.integer_types):\n pass\n elif typ == \"tcp\":\n typ = socket.SOCK_STREAM\n elif typ == \"udp\":\n typ = socket.SOCK_DGRAM\n else:\n self.error(\"%s(): socket type %r is not supported\",\n cls.__name__,\n typ)\n\n return typ\n", "path": "pwnlib/tubes/sock.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport errno\nimport select\nimport six\nimport socket\n\nfrom pwnlib.log import getLogger\nfrom pwnlib.tubes.tube import tube\n\nlog = getLogger(__name__)\n\nclass sock(tube):\n \"\"\"Base type used for :class:`.tubes.remote` and :class:`.tubes.listen` classes\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(sock, self).__init__(*args, **kwargs)\n self.closed = {\"recv\": False, \"send\": False}\n\n # Overwritten for better usability\n def recvall(self, timeout = tube.forever):\n \"\"\"recvall() -> str\n\n Receives data until the socket is closed.\n \"\"\"\n\n if getattr(self, 'type', None) == socket.SOCK_DGRAM:\n self.error(\"UDP sockets does not supports recvall\")\n else:\n return super(sock, self).recvall(timeout)\n\n def recv_raw(self, numb, *a):\n if self.closed[\"recv\"]:\n raise EOFError\n\n while True:\n try:\n data = self.sock.recv(numb, *a)\n break\n except socket.timeout:\n return None\n except IOError as e:\n if e.errno == errno.EAGAIN:\n return None\n elif e.errno in (errno.ECONNREFUSED, errno.ECONNRESET):\n self.shutdown(\"recv\")\n raise EOFError\n elif e.errno == errno.EINTR:\n continue\n elif 'timed out' in e.message:\n return None\n else:\n raise\n\n if not data:\n self.shutdown(\"recv\")\n raise EOFError\n\n return data\n\n def send_raw(self, data):\n if self.closed[\"send\"]:\n raise EOFError\n\n try:\n self.sock.sendall(data)\n except IOError as e:\n eof_numbers = (errno.EPIPE, errno.ECONNRESET, errno.ECONNREFUSED)\n if e.errno in eof_numbers or 'Socket is closed' in e.args:\n self.shutdown(\"send\")\n raise EOFError\n else:\n raise\n\n def settimeout_raw(self, timeout):\n if getattr(self, 'sock', None):\n self.sock.settimeout(timeout)\n\n def can_recv_raw(self, timeout):\n \"\"\"\n Tests:\n\n >>> l = listen()\n >>> r = remote('localhost', l.lport)\n >>> r.can_recv_raw(timeout=0)\n False\n >>> l.send(b'a')\n >>> r.can_recv_raw(timeout=1)\n True\n >>> r.recv()\n b'a'\n >>> r.can_recv_raw(timeout=0)\n False\n >>> l.close()\n >>> r.can_recv_raw(timeout=1)\n False\n >>> r.closed['recv']\n True\n \"\"\"\n if not self.sock or self.closed[\"recv\"]:\n return False\n\n # select() will tell us data is available at EOF\n can_recv = select.select([self.sock], [], [], timeout) == ([self.sock], [], [])\n\n if not can_recv:\n return False\n\n # Ensure there's actually data, not just EOF\n try:\n self.recv_raw(1, socket.MSG_PEEK)\n except EOFError:\n return False\n\n return True\n\n def connected_raw(self, direction):\n \"\"\"\n Tests:\n\n >>> l = listen()\n >>> r = remote('localhost', l.lport)\n >>> r.connected()\n True\n >>> l.close()\n >>> time.sleep(0.1) # Avoid race condition\n >>> r.connected()\n False\n \"\"\"\n # If there's no socket, it's definitely closed\n if not self.sock:\n return False\n\n # If we have noticed a connection close in a given direction before,\n # return fast.\n if self.closed.get(direction, False):\n return False\n\n # If a connection is closed in all manners, return fast\n if all(self.closed.values()):\n return False\n\n # Use poll() to determine the connection state\n want = {\n 
'recv': select.POLLIN,\n 'send': select.POLLOUT,\n 'any': select.POLLIN | select.POLLOUT,\n }[direction]\n\n poll = select.poll()\n poll.register(self, want | select.POLLHUP | select.POLLERR)\n\n for fd, event in poll.poll(0):\n if event & select.POLLHUP:\n self.close()\n return False\n if event & select.POLLIN:\n return True\n if event & select.POLLOUT:\n return True\n\n return True\n\n def close(self):\n if not getattr(self, 'sock', None):\n return\n\n # Mark as closed in both directions\n self.closed['send'] = True\n self.closed['recv'] = True\n\n self.sock.close()\n self.sock = None\n self._close_msg()\n\n def _close_msg(self):\n self.info('Closed connection to %s port %d' % (self.rhost, self.rport))\n\n def fileno(self):\n if not self.sock:\n self.error(\"A closed socket does not have a file number\")\n\n return self.sock.fileno()\n\n def shutdown_raw(self, direction):\n if self.closed[direction]:\n return\n\n self.closed[direction] = True\n\n if direction == \"send\":\n try:\n self.sock.shutdown(socket.SHUT_WR)\n except IOError as e:\n if e.errno == errno.ENOTCONN:\n pass\n else:\n raise\n\n if direction == \"recv\":\n try:\n self.sock.shutdown(socket.SHUT_RD)\n except IOError as e:\n if e.errno == errno.ENOTCONN:\n pass\n else:\n raise\n\n if False not in self.closed.values():\n self.close()\n\n @classmethod\n def _get_family(cls, fam):\n if isinstance(fam, six.integer_types):\n pass\n elif fam == 'any':\n fam = socket.AF_UNSPEC\n elif fam.lower() in ['ipv4', 'ip4', 'v4', '4']:\n fam = socket.AF_INET\n elif fam.lower() in ['ipv6', 'ip6', 'v6', '6']:\n fam = socket.AF_INET6\n else:\n self.error(\"%s(): socket family %r is not supported\",\n cls.__name__,\n fam)\n\n return fam\n\n @classmethod\n def _get_type(cls, typ):\n if isinstance(typ, six.integer_types):\n pass\n elif typ == \"tcp\":\n typ = socket.SOCK_STREAM\n elif typ == \"udp\":\n typ = socket.SOCK_DGRAM\n else:\n self.error(\"%s(): socket type %r is not supported\",\n cls.__name__,\n typ)\n\n return typ\n", "path": "pwnlib/tubes/sock.py"}]} | 2,913 | 97 |
gh_patches_debug_5126 | rasdani/github-patches | git_diff | ipython__ipython-4092 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nbconvert can't handle Heading with Chinese characters on Japanese Windows OS.
Converting the following notebook with `ipython nbconvert test.ipynb` raises an exception:
```
File "C:\Python27\lib\site-packages\ipython-1.0.0_dev-py2.7.egg\IPython\nbconv
ert\filters\strings.py", line 83, in add_anchor
h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))
File "C:\Python27\lib\xml\etree\ElementTree.py", line 1301, in XML
parser.feed(text)
File "C:\Python27\lib\xml\etree\ElementTree.py", line 1643, in feed
self._raiseerror(v)
File "C:\Python27\lib\xml\etree\ElementTree.py", line 1507, in _raiseerror
raise err
ParseError: not well-formed (invalid token): line 1, column 9
```
Here is the content of the notebook. I am using Japanese Windows, where the default encoding is:
```
In [1]: from IPython.utils import encoding
In [2]: encoding.DEFAULT_ENCODING
Out[2]: 'cp932'
```
When `py3compat.cast_bytes_py2(html)` is called, it can't convert the Chinese characters correctly.
```
{
"metadata": {
"name": ""
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "heading",
"level": 1,
"metadata": {},
"source": [
"\u6269\u5c55\u7c7b\u578b(cdef\u7c7b)"
]
},
{
"cell_type": "code",
"collapsed": false,
"input": [
"\n"
],
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
}
```
--- END ISSUE ---
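
The heading cell contains characters outside the platform codec, and `ElementTree.fromstring` assumes UTF-8 when handed bytes without an XML declaration. A small sketch of why explicitly UTF-8-encoded bytes parse while platform-encoded bytes do not (the heading string is taken from the notebook above; this is an illustration, not IPython code):

```python
# -*- coding: utf-8 -*-
from xml.etree import ElementTree

heading = u"<h1>扩展类型(cdef类)</h1>"  # heading cell text from the notebook above

# Explicit UTF-8 bytes parse fine.
ElementTree.fromstring(heading.encode("utf-8"))

# Encoding with the Japanese Windows default (cp932) would either raise
# UnicodeEncodeError for the simplified Chinese characters or, with lossy error
# handling, yield bytes that are not valid UTF-8 and trigger a ParseError like
# the one in the traceback.
```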
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/nbconvert/filters/strings.py`
Content:
```
1 # coding: utf-8
2 """String filters.
3
4 Contains a collection of useful string manipulation filters for use in Jinja
5 templates.
6 """
7 #-----------------------------------------------------------------------------
8 # Copyright (c) 2013, the IPython Development Team.
9 #
10 # Distributed under the terms of the Modified BSD License.
11 #
12 # The full license is in the file COPYING.txt, distributed with this software.
13 #-----------------------------------------------------------------------------
14
15 #-----------------------------------------------------------------------------
16 # Imports
17 #-----------------------------------------------------------------------------
18
19 import os
20 import re
21 import textwrap
22 from xml.etree import ElementTree
23
24 from IPython.core.interactiveshell import InteractiveShell
25 from IPython.utils import py3compat
26
27 #-----------------------------------------------------------------------------
28 # Functions
29 #-----------------------------------------------------------------------------
30
31 __all__ = [
32 'wrap_text',
33 'html2text',
34 'add_anchor',
35 'strip_dollars',
36 'strip_files_prefix',
37 'comment_lines',
38 'get_lines',
39 'ipython2python',
40 'posix_path',
41 ]
42
43
44 def wrap_text(text, width=100):
45 """
46 Intelligently wrap text.
47 Wrap text without breaking words if possible.
48
49 Parameters
50 ----------
51 text : str
52 Text to wrap.
53 width : int, optional
54 Number of characters to wrap to, default 100.
55 """
56
57 split_text = text.split('\n')
58 wrp = map(lambda x:textwrap.wrap(x,width), split_text)
59 wrpd = map('\n'.join, wrp)
60 return '\n'.join(wrpd)
61
62
63 def html2text(element):
64 """extract inner text from html
65
66 Analog of jQuery's $(element).text()
67 """
68 if isinstance(element, py3compat.string_types):
69 element = ElementTree.fromstring(element)
70
71 text = element.text or ""
72 for child in element:
73 text += html2text(child)
74 text += (element.tail or "")
75 return text
76
77
78 def add_anchor(html):
79 """Add an anchor-link to an html header tag
80
81 For use in heading cells
82 """
83 h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))
84 link = html2text(h).replace(' ', '-')
85 h.set('id', link)
86 a = ElementTree.Element("a", {"class" : "anchor-link", "href" : "#" + link})
87 a.text = u'¶'
88 h.append(a)
89
90 # Known issue of Python3.x, ElementTree.tostring() returns a byte string
91 # instead of a text string. See issue http://bugs.python.org/issue10942
92 # Workaround is to make sure the bytes are casted to a string.
93 return py3compat.decode(ElementTree.tostring(h), 'utf-8')
94
95
96 def strip_dollars(text):
97 """
98 Remove all dollar symbols from text
99
100 Parameters
101 ----------
102 text : str
103 Text to remove dollars from
104 """
105
106 return text.strip('$')
107
108
109 files_url_pattern = re.compile(r'(src|href)\=([\'"]?)files/')
110
111 def strip_files_prefix(text):
112 """
113 Fix all fake URLs that start with `files/`,
114 stripping out the `files/` prefix.
115
116 Parameters
117 ----------
118 text : str
119 Text in which to replace 'src="files/real...' with 'src="real...'
120 """
121 return files_url_pattern.sub(r"\1=\2", text)
122
123
124 def comment_lines(text, prefix='# '):
125 """
126 Build a Python comment line from input text.
127
128 Parameters
129 ----------
130 text : str
131 Text to comment out.
132 prefix : str
133 Character to append to the start of each line.
134 """
135
136 #Replace line breaks with line breaks and comment symbols.
137 #Also add a comment symbol at the beginning to comment out
138 #the first line.
139 return prefix + ('\n'+prefix).join(text.split('\n'))
140
141
142 def get_lines(text, start=None,end=None):
143 """
144 Split the input text into separate lines and then return the
145 lines that the caller is interested in.
146
147 Parameters
148 ----------
149 text : str
150 Text to parse lines from.
151 start : int, optional
152 First line to grab from.
153 end : int, optional
154 Last line to grab from.
155 """
156
157 # Split the input into lines.
158 lines = text.split("\n")
159
160 # Return the right lines.
161 return "\n".join(lines[start:end]) #re-join
162
163 def ipython2python(code):
164 """Transform IPython syntax to pure Python syntax
165
166 Parameters
167 ----------
168
169 code : str
170 IPython code, to be transformed to pure Python
171 """
172 shell = InteractiveShell.instance()
173 return shell.input_transformer_manager.transform_cell(code)
174
175 def posix_path(path):
176 """Turn a path into posix-style path/to/etc
177
178 Mainly for use in latex on Windows,
179 where native Windows paths are not allowed.
180 """
181 if os.path.sep != '/':
182 return path.replace(os.path.sep, '/')
183 return path
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/nbconvert/filters/strings.py b/IPython/nbconvert/filters/strings.py
--- a/IPython/nbconvert/filters/strings.py
+++ b/IPython/nbconvert/filters/strings.py
@@ -80,7 +80,7 @@
For use in heading cells
"""
- h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))
+ h = ElementTree.fromstring(py3compat.cast_bytes_py2(html, encoding='utf-8'))
link = html2text(h).replace(' ', '-')
h.set('id', link)
a = ElementTree.Element("a", {"class" : "anchor-link", "href" : "#" + link})
| {"golden_diff": "diff --git a/IPython/nbconvert/filters/strings.py b/IPython/nbconvert/filters/strings.py\n--- a/IPython/nbconvert/filters/strings.py\n+++ b/IPython/nbconvert/filters/strings.py\n@@ -80,7 +80,7 @@\n \n For use in heading cells\n \"\"\"\n- h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))\n+ h = ElementTree.fromstring(py3compat.cast_bytes_py2(html, encoding='utf-8'))\n link = html2text(h).replace(' ', '-')\n h.set('id', link)\n a = ElementTree.Element(\"a\", {\"class\" : \"anchor-link\", \"href\" : \"#\" + link})\n", "issue": "nbconvert can't handle Heading with Chinese characters on Japanese Windows OS.\nConvert following notebook by `ipython nbconvert test.ipynb` will raise Exception:\n\n```\n File \"C:\\Python27\\lib\\site-packages\\ipython-1.0.0_dev-py2.7.egg\\IPython\\nbconv\nert\\filters\\strings.py\", line 83, in add_anchor\n h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))\n File \"C:\\Python27\\lib\\xml\\etree\\ElementTree.py\", line 1301, in XML\n parser.feed(text)\n File \"C:\\Python27\\lib\\xml\\etree\\ElementTree.py\", line 1643, in feed\n self._raiseerror(v)\n File \"C:\\Python27\\lib\\xml\\etree\\ElementTree.py\", line 1507, in _raiseerror\n raise err\nParseError: not well-formed (invalid token): line 1, column 9\n```\n\nHere is the content of the notebook. I am using a Japanese Windows, the default encoding is:\n\n```\nIn [1]: from IPython.utils import encoding\n\nIn [2]: encoding.DEFAULT_ENCODING\nOut[2]: 'cp932'\n```\n\nWhen call `py3compat.cast_bytes_py2(html)` it can't convert the Chinese characters correctly.\n\n```\n{\n \"metadata\": {\n \"name\": \"\"\n },\n \"nbformat\": 3,\n \"nbformat_minor\": 0,\n \"worksheets\": [\n {\n \"cells\": [\n {\n \"cell_type\": \"heading\",\n \"level\": 1,\n \"metadata\": {},\n \"source\": [\n \"\\u6269\\u5c55\\u7c7b\\u578b(cdef\\u7c7b)\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"collapsed\": false,\n \"input\": [\n \"\\n\"\n ],\n \"language\": \"python\",\n \"metadata\": {},\n \"outputs\": []\n }\n ],\n \"metadata\": {}\n }\n ]\n}\n```\n\n", "before_files": [{"content": "# coding: utf-8\n\"\"\"String filters.\n\nContains a collection of useful string manipulation filters for use in Jinja\ntemplates.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport os\nimport re\nimport textwrap\nfrom xml.etree import ElementTree\n\nfrom IPython.core.interactiveshell import InteractiveShell\nfrom IPython.utils import py3compat\n\n#-----------------------------------------------------------------------------\n# Functions\n#-----------------------------------------------------------------------------\n\n__all__ = [\n 'wrap_text',\n 'html2text',\n 'add_anchor',\n 'strip_dollars',\n 'strip_files_prefix',\n 'comment_lines',\n 'get_lines',\n 'ipython2python',\n 'posix_path',\n]\n\n\ndef wrap_text(text, width=100):\n \"\"\" \n Intelligently wrap text.\n Wrap text without breaking words if possible.\n \n Parameters\n ----------\n text : str\n Text to wrap.\n width : int, optional\n Number of characters to wrap to, default 100.\n 
\"\"\"\n\n split_text = text.split('\\n')\n wrp = map(lambda x:textwrap.wrap(x,width), split_text)\n wrpd = map('\\n'.join, wrp)\n return '\\n'.join(wrpd)\n\n\ndef html2text(element):\n \"\"\"extract inner text from html\n \n Analog of jQuery's $(element).text()\n \"\"\"\n if isinstance(element, py3compat.string_types):\n element = ElementTree.fromstring(element)\n \n text = element.text or \"\"\n for child in element:\n text += html2text(child)\n text += (element.tail or \"\")\n return text\n\n\ndef add_anchor(html):\n \"\"\"Add an anchor-link to an html header tag\n \n For use in heading cells\n \"\"\"\n h = ElementTree.fromstring(py3compat.cast_bytes_py2(html))\n link = html2text(h).replace(' ', '-')\n h.set('id', link)\n a = ElementTree.Element(\"a\", {\"class\" : \"anchor-link\", \"href\" : \"#\" + link})\n a.text = u'\u00b6'\n h.append(a)\n\n # Known issue of Python3.x, ElementTree.tostring() returns a byte string\n # instead of a text string. See issue http://bugs.python.org/issue10942\n # Workaround is to make sure the bytes are casted to a string.\n return py3compat.decode(ElementTree.tostring(h), 'utf-8')\n\n\ndef strip_dollars(text):\n \"\"\"\n Remove all dollar symbols from text\n \n Parameters\n ----------\n text : str\n Text to remove dollars from\n \"\"\"\n\n return text.strip('$')\n\n\nfiles_url_pattern = re.compile(r'(src|href)\\=([\\'\"]?)files/')\n\ndef strip_files_prefix(text):\n \"\"\"\n Fix all fake URLs that start with `files/`,\n stripping out the `files/` prefix.\n \n Parameters\n ----------\n text : str\n Text in which to replace 'src=\"files/real...' with 'src=\"real...'\n \"\"\"\n return files_url_pattern.sub(r\"\\1=\\2\", text)\n\n\ndef comment_lines(text, prefix='# '):\n \"\"\"\n Build a Python comment line from input text.\n \n Parameters\n ----------\n text : str\n Text to comment out.\n prefix : str\n Character to append to the start of each line.\n \"\"\"\n \n #Replace line breaks with line breaks and comment symbols.\n #Also add a comment symbol at the beginning to comment out\n #the first line.\n return prefix + ('\\n'+prefix).join(text.split('\\n')) \n\n\ndef get_lines(text, start=None,end=None):\n \"\"\"\n Split the input text into separate lines and then return the \n lines that the caller is interested in.\n \n Parameters\n ----------\n text : str\n Text to parse lines from.\n start : int, optional\n First line to grab from.\n end : int, optional\n Last line to grab from.\n \"\"\"\n \n # Split the input into lines.\n lines = text.split(\"\\n\")\n \n # Return the right lines.\n return \"\\n\".join(lines[start:end]) #re-join\n\ndef ipython2python(code):\n \"\"\"Transform IPython syntax to pure Python syntax\n\n Parameters\n ----------\n\n code : str\n IPython code, to be transformed to pure Python\n \"\"\"\n shell = InteractiveShell.instance()\n return shell.input_transformer_manager.transform_cell(code)\n\ndef posix_path(path):\n \"\"\"Turn a path into posix-style path/to/etc\n \n Mainly for use in latex on Windows,\n where native Windows paths are not allowed.\n \"\"\"\n if os.path.sep != '/':\n return path.replace(os.path.sep, '/')\n return path\n", "path": "IPython/nbconvert/filters/strings.py"}], "after_files": [{"content": "# coding: utf-8\n\"\"\"String filters.\n\nContains a collection of useful string manipulation filters for use in Jinja\ntemplates.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD 
License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport os\nimport re\nimport textwrap\nfrom xml.etree import ElementTree\n\nfrom IPython.core.interactiveshell import InteractiveShell\nfrom IPython.utils import py3compat\n\n#-----------------------------------------------------------------------------\n# Functions\n#-----------------------------------------------------------------------------\n\n__all__ = [\n 'wrap_text',\n 'html2text',\n 'add_anchor',\n 'strip_dollars',\n 'strip_files_prefix',\n 'comment_lines',\n 'get_lines',\n 'ipython2python',\n 'posix_path',\n]\n\n\ndef wrap_text(text, width=100):\n \"\"\" \n Intelligently wrap text.\n Wrap text without breaking words if possible.\n \n Parameters\n ----------\n text : str\n Text to wrap.\n width : int, optional\n Number of characters to wrap to, default 100.\n \"\"\"\n\n split_text = text.split('\\n')\n wrp = map(lambda x:textwrap.wrap(x,width), split_text)\n wrpd = map('\\n'.join, wrp)\n return '\\n'.join(wrpd)\n\n\ndef html2text(element):\n \"\"\"extract inner text from html\n \n Analog of jQuery's $(element).text()\n \"\"\"\n if isinstance(element, py3compat.string_types):\n element = ElementTree.fromstring(element)\n \n text = element.text or \"\"\n for child in element:\n text += html2text(child)\n text += (element.tail or \"\")\n return text\n\n\ndef add_anchor(html):\n \"\"\"Add an anchor-link to an html header tag\n \n For use in heading cells\n \"\"\"\n h = ElementTree.fromstring(py3compat.cast_bytes_py2(html, encoding='utf-8'))\n link = html2text(h).replace(' ', '-')\n h.set('id', link)\n a = ElementTree.Element(\"a\", {\"class\" : \"anchor-link\", \"href\" : \"#\" + link})\n a.text = u'\u00b6'\n h.append(a)\n\n # Known issue of Python3.x, ElementTree.tostring() returns a byte string\n # instead of a text string. See issue http://bugs.python.org/issue10942\n # Workaround is to make sure the bytes are casted to a string.\n return py3compat.decode(ElementTree.tostring(h), 'utf-8')\n\n\ndef strip_dollars(text):\n \"\"\"\n Remove all dollar symbols from text\n \n Parameters\n ----------\n text : str\n Text to remove dollars from\n \"\"\"\n\n return text.strip('$')\n\n\nfiles_url_pattern = re.compile(r'(src|href)\\=([\\'\"]?)files/')\n\ndef strip_files_prefix(text):\n \"\"\"\n Fix all fake URLs that start with `files/`,\n stripping out the `files/` prefix.\n \n Parameters\n ----------\n text : str\n Text in which to replace 'src=\"files/real...' 
with 'src=\"real...'\n \"\"\"\n return files_url_pattern.sub(r\"\\1=\\2\", text)\n\n\ndef comment_lines(text, prefix='# '):\n \"\"\"\n Build a Python comment line from input text.\n \n Parameters\n ----------\n text : str\n Text to comment out.\n prefix : str\n Character to append to the start of each line.\n \"\"\"\n \n #Replace line breaks with line breaks and comment symbols.\n #Also add a comment symbol at the beginning to comment out\n #the first line.\n return prefix + ('\\n'+prefix).join(text.split('\\n')) \n\n\ndef get_lines(text, start=None,end=None):\n \"\"\"\n Split the input text into separate lines and then return the \n lines that the caller is interested in.\n \n Parameters\n ----------\n text : str\n Text to parse lines from.\n start : int, optional\n First line to grab from.\n end : int, optional\n Last line to grab from.\n \"\"\"\n \n # Split the input into lines.\n lines = text.split(\"\\n\")\n \n # Return the right lines.\n return \"\\n\".join(lines[start:end]) #re-join\n\ndef ipython2python(code):\n \"\"\"Transform IPython syntax to pure Python syntax\n\n Parameters\n ----------\n\n code : str\n IPython code, to be transformed to pure Python\n \"\"\"\n shell = InteractiveShell.instance()\n return shell.input_transformer_manager.transform_cell(code)\n\ndef posix_path(path):\n \"\"\"Turn a path into posix-style path/to/etc\n \n Mainly for use in latex on Windows,\n where native Windows paths are not allowed.\n \"\"\"\n if os.path.sep != '/':\n return path.replace(os.path.sep, '/')\n return path\n", "path": "IPython/nbconvert/filters/strings.py"}]} | 2,256 | 159 |
gh_patches_debug_28054 | rasdani/github-patches | git_diff | scrapy__scrapy-5885 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_func_args does not fully work in CPython
As [shown in tests](https://github.com/scrapy/scrapy/blob/ada917307844950a81226f020b596d5932187f6e/tests/test_utils_python.py#L240-L243), `get_func_args` does not work in CPython with inputs like `str.split`, `"".join` or `itemgetter(2)`.
--- END ISSUE ---
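
The callables in question are implemented in C, so `getfullargspec`-style introspection cannot describe them reliably; `inspect.signature` handles most of them and raises `ValueError` for the rest. A simplified sketch of a signature-based helper (a standalone illustration, not Scrapy's exact implementation):

```python
import inspect
from operator import itemgetter

def arg_names(func):
    # List parameter names via inspect.signature, falling back to an empty list
    # for C-level callables that expose no signature at all.
    try:
        sig = inspect.signature(func)
    except ValueError:
        return []
    return list(sig.parameters)

# Illustrative calls; exact results can vary across CPython versions:
print(arg_names(str.split))      # typically ['self', 'sep', 'maxsplit']
print(arg_names("".join))        # typically ['iterable']
print(arg_names(itemgetter(2)))  # [] when no signature is exposed
```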
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/python.py`
Content:
```
1 """
2 This module contains essential stuff that should've come with Python itself ;)
3 """
4 import gc
5 import inspect
6 import re
7 import sys
8 import weakref
9 from functools import partial, wraps
10 from itertools import chain
11 from typing import Any, AsyncGenerator, AsyncIterable, Iterable, Union
12
13 from scrapy.utils.asyncgen import as_async_generator
14
15
16 def flatten(x):
17 """flatten(sequence) -> list
18
19 Returns a single, flat list which contains all elements retrieved
20 from the sequence and all recursively contained sub-sequences
21 (iterables).
22
23 Examples:
24 >>> [1, 2, [3,4], (5,6)]
25 [1, 2, [3, 4], (5, 6)]
26 >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
27 [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
28 >>> flatten(["foo", "bar"])
29 ['foo', 'bar']
30 >>> flatten(["foo", ["baz", 42], "bar"])
31 ['foo', 'baz', 42, 'bar']
32 """
33 return list(iflatten(x))
34
35
36 def iflatten(x):
37 """iflatten(sequence) -> iterator
38
39 Similar to ``.flatten()``, but returns iterator instead"""
40 for el in x:
41 if is_listlike(el):
42 for el_ in iflatten(el):
43 yield el_
44 else:
45 yield el
46
47
48 def is_listlike(x: Any) -> bool:
49 """
50 >>> is_listlike("foo")
51 False
52 >>> is_listlike(5)
53 False
54 >>> is_listlike(b"foo")
55 False
56 >>> is_listlike([b"foo"])
57 True
58 >>> is_listlike((b"foo",))
59 True
60 >>> is_listlike({})
61 True
62 >>> is_listlike(set())
63 True
64 >>> is_listlike((x for x in range(3)))
65 True
66 >>> is_listlike(range(5))
67 True
68 """
69 return hasattr(x, "__iter__") and not isinstance(x, (str, bytes))
70
71
72 def unique(list_, key=lambda x: x):
73 """efficient function to uniquify a list preserving item order"""
74 seen = set()
75 result = []
76 for item in list_:
77 seenkey = key(item)
78 if seenkey in seen:
79 continue
80 seen.add(seenkey)
81 result.append(item)
82 return result
83
84
85 def to_unicode(text, encoding=None, errors="strict"):
86 """Return the unicode representation of a bytes object ``text``. If
87 ``text`` is already an unicode object, return it as-is."""
88 if isinstance(text, str):
89 return text
90 if not isinstance(text, (bytes, str)):
91 raise TypeError(
92 "to_unicode must receive a bytes or str "
93 f"object, got {type(text).__name__}"
94 )
95 if encoding is None:
96 encoding = "utf-8"
97 return text.decode(encoding, errors)
98
99
100 def to_bytes(text, encoding=None, errors="strict"):
101 """Return the binary representation of ``text``. If ``text``
102 is already a bytes object, return it as-is."""
103 if isinstance(text, bytes):
104 return text
105 if not isinstance(text, str):
106 raise TypeError(
107 "to_bytes must receive a str or bytes " f"object, got {type(text).__name__}"
108 )
109 if encoding is None:
110 encoding = "utf-8"
111 return text.encode(encoding, errors)
112
113
114 def re_rsearch(pattern, text, chunk_size=1024):
115 """
116 This function does a reverse search in a text using a regular expression
117 given in the attribute 'pattern'.
118 Since the re module does not provide this functionality, we have to find for
119 the expression into chunks of text extracted from the end (for the sake of efficiency).
120 At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for
121 the pattern. If the pattern is not found, another chunk is extracted, and another
122 search is performed.
123 This process continues until a match is found, or until the whole file is read.
124 In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing
125 the start position of the match, and the ending (regarding the entire text).
126 """
127
128 def _chunk_iter():
129 offset = len(text)
130 while True:
131 offset -= chunk_size * 1024
132 if offset <= 0:
133 break
134 yield (text[offset:], offset)
135 yield (text, 0)
136
137 if isinstance(pattern, str):
138 pattern = re.compile(pattern)
139
140 for chunk, offset in _chunk_iter():
141 matches = [match for match in pattern.finditer(chunk)]
142 if matches:
143 start, end = matches[-1].span()
144 return offset + start, offset + end
145 return None
146
147
148 def memoizemethod_noargs(method):
149 """Decorator to cache the result of a method (without arguments) using a
150 weak reference to its object
151 """
152 cache = weakref.WeakKeyDictionary()
153
154 @wraps(method)
155 def new_method(self, *args, **kwargs):
156 if self not in cache:
157 cache[self] = method(self, *args, **kwargs)
158 return cache[self]
159
160 return new_method
161
162
163 _BINARYCHARS = {to_bytes(chr(i)) for i in range(32)} - {b"\0", b"\t", b"\n", b"\r"}
164 _BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}
165
166
167 def binary_is_text(data):
168 """Returns ``True`` if the given ``data`` argument (a ``bytes`` object)
169 does not contain unprintable control characters.
170 """
171 if not isinstance(data, bytes):
172 raise TypeError(f"data must be bytes, got '{type(data).__name__}'")
173 return all(c not in _BINARYCHARS for c in data)
174
175
176 def get_func_args(func, stripself=False):
177 """Return the argument name list of a callable"""
178 if inspect.isfunction(func):
179 spec = inspect.getfullargspec(func)
180 func_args = spec.args + spec.kwonlyargs
181 elif inspect.isclass(func):
182 return get_func_args(func.__init__, True)
183 elif inspect.ismethod(func):
184 return get_func_args(func.__func__, True)
185 elif inspect.ismethoddescriptor(func):
186 return []
187 elif isinstance(func, partial):
188 return [
189 x
190 for x in get_func_args(func.func)[len(func.args) :]
191 if not (func.keywords and x in func.keywords)
192 ]
193 elif hasattr(func, "__call__"):
194 if inspect.isroutine(func):
195 return []
196 if getattr(func, "__name__", None) == "__call__":
197 return []
198 return get_func_args(func.__call__, True)
199 else:
200 raise TypeError(f"{type(func)} is not callable")
201 if stripself:
202 func_args.pop(0)
203 return func_args
204
205
206 def get_spec(func):
207 """Returns (args, kwargs) tuple for a function
208 >>> import re
209 >>> get_spec(re.match)
210 (['pattern', 'string'], {'flags': 0})
211
212 >>> class Test:
213 ... def __call__(self, val):
214 ... pass
215 ... def method(self, val, flags=0):
216 ... pass
217
218 >>> get_spec(Test)
219 (['self', 'val'], {})
220
221 >>> get_spec(Test.method)
222 (['self', 'val'], {'flags': 0})
223
224 >>> get_spec(Test().method)
225 (['self', 'val'], {'flags': 0})
226 """
227
228 if inspect.isfunction(func) or inspect.ismethod(func):
229 spec = inspect.getfullargspec(func)
230 elif hasattr(func, "__call__"):
231 spec = inspect.getfullargspec(func.__call__)
232 else:
233 raise TypeError(f"{type(func)} is not callable")
234
235 defaults = spec.defaults or []
236
237 firstdefault = len(spec.args) - len(defaults)
238 args = spec.args[:firstdefault]
239 kwargs = dict(zip(spec.args[firstdefault:], defaults))
240 return args, kwargs
241
242
243 def equal_attributes(obj1, obj2, attributes):
244 """Compare two objects attributes"""
245 # not attributes given return False by default
246 if not attributes:
247 return False
248
249 temp1, temp2 = object(), object()
250 for attr in attributes:
251 # support callables like itemgetter
252 if callable(attr):
253 if attr(obj1) != attr(obj2):
254 return False
255 elif getattr(obj1, attr, temp1) != getattr(obj2, attr, temp2):
256 return False
257 # all attributes equal
258 return True
259
260
261 def without_none_values(iterable):
262 """Return a copy of ``iterable`` with all ``None`` entries removed.
263
264 If ``iterable`` is a mapping, return a dictionary where all pairs that have
265 value ``None`` have been removed.
266 """
267 try:
268 return {k: v for k, v in iterable.items() if v is not None}
269 except AttributeError:
270 return type(iterable)((v for v in iterable if v is not None))
271
272
273 def global_object_name(obj):
274 """
275 Return full name of a global object.
276
277 >>> from scrapy import Request
278 >>> global_object_name(Request)
279 'scrapy.http.request.Request'
280 """
281 return f"{obj.__module__}.{obj.__name__}"
282
283
284 if hasattr(sys, "pypy_version_info"):
285
286 def garbage_collect():
287 # Collecting weakreferences can take two collections on PyPy.
288 gc.collect()
289 gc.collect()
290
291 else:
292
293 def garbage_collect():
294 gc.collect()
295
296
297 class MutableChain(Iterable):
298 """
299 Thin wrapper around itertools.chain, allowing to add iterables "in-place"
300 """
301
302 def __init__(self, *args: Iterable):
303 self.data = chain.from_iterable(args)
304
305 def extend(self, *iterables: Iterable) -> None:
306 self.data = chain(self.data, chain.from_iterable(iterables))
307
308 def __iter__(self):
309 return self
310
311 def __next__(self):
312 return next(self.data)
313
314
315 async def _async_chain(*iterables: Union[Iterable, AsyncIterable]) -> AsyncGenerator:
316 for it in iterables:
317 async for o in as_async_generator(it):
318 yield o
319
320
321 class MutableAsyncChain(AsyncIterable):
322 """
323 Similar to MutableChain but for async iterables
324 """
325
326 def __init__(self, *args: Union[Iterable, AsyncIterable]):
327 self.data = _async_chain(*args)
328
329 def extend(self, *iterables: Union[Iterable, AsyncIterable]) -> None:
330 self.data = _async_chain(self.data, _async_chain(*iterables))
331
332 def __aiter__(self):
333 return self
334
335 async def __anext__(self):
336 return await self.data.__anext__()
337
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/utils/python.py b/scrapy/utils/python.py
--- a/scrapy/utils/python.py
+++ b/scrapy/utils/python.py
@@ -174,33 +174,33 @@
def get_func_args(func, stripself=False):
- """Return the argument name list of a callable"""
- if inspect.isfunction(func):
- spec = inspect.getfullargspec(func)
- func_args = spec.args + spec.kwonlyargs
- elif inspect.isclass(func):
- return get_func_args(func.__init__, True)
- elif inspect.ismethod(func):
- return get_func_args(func.__func__, True)
- elif inspect.ismethoddescriptor(func):
- return []
- elif isinstance(func, partial):
- return [
- x
- for x in get_func_args(func.func)[len(func.args) :]
- if not (func.keywords and x in func.keywords)
- ]
- elif hasattr(func, "__call__"):
- if inspect.isroutine(func):
- return []
- if getattr(func, "__name__", None) == "__call__":
- return []
- return get_func_args(func.__call__, True)
+ """Return the argument name list of a callable object"""
+ if not callable(func):
+ raise TypeError(f"func must be callable, got '{type(func).__name__}'")
+
+ args = []
+ try:
+ sig = inspect.signature(func)
+ except ValueError:
+ return args
+
+ if isinstance(func, partial):
+ partial_args = func.args
+ partial_kw = func.keywords
+
+ for name, param in sig.parameters.items():
+ if param.name in partial_args:
+ continue
+ if partial_kw and param.name in partial_kw:
+ continue
+ args.append(name)
else:
- raise TypeError(f"{type(func)} is not callable")
- if stripself:
- func_args.pop(0)
- return func_args
+ for name in sig.parameters.keys():
+ args.append(name)
+
+ if stripself and args and args[0] == "self":
+ args = args[1:]
+ return args
def get_spec(func):
| {"golden_diff": "diff --git a/scrapy/utils/python.py b/scrapy/utils/python.py\n--- a/scrapy/utils/python.py\n+++ b/scrapy/utils/python.py\n@@ -174,33 +174,33 @@\n \n \n def get_func_args(func, stripself=False):\n- \"\"\"Return the argument name list of a callable\"\"\"\n- if inspect.isfunction(func):\n- spec = inspect.getfullargspec(func)\n- func_args = spec.args + spec.kwonlyargs\n- elif inspect.isclass(func):\n- return get_func_args(func.__init__, True)\n- elif inspect.ismethod(func):\n- return get_func_args(func.__func__, True)\n- elif inspect.ismethoddescriptor(func):\n- return []\n- elif isinstance(func, partial):\n- return [\n- x\n- for x in get_func_args(func.func)[len(func.args) :]\n- if not (func.keywords and x in func.keywords)\n- ]\n- elif hasattr(func, \"__call__\"):\n- if inspect.isroutine(func):\n- return []\n- if getattr(func, \"__name__\", None) == \"__call__\":\n- return []\n- return get_func_args(func.__call__, True)\n+ \"\"\"Return the argument name list of a callable object\"\"\"\n+ if not callable(func):\n+ raise TypeError(f\"func must be callable, got '{type(func).__name__}'\")\n+\n+ args = []\n+ try:\n+ sig = inspect.signature(func)\n+ except ValueError:\n+ return args\n+\n+ if isinstance(func, partial):\n+ partial_args = func.args\n+ partial_kw = func.keywords\n+\n+ for name, param in sig.parameters.items():\n+ if param.name in partial_args:\n+ continue\n+ if partial_kw and param.name in partial_kw:\n+ continue\n+ args.append(name)\n else:\n- raise TypeError(f\"{type(func)} is not callable\")\n- if stripself:\n- func_args.pop(0)\n- return func_args\n+ for name in sig.parameters.keys():\n+ args.append(name)\n+\n+ if stripself and args and args[0] == \"self\":\n+ args = args[1:]\n+ return args\n \n \n def get_spec(func):\n", "issue": "get_func_args does not fully work in CPython\nAs [shown in tests](https://github.com/scrapy/scrapy/blob/ada917307844950a81226f020b596d5932187f6e/tests/test_utils_python.py#L240-L243), `get_func_args` does not work in CPython with inputs like `str.split`, `\"\".join` or `itemgetter(2)`.\n", "before_files": [{"content": "\"\"\"\nThis module contains essential stuff that should've come with Python itself ;)\n\"\"\"\nimport gc\nimport inspect\nimport re\nimport sys\nimport weakref\nfrom functools import partial, wraps\nfrom itertools import chain\nfrom typing import Any, AsyncGenerator, AsyncIterable, Iterable, Union\n\nfrom scrapy.utils.asyncgen import as_async_generator\n\n\ndef flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\n >>> flatten([\"foo\", \"bar\"])\n ['foo', 'bar']\n >>> flatten([\"foo\", [\"baz\", 42], \"bar\"])\n ['foo', 'baz', 42, 'bar']\n \"\"\"\n return list(iflatten(x))\n\n\ndef iflatten(x):\n \"\"\"iflatten(sequence) -> iterator\n\n Similar to ``.flatten()``, but returns iterator instead\"\"\"\n for el in x:\n if is_listlike(el):\n for el_ in iflatten(el):\n yield el_\n else:\n yield el\n\n\ndef is_listlike(x: Any) -> bool:\n \"\"\"\n >>> is_listlike(\"foo\")\n False\n >>> is_listlike(5)\n False\n >>> is_listlike(b\"foo\")\n False\n >>> is_listlike([b\"foo\"])\n True\n >>> is_listlike((b\"foo\",))\n True\n >>> is_listlike({})\n True\n >>> is_listlike(set())\n True\n >>> is_listlike((x for x in range(3)))\n 
True\n >>> is_listlike(range(5))\n True\n \"\"\"\n return hasattr(x, \"__iter__\") and not isinstance(x, (str, bytes))\n\n\ndef unique(list_, key=lambda x: x):\n \"\"\"efficient function to uniquify a list preserving item order\"\"\"\n seen = set()\n result = []\n for item in list_:\n seenkey = key(item)\n if seenkey in seen:\n continue\n seen.add(seenkey)\n result.append(item)\n return result\n\n\ndef to_unicode(text, encoding=None, errors=\"strict\"):\n \"\"\"Return the unicode representation of a bytes object ``text``. If\n ``text`` is already an unicode object, return it as-is.\"\"\"\n if isinstance(text, str):\n return text\n if not isinstance(text, (bytes, str)):\n raise TypeError(\n \"to_unicode must receive a bytes or str \"\n f\"object, got {type(text).__name__}\"\n )\n if encoding is None:\n encoding = \"utf-8\"\n return text.decode(encoding, errors)\n\n\ndef to_bytes(text, encoding=None, errors=\"strict\"):\n \"\"\"Return the binary representation of ``text``. If ``text``\n is already a bytes object, return it as-is.\"\"\"\n if isinstance(text, bytes):\n return text\n if not isinstance(text, str):\n raise TypeError(\n \"to_bytes must receive a str or bytes \" f\"object, got {type(text).__name__}\"\n )\n if encoding is None:\n encoding = \"utf-8\"\n return text.encode(encoding, errors)\n\n\ndef re_rsearch(pattern, text, chunk_size=1024):\n \"\"\"\n This function does a reverse search in a text using a regular expression\n given in the attribute 'pattern'.\n Since the re module does not provide this functionality, we have to find for\n the expression into chunks of text extracted from the end (for the sake of efficiency).\n At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for\n the pattern. If the pattern is not found, another chunk is extracted, and another\n search is performed.\n This process continues until a match is found, or until the whole file is read.\n In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing\n the start position of the match, and the ending (regarding the entire text).\n \"\"\"\n\n def _chunk_iter():\n offset = len(text)\n while True:\n offset -= chunk_size * 1024\n if offset <= 0:\n break\n yield (text[offset:], offset)\n yield (text, 0)\n\n if isinstance(pattern, str):\n pattern = re.compile(pattern)\n\n for chunk, offset in _chunk_iter():\n matches = [match for match in pattern.finditer(chunk)]\n if matches:\n start, end = matches[-1].span()\n return offset + start, offset + end\n return None\n\n\ndef memoizemethod_noargs(method):\n \"\"\"Decorator to cache the result of a method (without arguments) using a\n weak reference to its object\n \"\"\"\n cache = weakref.WeakKeyDictionary()\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n if self not in cache:\n cache[self] = method(self, *args, **kwargs)\n return cache[self]\n\n return new_method\n\n\n_BINARYCHARS = {to_bytes(chr(i)) for i in range(32)} - {b\"\\0\", b\"\\t\", b\"\\n\", b\"\\r\"}\n_BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}\n\n\ndef binary_is_text(data):\n \"\"\"Returns ``True`` if the given ``data`` argument (a ``bytes`` object)\n does not contain unprintable control characters.\n \"\"\"\n if not isinstance(data, bytes):\n raise TypeError(f\"data must be bytes, got '{type(data).__name__}'\")\n return all(c not in _BINARYCHARS for c in data)\n\n\ndef get_func_args(func, stripself=False):\n \"\"\"Return the argument name list of a callable\"\"\"\n if inspect.isfunction(func):\n spec = 
inspect.getfullargspec(func)\n func_args = spec.args + spec.kwonlyargs\n elif inspect.isclass(func):\n return get_func_args(func.__init__, True)\n elif inspect.ismethod(func):\n return get_func_args(func.__func__, True)\n elif inspect.ismethoddescriptor(func):\n return []\n elif isinstance(func, partial):\n return [\n x\n for x in get_func_args(func.func)[len(func.args) :]\n if not (func.keywords and x in func.keywords)\n ]\n elif hasattr(func, \"__call__\"):\n if inspect.isroutine(func):\n return []\n if getattr(func, \"__name__\", None) == \"__call__\":\n return []\n return get_func_args(func.__call__, True)\n else:\n raise TypeError(f\"{type(func)} is not callable\")\n if stripself:\n func_args.pop(0)\n return func_args\n\n\ndef get_spec(func):\n \"\"\"Returns (args, kwargs) tuple for a function\n >>> import re\n >>> get_spec(re.match)\n (['pattern', 'string'], {'flags': 0})\n\n >>> class Test:\n ... def __call__(self, val):\n ... pass\n ... def method(self, val, flags=0):\n ... pass\n\n >>> get_spec(Test)\n (['self', 'val'], {})\n\n >>> get_spec(Test.method)\n (['self', 'val'], {'flags': 0})\n\n >>> get_spec(Test().method)\n (['self', 'val'], {'flags': 0})\n \"\"\"\n\n if inspect.isfunction(func) or inspect.ismethod(func):\n spec = inspect.getfullargspec(func)\n elif hasattr(func, \"__call__\"):\n spec = inspect.getfullargspec(func.__call__)\n else:\n raise TypeError(f\"{type(func)} is not callable\")\n\n defaults = spec.defaults or []\n\n firstdefault = len(spec.args) - len(defaults)\n args = spec.args[:firstdefault]\n kwargs = dict(zip(spec.args[firstdefault:], defaults))\n return args, kwargs\n\n\ndef equal_attributes(obj1, obj2, attributes):\n \"\"\"Compare two objects attributes\"\"\"\n # not attributes given return False by default\n if not attributes:\n return False\n\n temp1, temp2 = object(), object()\n for attr in attributes:\n # support callables like itemgetter\n if callable(attr):\n if attr(obj1) != attr(obj2):\n return False\n elif getattr(obj1, attr, temp1) != getattr(obj2, attr, temp2):\n return False\n # all attributes equal\n return True\n\n\ndef without_none_values(iterable):\n \"\"\"Return a copy of ``iterable`` with all ``None`` entries removed.\n\n If ``iterable`` is a mapping, return a dictionary where all pairs that have\n value ``None`` have been removed.\n \"\"\"\n try:\n return {k: v for k, v in iterable.items() if v is not None}\n except AttributeError:\n return type(iterable)((v for v in iterable if v is not None))\n\n\ndef global_object_name(obj):\n \"\"\"\n Return full name of a global object.\n\n >>> from scrapy import Request\n >>> global_object_name(Request)\n 'scrapy.http.request.Request'\n \"\"\"\n return f\"{obj.__module__}.{obj.__name__}\"\n\n\nif hasattr(sys, \"pypy_version_info\"):\n\n def garbage_collect():\n # Collecting weakreferences can take two collections on PyPy.\n gc.collect()\n gc.collect()\n\nelse:\n\n def garbage_collect():\n gc.collect()\n\n\nclass MutableChain(Iterable):\n \"\"\"\n Thin wrapper around itertools.chain, allowing to add iterables \"in-place\"\n \"\"\"\n\n def __init__(self, *args: Iterable):\n self.data = chain.from_iterable(args)\n\n def extend(self, *iterables: Iterable) -> None:\n self.data = chain(self.data, chain.from_iterable(iterables))\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.data)\n\n\nasync def _async_chain(*iterables: Union[Iterable, AsyncIterable]) -> AsyncGenerator:\n for it in iterables:\n async for o in as_async_generator(it):\n yield o\n\n\nclass 
MutableAsyncChain(AsyncIterable):\n \"\"\"\n Similar to MutableChain but for async iterables\n \"\"\"\n\n def __init__(self, *args: Union[Iterable, AsyncIterable]):\n self.data = _async_chain(*args)\n\n def extend(self, *iterables: Union[Iterable, AsyncIterable]) -> None:\n self.data = _async_chain(self.data, _async_chain(*iterables))\n\n def __aiter__(self):\n return self\n\n async def __anext__(self):\n return await self.data.__anext__()\n", "path": "scrapy/utils/python.py"}], "after_files": [{"content": "\"\"\"\nThis module contains essential stuff that should've come with Python itself ;)\n\"\"\"\nimport gc\nimport inspect\nimport re\nimport sys\nimport weakref\nfrom functools import partial, wraps\nfrom itertools import chain\nfrom typing import Any, AsyncGenerator, AsyncIterable, Iterable, Union\n\nfrom scrapy.utils.asyncgen import as_async_generator\n\n\ndef flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\n >>> flatten([\"foo\", \"bar\"])\n ['foo', 'bar']\n >>> flatten([\"foo\", [\"baz\", 42], \"bar\"])\n ['foo', 'baz', 42, 'bar']\n \"\"\"\n return list(iflatten(x))\n\n\ndef iflatten(x):\n \"\"\"iflatten(sequence) -> iterator\n\n Similar to ``.flatten()``, but returns iterator instead\"\"\"\n for el in x:\n if is_listlike(el):\n for el_ in iflatten(el):\n yield el_\n else:\n yield el\n\n\ndef is_listlike(x: Any) -> bool:\n \"\"\"\n >>> is_listlike(\"foo\")\n False\n >>> is_listlike(5)\n False\n >>> is_listlike(b\"foo\")\n False\n >>> is_listlike([b\"foo\"])\n True\n >>> is_listlike((b\"foo\",))\n True\n >>> is_listlike({})\n True\n >>> is_listlike(set())\n True\n >>> is_listlike((x for x in range(3)))\n True\n >>> is_listlike(range(5))\n True\n \"\"\"\n return hasattr(x, \"__iter__\") and not isinstance(x, (str, bytes))\n\n\ndef unique(list_, key=lambda x: x):\n \"\"\"efficient function to uniquify a list preserving item order\"\"\"\n seen = set()\n result = []\n for item in list_:\n seenkey = key(item)\n if seenkey in seen:\n continue\n seen.add(seenkey)\n result.append(item)\n return result\n\n\ndef to_unicode(text, encoding=None, errors=\"strict\"):\n \"\"\"Return the unicode representation of a bytes object ``text``. If\n ``text`` is already an unicode object, return it as-is.\"\"\"\n if isinstance(text, str):\n return text\n if not isinstance(text, (bytes, str)):\n raise TypeError(\n \"to_unicode must receive a bytes or str \"\n f\"object, got {type(text).__name__}\"\n )\n if encoding is None:\n encoding = \"utf-8\"\n return text.decode(encoding, errors)\n\n\ndef to_bytes(text, encoding=None, errors=\"strict\"):\n \"\"\"Return the binary representation of ``text``. 
If ``text``\n is already a bytes object, return it as-is.\"\"\"\n if isinstance(text, bytes):\n return text\n if not isinstance(text, str):\n raise TypeError(\n \"to_bytes must receive a str or bytes \" f\"object, got {type(text).__name__}\"\n )\n if encoding is None:\n encoding = \"utf-8\"\n return text.encode(encoding, errors)\n\n\ndef re_rsearch(pattern, text, chunk_size=1024):\n \"\"\"\n This function does a reverse search in a text using a regular expression\n given in the attribute 'pattern'.\n Since the re module does not provide this functionality, we have to find for\n the expression into chunks of text extracted from the end (for the sake of efficiency).\n At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for\n the pattern. If the pattern is not found, another chunk is extracted, and another\n search is performed.\n This process continues until a match is found, or until the whole file is read.\n In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing\n the start position of the match, and the ending (regarding the entire text).\n \"\"\"\n\n def _chunk_iter():\n offset = len(text)\n while True:\n offset -= chunk_size * 1024\n if offset <= 0:\n break\n yield (text[offset:], offset)\n yield (text, 0)\n\n if isinstance(pattern, str):\n pattern = re.compile(pattern)\n\n for chunk, offset in _chunk_iter():\n matches = [match for match in pattern.finditer(chunk)]\n if matches:\n start, end = matches[-1].span()\n return offset + start, offset + end\n return None\n\n\ndef memoizemethod_noargs(method):\n \"\"\"Decorator to cache the result of a method (without arguments) using a\n weak reference to its object\n \"\"\"\n cache = weakref.WeakKeyDictionary()\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n if self not in cache:\n cache[self] = method(self, *args, **kwargs)\n return cache[self]\n\n return new_method\n\n\n_BINARYCHARS = {to_bytes(chr(i)) for i in range(32)} - {b\"\\0\", b\"\\t\", b\"\\n\", b\"\\r\"}\n_BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}\n\n\ndef binary_is_text(data):\n \"\"\"Returns ``True`` if the given ``data`` argument (a ``bytes`` object)\n does not contain unprintable control characters.\n \"\"\"\n if not isinstance(data, bytes):\n raise TypeError(f\"data must be bytes, got '{type(data).__name__}'\")\n return all(c not in _BINARYCHARS for c in data)\n\n\ndef get_func_args(func, stripself=False):\n \"\"\"Return the argument name list of a callable object\"\"\"\n if not callable(func):\n raise TypeError(f\"func must be callable, got '{type(func).__name__}'\")\n\n args = []\n try:\n sig = inspect.signature(func)\n except ValueError:\n return args\n\n if isinstance(func, partial):\n partial_args = func.args\n partial_kw = func.keywords\n\n for name, param in sig.parameters.items():\n if param.name in partial_args:\n continue\n if partial_kw and param.name in partial_kw:\n continue\n args.append(name)\n else:\n for name in sig.parameters.keys():\n args.append(name)\n\n if stripself and args and args[0] == \"self\":\n args = args[1:]\n return args\n\n\ndef get_spec(func):\n \"\"\"Returns (args, kwargs) tuple for a function\n >>> import re\n >>> get_spec(re.match)\n (['pattern', 'string'], {'flags': 0})\n\n >>> class Test:\n ... def __call__(self, val):\n ... pass\n ... def method(self, val, flags=0):\n ... 
pass\n\n >>> get_spec(Test)\n (['self', 'val'], {})\n\n >>> get_spec(Test.method)\n (['self', 'val'], {'flags': 0})\n\n >>> get_spec(Test().method)\n (['self', 'val'], {'flags': 0})\n \"\"\"\n\n if inspect.isfunction(func) or inspect.ismethod(func):\n spec = inspect.getfullargspec(func)\n elif hasattr(func, \"__call__\"):\n spec = inspect.getfullargspec(func.__call__)\n else:\n raise TypeError(f\"{type(func)} is not callable\")\n\n defaults = spec.defaults or []\n\n firstdefault = len(spec.args) - len(defaults)\n args = spec.args[:firstdefault]\n kwargs = dict(zip(spec.args[firstdefault:], defaults))\n return args, kwargs\n\n\ndef equal_attributes(obj1, obj2, attributes):\n \"\"\"Compare two objects attributes\"\"\"\n # not attributes given return False by default\n if not attributes:\n return False\n\n temp1, temp2 = object(), object()\n for attr in attributes:\n # support callables like itemgetter\n if callable(attr):\n if attr(obj1) != attr(obj2):\n return False\n elif getattr(obj1, attr, temp1) != getattr(obj2, attr, temp2):\n return False\n # all attributes equal\n return True\n\n\ndef without_none_values(iterable):\n \"\"\"Return a copy of ``iterable`` with all ``None`` entries removed.\n\n If ``iterable`` is a mapping, return a dictionary where all pairs that have\n value ``None`` have been removed.\n \"\"\"\n try:\n return {k: v for k, v in iterable.items() if v is not None}\n except AttributeError:\n return type(iterable)((v for v in iterable if v is not None))\n\n\ndef global_object_name(obj):\n \"\"\"\n Return full name of a global object.\n\n >>> from scrapy import Request\n >>> global_object_name(Request)\n 'scrapy.http.request.Request'\n \"\"\"\n return f\"{obj.__module__}.{obj.__name__}\"\n\n\nif hasattr(sys, \"pypy_version_info\"):\n\n def garbage_collect():\n # Collecting weakreferences can take two collections on PyPy.\n gc.collect()\n gc.collect()\n\nelse:\n\n def garbage_collect():\n gc.collect()\n\n\nclass MutableChain(Iterable):\n \"\"\"\n Thin wrapper around itertools.chain, allowing to add iterables \"in-place\"\n \"\"\"\n\n def __init__(self, *args: Iterable):\n self.data = chain.from_iterable(args)\n\n def extend(self, *iterables: Iterable) -> None:\n self.data = chain(self.data, chain.from_iterable(iterables))\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.data)\n\n\nasync def _async_chain(*iterables: Union[Iterable, AsyncIterable]) -> AsyncGenerator:\n for it in iterables:\n async for o in as_async_generator(it):\n yield o\n\n\nclass MutableAsyncChain(AsyncIterable):\n \"\"\"\n Similar to MutableChain but for async iterables\n \"\"\"\n\n def __init__(self, *args: Union[Iterable, AsyncIterable]):\n self.data = _async_chain(*args)\n\n def extend(self, *iterables: Union[Iterable, AsyncIterable]) -> None:\n self.data = _async_chain(self.data, _async_chain(*iterables))\n\n def __aiter__(self):\n return self\n\n async def __anext__(self):\n return await self.data.__anext__()\n", "path": "scrapy/utils/python.py"}]} | 3,704 | 486 |
gh_patches_debug_6455 | rasdani/github-patches | git_diff | voicepaw__so-vits-svc-fork-354 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UnpicklingError: Weights only load failed. Unpickler error: Unsupported class numpy.core.multiarray._reconstruct
**Describe the bug**
I tried to update, but I got this exception starting from version 3.6.0 during inference:
```
UnpicklingError: Weights only load failed. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution.Do it only if you get the file from a trusted source. WeightsUnpickler error: Unsupported class numpy.core.multiarray._reconstruct
```
**To Reproduce**
Steps to reproduce the behavior:
- Update so-vits-svc-fork
- Run inference
**Additional context**
Initially I updated to version 3.8.0; because of the exception, I kept downgrading the version until I reached version 3.5.1, which resolved the problem.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/so_vits_svc_fork/cluster/__init__.py`
Content:
```
1 from __future__ import annotations
2
3 from pathlib import Path
4 from typing import Any
5
6 import torch
7 from sklearn.cluster import KMeans
8
9
10 def get_cluster_model(ckpt_path: Path | str):
11 with Path(ckpt_path).open("rb") as f:
12 checkpoint = torch.load(f, map_location="cpu", weights_only=True)
13 kmeans_dict = {}
14 for spk, ckpt in checkpoint.items():
15 km = KMeans(ckpt["n_features_in_"])
16 km.__dict__["n_features_in_"] = ckpt["n_features_in_"]
17 km.__dict__["_n_threads"] = ckpt["_n_threads"]
18 km.__dict__["cluster_centers_"] = ckpt["cluster_centers_"]
19 kmeans_dict[spk] = km
20 return kmeans_dict
21
22
23 def check_speaker(model: Any, speaker: Any):
24 if speaker not in model:
25 raise ValueError(f"Speaker {speaker} not in {list(model.keys())}")
26
27
28 def get_cluster_result(model: Any, x: Any, speaker: Any):
29 """
30 x: np.array [t, 256]
31 return cluster class result
32 """
33 check_speaker(model, speaker)
34 return model[speaker].predict(x)
35
36
37 def get_cluster_center_result(model: Any, x: Any, speaker: Any):
38 """x: np.array [t, 256]"""
39 check_speaker(model, speaker)
40 predict = model[speaker].predict(x)
41 return model[speaker].cluster_centers_[predict]
42
43
44 def get_center(model: Any, x: Any, speaker: Any):
45 check_speaker(model, speaker)
46 return model[speaker].cluster_centers_[x]
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/so_vits_svc_fork/cluster/__init__.py b/src/so_vits_svc_fork/cluster/__init__.py
--- a/src/so_vits_svc_fork/cluster/__init__.py
+++ b/src/so_vits_svc_fork/cluster/__init__.py
@@ -9,7 +9,9 @@
def get_cluster_model(ckpt_path: Path | str):
with Path(ckpt_path).open("rb") as f:
- checkpoint = torch.load(f, map_location="cpu", weights_only=True)
+ checkpoint = torch.load(
+ f, map_location="cpu"
+ ) # Danger of arbitrary code execution
kmeans_dict = {}
for spk, ckpt in checkpoint.items():
km = KMeans(ckpt["n_features_in_"])
| {"golden_diff": "diff --git a/src/so_vits_svc_fork/cluster/__init__.py b/src/so_vits_svc_fork/cluster/__init__.py\n--- a/src/so_vits_svc_fork/cluster/__init__.py\n+++ b/src/so_vits_svc_fork/cluster/__init__.py\n@@ -9,7 +9,9 @@\n \n def get_cluster_model(ckpt_path: Path | str):\n with Path(ckpt_path).open(\"rb\") as f:\n- checkpoint = torch.load(f, map_location=\"cpu\", weights_only=True)\n+ checkpoint = torch.load(\n+ f, map_location=\"cpu\"\n+ ) # Danger of arbitrary code execution\n kmeans_dict = {}\n for spk, ckpt in checkpoint.items():\n km = KMeans(ckpt[\"n_features_in_\"])\n", "issue": "UnpicklingError: Weights only load failed. Unpickler error: Unsupported class numpy.core.multiarray._reconstruct\n**Describe the bug**\r\nI tried to update, but I got this exception start from version 3.6.0 during inference\r\n```\r\nUnpicklingError: Weights only load failed. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution.Do it only if you get the file from a trusted source. WeightsUnpickler error: Unsupported class numpy.core.multiarray._reconstruct\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n- Update so-vits-svc-fork\r\n- Run inference\r\n\r\n**Additional context**\r\nInitially I updated to version 3.8.0, because of the exception I tried to solve by keep downgrading the version until I got to version 3.5.1 to solve the problem.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any\n\nimport torch\nfrom sklearn.cluster import KMeans\n\n\ndef get_cluster_model(ckpt_path: Path | str):\n with Path(ckpt_path).open(\"rb\") as f:\n checkpoint = torch.load(f, map_location=\"cpu\", weights_only=True)\n kmeans_dict = {}\n for spk, ckpt in checkpoint.items():\n km = KMeans(ckpt[\"n_features_in_\"])\n km.__dict__[\"n_features_in_\"] = ckpt[\"n_features_in_\"]\n km.__dict__[\"_n_threads\"] = ckpt[\"_n_threads\"]\n km.__dict__[\"cluster_centers_\"] = ckpt[\"cluster_centers_\"]\n kmeans_dict[spk] = km\n return kmeans_dict\n\n\ndef check_speaker(model: Any, speaker: Any):\n if speaker not in model:\n raise ValueError(f\"Speaker {speaker} not in {list(model.keys())}\")\n\n\ndef get_cluster_result(model: Any, x: Any, speaker: Any):\n \"\"\"\n x: np.array [t, 256]\n return cluster class result\n \"\"\"\n check_speaker(model, speaker)\n return model[speaker].predict(x)\n\n\ndef get_cluster_center_result(model: Any, x: Any, speaker: Any):\n \"\"\"x: np.array [t, 256]\"\"\"\n check_speaker(model, speaker)\n predict = model[speaker].predict(x)\n return model[speaker].cluster_centers_[predict]\n\n\ndef get_center(model: Any, x: Any, speaker: Any):\n check_speaker(model, speaker)\n return model[speaker].cluster_centers_[x]\n", "path": "src/so_vits_svc_fork/cluster/__init__.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any\n\nimport torch\nfrom sklearn.cluster import KMeans\n\n\ndef get_cluster_model(ckpt_path: Path | str):\n with Path(ckpt_path).open(\"rb\") as f:\n checkpoint = torch.load(\n f, map_location=\"cpu\"\n ) # Danger of arbitrary code execution\n kmeans_dict = {}\n for spk, ckpt in checkpoint.items():\n km = KMeans(ckpt[\"n_features_in_\"])\n km.__dict__[\"n_features_in_\"] = ckpt[\"n_features_in_\"]\n km.__dict__[\"_n_threads\"] = ckpt[\"_n_threads\"]\n km.__dict__[\"cluster_centers_\"] = ckpt[\"cluster_centers_\"]\n kmeans_dict[spk] = km\n return 
kmeans_dict\n\n\ndef check_speaker(model: Any, speaker: Any):\n if speaker not in model:\n raise ValueError(f\"Speaker {speaker} not in {list(model.keys())}\")\n\n\ndef get_cluster_result(model: Any, x: Any, speaker: Any):\n \"\"\"\n x: np.array [t, 256]\n return cluster class result\n \"\"\"\n check_speaker(model, speaker)\n return model[speaker].predict(x)\n\n\ndef get_cluster_center_result(model: Any, x: Any, speaker: Any):\n \"\"\"x: np.array [t, 256]\"\"\"\n check_speaker(model, speaker)\n predict = model[speaker].predict(x)\n return model[speaker].cluster_centers_[predict]\n\n\ndef get_center(model: Any, x: Any, speaker: Any):\n check_speaker(model, speaker)\n return model[speaker].cluster_centers_[x]\n", "path": "src/so_vits_svc_fork/cluster/__init__.py"}]} | 919 | 179 |
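Editor's note: the sketch below is not part of the dataset record above. It is a hypothetical reproduction of the `weights_only` failure that record describes, assuming a cluster checkpoint that stores NumPy arrays, as the KMeans attributes in the patched loader do.

```python
# Editor's illustration (not part of the record above); file name and speaker key are invented.
import numpy as np
import torch

ckpt = {"speaker0": {"n_features_in_": 256,
                     "_n_threads": 4,
                     "cluster_centers_": np.zeros((8, 256), dtype=np.float32)}}
torch.save(ckpt, "cluster.pt")

try:
    torch.load("cluster.pt", map_location="cpu", weights_only=True)
except Exception as err:
    # On the affected torch versions this is the UnpicklingError quoted in the issue.
    print(type(err).__name__, err)

# The patch falls back to a full unpickle, which is only safe for trusted checkpoints.
checkpoint = torch.load("cluster.pt", map_location="cpu")
print(sorted(checkpoint))  # ['speaker0']
```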
gh_patches_debug_7476 | rasdani/github-patches | git_diff | Mailu__Mailu-1084 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Setup error for incorrect docker network subnet
When entering an incorrect Docker subnet (e.g. 172.168.0.1/16), the setup throws a 500 error without any reason being given.
If you run locally, the error is clearly reported in an ungraceful way.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup/server.py`
Content:
```
1 import flask
2 import flask_bootstrap
3 import redis
4 import json
5 import os
6 import jinja2
7 import uuid
8 import string
9 import random
10 import ipaddress
11 import hashlib
12 import time
13
14
15 version = os.getenv("this_version", "master")
16 static_url_path = "/" + version + "/static"
17 app = flask.Flask(__name__, static_url_path=static_url_path)
18 flask_bootstrap.Bootstrap(app)
19 db = redis.StrictRedis(host='redis', port=6379, db=0)
20
21
22 def render_flavor(flavor, template, data):
23 return flask.render_template(
24 os.path.join(flavor, template),
25 **data
26 )
27
28
29 @app.add_template_global
30 def secret(length=16):
31 charset = string.ascii_uppercase + string.digits
32 return ''.join(
33 random.SystemRandom().choice(charset)
34 for _ in range(length)
35 )
36
37 #Original copied from https://github.com/andrewlkho/ulagen
38 def random_ipv6_subnet():
39 eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff
40 eui64_canon = "-".join([format(eui64, "02X")[i:i+2] for i in range(0, 18, 2)])
41
42 h = hashlib.sha1()
43 h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))
44 globalid = h.hexdigest()[0:10]
45
46 prefix = ":".join(("fd" + globalid[0:2], globalid[2:6], globalid[6:10]))
47 return prefix
48
49 def build_app(path):
50
51 app.jinja_env.trim_blocks = True
52 app.jinja_env.lstrip_blocks = True
53
54 @app.context_processor
55 def app_context():
56 return dict(versions=os.getenv("VERSIONS","master").split(','))
57
58 prefix_bp = flask.Blueprint(version, __name__)
59 prefix_bp.jinja_loader = jinja2.ChoiceLoader([
60 jinja2.FileSystemLoader(os.path.join(path, "templates")),
61 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
62 ])
63
64 root_bp = flask.Blueprint("root", __name__)
65 root_bp.jinja_loader = jinja2.ChoiceLoader([
66 jinja2.FileSystemLoader(os.path.join(path, "templates")),
67 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
68 ])
69
70 @prefix_bp.context_processor
71 @root_bp.context_processor
72 def bp_context(version=version):
73 return dict(version=version)
74
75 @prefix_bp.route("/")
76 @root_bp.route("/")
77 def wizard():
78 return flask.render_template('wizard.html')
79
80 @prefix_bp.route("/submit_flavor", methods=["POST"])
81 @root_bp.route("/submit_flavor", methods=["POST"])
82 def submit_flavor():
83 data = flask.request.form.copy()
84 subnet6 = random_ipv6_subnet()
85 steps = sorted(os.listdir(os.path.join(path, "templates", "steps", data["flavor"])))
86 return flask.render_template('wizard.html', flavor=data["flavor"], steps=steps, subnet6=subnet6)
87
88 @prefix_bp.route("/submit", methods=["POST"])
89 @root_bp.route("/submit", methods=["POST"])
90 def submit():
91 data = flask.request.form.copy()
92 data['uid'] = str(uuid.uuid4())
93 try:
94 data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])
95 except ValueError as err:
96 return "Error while generating files: " + str(err)
97 db.set(data['uid'], json.dumps(data))
98 return flask.redirect(flask.url_for('.setup', uid=data['uid']))
99
100 @prefix_bp.route("/setup/<uid>", methods=["GET"])
101 @root_bp.route("/setup/<uid>", methods=["GET"])
102 def setup(uid):
103 data = json.loads(db.get(uid))
104 flavor = data.get("flavor", "compose")
105 rendered = render_flavor(flavor, "setup.html", data)
106 return flask.render_template("setup.html", contents=rendered)
107
108 @prefix_bp.route("/file/<uid>/<filepath>", methods=["GET"])
109 @root_bp.route("/file/<uid>/<filepath>", methods=["GET"])
110 def file(uid, filepath):
111 data = json.loads(db.get(uid))
112 flavor = data.get("flavor", "compose")
113 return flask.Response(
114 render_flavor(flavor, filepath, data),
115 mimetype="application/text"
116 )
117
118 app.register_blueprint(prefix_bp, url_prefix="/{}".format(version))
119 app.register_blueprint(root_bp)
120
121
122 if __name__ == "__main__":
123 build_app("/tmp/mailutest")
124 app.run(debug=True)
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup/server.py b/setup/server.py
--- a/setup/server.py
+++ b/setup/server.py
@@ -91,7 +91,7 @@
data = flask.request.form.copy()
data['uid'] = str(uuid.uuid4())
try:
- data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])
+ data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])
except ValueError as err:
return "Error while generating files: " + str(err)
db.set(data['uid'], json.dumps(data))
| {"golden_diff": "diff --git a/setup/server.py b/setup/server.py\n--- a/setup/server.py\n+++ b/setup/server.py\n@@ -91,7 +91,7 @@\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n try:\n- data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n+ data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n", "issue": "Setup error for incorrect docker network subnet\nWhen entering an incorrect docker subnet (e.g. 172.168.0.1/16) the setup throws a 500 error -- without any reasons being given. \r\n\r\nIf you run locally, the error is clearly reported in an ungraceful way.\n", "before_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\nimport time\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n#Original copied from https://github.com/andrewlkho/ulagen\ndef random_ipv6_subnet():\n eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff\n eui64_canon = \"-\".join([format(eui64, \"02X\")[i:i+2] for i in range(0, 18, 2)])\n\n h = hashlib.sha1()\n h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))\n globalid = h.hexdigest()[0:10]\n\n prefix = \":\".join((\"fd\" + globalid[0:2], globalid[2:6], globalid[6:10]))\n return prefix\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(versions=os.getenv(\"VERSIONS\",\"master\").split(','))\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n subnet6 = random_ipv6_subnet()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps, subnet6=subnet6)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n 
try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}], "after_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\nimport time\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n#Original copied from https://github.com/andrewlkho/ulagen\ndef random_ipv6_subnet():\n eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff\n eui64_canon = \"-\".join([format(eui64, \"02X\")[i:i+2] for i in range(0, 18, 2)])\n\n h = hashlib.sha1()\n h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))\n globalid = h.hexdigest()[0:10]\n\n prefix = \":\".join((\"fd\" + globalid[0:2], globalid[2:6], globalid[6:10]))\n return prefix\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(versions=os.getenv(\"VERSIONS\",\"master\").split(','))\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n subnet6 = 
random_ipv6_subnet()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps, subnet6=subnet6)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}]} | 1,664 | 132 |
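Editor's note: the snippet below is not part of the dataset record above; it only illustrates the `ipaddress` behaviour behind that record's one-line fix, using the subnet value quoted in the issue.

```python
# Editor's illustration (not part of the record above).
import ipaddress

subnet = "172.168.0.1/16"  # host bits set, as in the reported input

try:
    ipaddress.IPv4Network(subnet)  # strict=True is the default
except ValueError as err:
    print("strict:", err)  # "... has host bits set" -> surfaced as the 500 error

net = ipaddress.IPv4Network(subnet, strict=False)  # masks the host bits instead
print(net, "->", net[-2])  # 172.168.0.0/16 -> 172.168.255.254
```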
gh_patches_debug_23312 | rasdani/github-patches | git_diff | ephios-dev__ephios-338 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Format event description
As a planner, I want to be able to format the event description. This should at least mean that links will be formatted so that they are clickable. Maybe this should mean that the description will accept Markdown.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ephios/extra/templatetags/rich_text.py`
Content:
```
1 import bleach
2 import markdown
3 from django import template
4 from django.utils.safestring import mark_safe
5
6 register = template.Library()
7
8 ALLOWED_TAGS = [
9 "a",
10 "abbr",
11 "acronym",
12 "b",
13 "blockquote",
14 "br",
15 "code",
16 "div",
17 "em",
18 "h1",
19 "h2",
20 "h3",
21 "h4",
22 "h5",
23 "h6",
24 "hr",
25 "i",
26 "li",
27 "ol",
28 "p",
29 "pre",
30 "span",
31 "strong",
32 "table",
33 "tbody",
34 "td",
35 "th",
36 "thead",
37 "tr",
38 "ul",
39 ]
40
41
42 ALLOWED_ATTRIBUTES = {
43 "a": ["href", "title", "class"],
44 "abbr": ["title"],
45 "acronym": ["title"],
46 "table": ["width"],
47 "td": ["width", "align"],
48 "div": ["class"],
49 "p": ["class"],
50 "span": ["class", "title"],
51 }
52
53 ALLOWED_PROTOCOLS = ["http", "https", "mailto", "tel"]
54
55
56 def markdown_compile(source):
57 extensions = ["markdown.extensions.sane_lists", "markdown.extensions.nl2br"]
58 return bleach.clean(
59 markdown.markdown(source, extensions=extensions),
60 tags=ALLOWED_TAGS,
61 attributes=ALLOWED_ATTRIBUTES,
62 protocols=ALLOWED_PROTOCOLS,
63 )
64
65
66 @register.filter
67 def rich_text(text: str, **kwargs):
68 """
69 Processes markdown and cleans HTML in a text input.
70 """
71 text = str(text)
72 linker = bleach.Linker(parse_email=True)
73 body_md = linker.linkify(markdown_compile(text))
74 return mark_safe(body_md)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ephios/extra/templatetags/rich_text.py b/ephios/extra/templatetags/rich_text.py
--- a/ephios/extra/templatetags/rich_text.py
+++ b/ephios/extra/templatetags/rich_text.py
@@ -53,22 +53,28 @@
ALLOWED_PROTOCOLS = ["http", "https", "mailto", "tel"]
-def markdown_compile(source):
+def markdown_compile(source, excluded_tags=""):
extensions = ["markdown.extensions.sane_lists", "markdown.extensions.nl2br"]
+ tags = ALLOWED_TAGS.copy()
+ for tag in excluded_tags.split(","):
+ try:
+ tags.remove(tag)
+ except ValueError:
+ pass
return bleach.clean(
markdown.markdown(source, extensions=extensions),
- tags=ALLOWED_TAGS,
+ tags=tags,
attributes=ALLOWED_ATTRIBUTES,
protocols=ALLOWED_PROTOCOLS,
)
@register.filter
-def rich_text(text: str, **kwargs):
+def rich_text(text: str, excluded_tags=""):
"""
Processes markdown and cleans HTML in a text input.
"""
text = str(text)
linker = bleach.Linker(parse_email=True)
- body_md = linker.linkify(markdown_compile(text))
+ body_md = linker.linkify(markdown_compile(text, excluded_tags=excluded_tags))
return mark_safe(body_md)
| {"golden_diff": "diff --git a/ephios/extra/templatetags/rich_text.py b/ephios/extra/templatetags/rich_text.py\n--- a/ephios/extra/templatetags/rich_text.py\n+++ b/ephios/extra/templatetags/rich_text.py\n@@ -53,22 +53,28 @@\n ALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\", \"tel\"]\n \n \n-def markdown_compile(source):\n+def markdown_compile(source, excluded_tags=\"\"):\n extensions = [\"markdown.extensions.sane_lists\", \"markdown.extensions.nl2br\"]\n+ tags = ALLOWED_TAGS.copy()\n+ for tag in excluded_tags.split(\",\"):\n+ try:\n+ tags.remove(tag)\n+ except ValueError:\n+ pass\n return bleach.clean(\n markdown.markdown(source, extensions=extensions),\n- tags=ALLOWED_TAGS,\n+ tags=tags,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n \n \n @register.filter\n-def rich_text(text: str, **kwargs):\n+def rich_text(text: str, excluded_tags=\"\"):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n linker = bleach.Linker(parse_email=True)\n- body_md = linker.linkify(markdown_compile(text))\n+ body_md = linker.linkify(markdown_compile(text, excluded_tags=excluded_tags))\n return mark_safe(body_md)\n", "issue": "Format event description\nAs a planner, I want to be able to format the event description. This should at least mean that links will be formatted so that they are clickable. Maybe this should mean that the description will accept Markdown.\n", "before_files": [{"content": "import bleach\nimport markdown\nfrom django import template\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n \"a\",\n \"abbr\",\n \"acronym\",\n \"b\",\n \"blockquote\",\n \"br\",\n \"code\",\n \"div\",\n \"em\",\n \"h1\",\n \"h2\",\n \"h3\",\n \"h4\",\n \"h5\",\n \"h6\",\n \"hr\",\n \"i\",\n \"li\",\n \"ol\",\n \"p\",\n \"pre\",\n \"span\",\n \"strong\",\n \"table\",\n \"tbody\",\n \"td\",\n \"th\",\n \"thead\",\n \"tr\",\n \"ul\",\n]\n\n\nALLOWED_ATTRIBUTES = {\n \"a\": [\"href\", \"title\", \"class\"],\n \"abbr\": [\"title\"],\n \"acronym\": [\"title\"],\n \"table\": [\"width\"],\n \"td\": [\"width\", \"align\"],\n \"div\": [\"class\"],\n \"p\": [\"class\"],\n \"span\": [\"class\", \"title\"],\n}\n\nALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\", \"tel\"]\n\n\ndef markdown_compile(source):\n extensions = [\"markdown.extensions.sane_lists\", \"markdown.extensions.nl2br\"]\n return bleach.clean(\n markdown.markdown(source, extensions=extensions),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n linker = bleach.Linker(parse_email=True)\n body_md = linker.linkify(markdown_compile(text))\n return mark_safe(body_md)\n", "path": "ephios/extra/templatetags/rich_text.py"}], "after_files": [{"content": "import bleach\nimport markdown\nfrom django import template\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n \"a\",\n \"abbr\",\n \"acronym\",\n \"b\",\n \"blockquote\",\n \"br\",\n \"code\",\n \"div\",\n \"em\",\n \"h1\",\n \"h2\",\n \"h3\",\n \"h4\",\n \"h5\",\n \"h6\",\n \"hr\",\n \"i\",\n \"li\",\n \"ol\",\n \"p\",\n \"pre\",\n \"span\",\n \"strong\",\n \"table\",\n \"tbody\",\n \"td\",\n \"th\",\n \"thead\",\n \"tr\",\n \"ul\",\n]\n\n\nALLOWED_ATTRIBUTES = {\n \"a\": [\"href\", \"title\", \"class\"],\n \"abbr\": [\"title\"],\n \"acronym\": [\"title\"],\n 
\"table\": [\"width\"],\n \"td\": [\"width\", \"align\"],\n \"div\": [\"class\"],\n \"p\": [\"class\"],\n \"span\": [\"class\", \"title\"],\n}\n\nALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\", \"tel\"]\n\n\ndef markdown_compile(source, excluded_tags=\"\"):\n extensions = [\"markdown.extensions.sane_lists\", \"markdown.extensions.nl2br\"]\n tags = ALLOWED_TAGS.copy()\n for tag in excluded_tags.split(\",\"):\n try:\n tags.remove(tag)\n except ValueError:\n pass\n return bleach.clean(\n markdown.markdown(source, extensions=extensions),\n tags=tags,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, excluded_tags=\"\"):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n linker = bleach.Linker(parse_email=True)\n body_md = linker.linkify(markdown_compile(text, excluded_tags=excluded_tags))\n return mark_safe(body_md)\n", "path": "ephios/extra/templatetags/rich_text.py"}]} | 839 | 314 |
gh_patches_debug_39701 | rasdani/github-patches | git_diff | scikit-image__scikit-image-4339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[performance] Unwanted cast from float32 to double in slic
## Description
If a `float32` image is given to `skimage.segmentation.slic`, it will be cast to `double` implicitly.
In fact, in `slic`, the Cython function `_slic_cython` is called with the input image preprocessed with `img_as_float`. If the input array data type is `float32`, it is not touched.
Since the `_slic_cython` signature is
```
(double[:, :, :, ::1] image_zyx,
double[:, ::1] segments,
float step,
Py_ssize_t max_iter,
double[::1] spacing,
bint slic_zero)
```
the input image is automatically and silently cast to `double`.
Using the `np_floats` fused type as advised in #3111 solves the problem.
--- END ISSUE ---
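Editor's note: the check below is not part of the dataset record; it only illustrates the behaviour this issue describes. `img_as_float` already preserves `float32`, so once `_slic_cython` accepts a float32/float64 fused type the image can pass through without an implicit `double` copy.

```python
# Editor's illustration (not part of the record); sizes and segment count are arbitrary.
import numpy as np
from skimage import img_as_float
from skimage.segmentation import slic

img = np.random.rand(48, 48, 3).astype(np.float32)
print(img_as_float(img).dtype)                    # float32, not promoted
labels = slic(img, n_segments=40, compactness=10)
print(labels.shape, labels.dtype)                 # (48, 48) integer label map
```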
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/segmentation/slic_superpixels.py`
Content:
```
1 from collections.abc import Iterable
2 import numpy as np
3 from scipy import ndimage as ndi
4
5 from ..util import img_as_float, regular_grid
6 from ..segmentation._slic import (_slic_cython,
7 _enforce_label_connectivity_cython)
8 from ..color import rgb2lab
9
10
11 def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,
12 spacing=None, multichannel=True, convert2lab=None,
13 enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3,
14 slic_zero=False):
15 """Segments image using k-means clustering in Color-(x,y,z) space.
16
17 Parameters
18 ----------
19 image : 2D, 3D or 4D ndarray
20 Input image, which can be 2D or 3D, and grayscale or multichannel
21 (see `multichannel` parameter).
22 n_segments : int, optional
23 The (approximate) number of labels in the segmented output image.
24 compactness : float, optional
25 Balances color proximity and space proximity. Higher values give
26 more weight to space proximity, making superpixel shapes more
27 square/cubic. In SLICO mode, this is the initial compactness.
28 This parameter depends strongly on image contrast and on the
29 shapes of objects in the image. We recommend exploring possible
30 values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before
31 refining around a chosen value.
32 max_iter : int, optional
33 Maximum number of iterations of k-means.
34 sigma : float or (3,) array-like of floats, optional
35 Width of Gaussian smoothing kernel for pre-processing for each
36 dimension of the image. The same sigma is applied to each dimension in
37 case of a scalar value. Zero means no smoothing.
38 Note, that `sigma` is automatically scaled if it is scalar and a
39 manual voxel spacing is provided (see Notes section).
40 spacing : (3,) array-like of floats, optional
41 The voxel spacing along each image dimension. By default, `slic`
42 assumes uniform spacing (same voxel resolution along z, y and x).
43 This parameter controls the weights of the distances along z, y,
44 and x during k-means clustering.
45 multichannel : bool, optional
46 Whether the last axis of the image is to be interpreted as multiple
47 channels or another spatial dimension.
48 convert2lab : bool, optional
49 Whether the input should be converted to Lab colorspace prior to
50 segmentation. The input image *must* be RGB. Highly recommended.
51 This option defaults to ``True`` when ``multichannel=True`` *and*
52 ``image.shape[-1] == 3``.
53 enforce_connectivity: bool, optional
54 Whether the generated segments are connected or not
55 min_size_factor: float, optional
56 Proportion of the minimum segment size to be removed with respect
57 to the supposed segment size ```depth*width*height/n_segments```
58 max_size_factor: float, optional
59 Proportion of the maximum connected segment size. A value of 3 works
60 in most of the cases.
61 slic_zero: bool, optional
62 Run SLIC-zero, the zero-parameter mode of SLIC. [2]_
63
64 Returns
65 -------
66 labels : 2D or 3D array
67 Integer mask indicating segment labels.
68
69 Raises
70 ------
71 ValueError
72 If ``convert2lab`` is set to ``True`` but the last array
73 dimension is not of length 3.
74
75 Notes
76 -----
77 * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to
78 segmentation.
79
80 * If `sigma` is scalar and `spacing` is provided, the kernel width is
81 divided along each dimension by the spacing. For example, if ``sigma=1``
82 and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This
83 ensures sensible smoothing for anisotropic images.
84
85 * The image is rescaled to be in [0, 1] prior to processing.
86
87 * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To
88 interpret them as 3D with the last dimension having length 3, use
89 `multichannel=False`.
90
91 References
92 ----------
93 .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,
94 Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to
95 State-of-the-art Superpixel Methods, TPAMI, May 2012.
96 .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO
97
98 Examples
99 --------
100 >>> from skimage.segmentation import slic
101 >>> from skimage.data import astronaut
102 >>> img = astronaut()
103 >>> segments = slic(img, n_segments=100, compactness=10)
104
105 Increasing the compactness parameter yields more square regions:
106
107 >>> segments = slic(img, n_segments=100, compactness=20)
108
109 """
110
111 image = img_as_float(image)
112 is_2d = False
113 if image.ndim == 2:
114 # 2D grayscale image
115 image = image[np.newaxis, ..., np.newaxis]
116 is_2d = True
117 elif image.ndim == 3 and multichannel:
118 # Make 2D multichannel image 3D with depth = 1
119 image = image[np.newaxis, ...]
120 is_2d = True
121 elif image.ndim == 3 and not multichannel:
122 # Add channel as single last dimension
123 image = image[..., np.newaxis]
124
125 if spacing is None:
126 spacing = np.ones(3)
127 elif isinstance(spacing, (list, tuple)):
128 spacing = np.array(spacing, dtype=np.double)
129
130 if not isinstance(sigma, Iterable):
131 sigma = np.array([sigma, sigma, sigma], dtype=np.double)
132 sigma /= spacing.astype(np.double)
133 elif isinstance(sigma, (list, tuple)):
134 sigma = np.array(sigma, dtype=np.double)
135 if (sigma > 0).any():
136 # add zero smoothing for multichannel dimension
137 sigma = list(sigma) + [0]
138 image = ndi.gaussian_filter(image, sigma)
139
140 if multichannel and (convert2lab or convert2lab is None):
141 if image.shape[-1] != 3 and convert2lab:
142 raise ValueError("Lab colorspace conversion requires a RGB image.")
143 elif image.shape[-1] == 3:
144 image = rgb2lab(image)
145
146 depth, height, width = image.shape[:3]
147
148 # initialize cluster centroids for desired number of segments
149 grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]
150 slices = regular_grid(image.shape[:3], n_segments)
151 step_z, step_y, step_x = [int(s.step if s.step is not None else 1)
152 for s in slices]
153 segments_z = grid_z[slices]
154 segments_y = grid_y[slices]
155 segments_x = grid_x[slices]
156
157 segments_color = np.zeros(segments_z.shape + (image.shape[3],))
158 segments = np.concatenate([segments_z[..., np.newaxis],
159 segments_y[..., np.newaxis],
160 segments_x[..., np.newaxis],
161 segments_color],
162 axis=-1).reshape(-1, 3 + image.shape[3])
163 segments = np.ascontiguousarray(segments)
164
165 # we do the scaling of ratio in the same way as in the SLIC paper
166 # so the values have the same meaning
167 step = float(max((step_z, step_y, step_x)))
168 ratio = 1.0 / compactness
169
170 image = np.ascontiguousarray(image * ratio)
171
172 labels = _slic_cython(image, segments, step, max_iter, spacing, slic_zero)
173
174 if enforce_connectivity:
175 segment_size = depth * height * width / n_segments
176 min_size = int(min_size_factor * segment_size)
177 max_size = int(max_size_factor * segment_size)
178 labels = _enforce_label_connectivity_cython(labels,
179 min_size,
180 max_size)
181
182 if is_2d:
183 labels = labels[0]
184
185 return labels
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/segmentation/slic_superpixels.py b/skimage/segmentation/slic_superpixels.py
--- a/skimage/segmentation/slic_superpixels.py
+++ b/skimage/segmentation/slic_superpixels.py
@@ -109,6 +109,8 @@
"""
image = img_as_float(image)
+ dtype = image.dtype
+
is_2d = False
if image.ndim == 2:
# 2D grayscale image
@@ -123,15 +125,15 @@
image = image[..., np.newaxis]
if spacing is None:
- spacing = np.ones(3)
+ spacing = np.ones(3, dtype=dtype)
elif isinstance(spacing, (list, tuple)):
- spacing = np.array(spacing, dtype=np.double)
+ spacing = np.array(spacing, dtype=dtype)
if not isinstance(sigma, Iterable):
- sigma = np.array([sigma, sigma, sigma], dtype=np.double)
- sigma /= spacing.astype(np.double)
+ sigma = np.array([sigma, sigma, sigma], dtype=dtype)
+ sigma /= spacing.astype(dtype)
elif isinstance(sigma, (list, tuple)):
- sigma = np.array(sigma, dtype=np.double)
+ sigma = np.array(sigma, dtype=dtype)
if (sigma > 0).any():
# add zero smoothing for multichannel dimension
sigma = list(sigma) + [0]
@@ -146,7 +148,10 @@
depth, height, width = image.shape[:3]
# initialize cluster centroids for desired number of segments
- grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]
+ grid_z, grid_y, grid_x = np.meshgrid(np.arange(depth, dtype=dtype),
+ np.arange(height, dtype=dtype),
+ np.arange(width, dtype=dtype),
+ indexing='ij')
slices = regular_grid(image.shape[:3], n_segments)
step_z, step_y, step_x = [int(s.step if s.step is not None else 1)
for s in slices]
@@ -154,7 +159,8 @@
segments_y = grid_y[slices]
segments_x = grid_x[slices]
- segments_color = np.zeros(segments_z.shape + (image.shape[3],))
+ segments_color = np.zeros(segments_z.shape + (image.shape[3],),
+ dtype=dtype)
segments = np.concatenate([segments_z[..., np.newaxis],
segments_y[..., np.newaxis],
segments_x[..., np.newaxis],
@@ -164,8 +170,8 @@
# we do the scaling of ratio in the same way as in the SLIC paper
# so the values have the same meaning
- step = float(max((step_z, step_y, step_x)))
- ratio = 1.0 / compactness
+ step = dtype.type(max((step_z, step_y, step_x)))
+ ratio = dtype.type(1.0 / compactness)
image = np.ascontiguousarray(image * ratio)
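The essence of the patch above is to thread the input image's dtype through every intermediate allocation, so a `float32` input is no longer promoted to `float64` on the Python side before `_slic_cython` is called. The snippet below is only an illustrative sketch of that pattern (it mirrors the allocations touched by the diff and assumes the matching Cython-side change to accept 32-bit floats, e.g. via `np_floats` fused types, lands in the same PR; it is not part of scikit-image itself):

```python
import numpy as np

image = np.random.rand(4, 32, 32, 3).astype(np.float32)
dtype = image.dtype  # keep the caller's precision

# Allocate helper arrays with the input dtype instead of hard-coding np.double.
spacing = np.ones(3, dtype=dtype)
sigma = np.array([1.0, 1.0, 1.0], dtype=dtype) / spacing.astype(dtype)
grid_z, grid_y, grid_x = np.meshgrid(np.arange(image.shape[0], dtype=dtype),
                                     np.arange(image.shape[1], dtype=dtype),
                                     np.arange(image.shape[2], dtype=dtype),
                                     indexing='ij')

assert grid_z.dtype == np.float32  # no implicit float64 copies introduced
```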
| {"golden_diff": "diff --git a/skimage/segmentation/slic_superpixels.py b/skimage/segmentation/slic_superpixels.py\n--- a/skimage/segmentation/slic_superpixels.py\n+++ b/skimage/segmentation/slic_superpixels.py\n@@ -109,6 +109,8 @@\n \"\"\"\n \n image = img_as_float(image)\n+ dtype = image.dtype\n+\n is_2d = False\n if image.ndim == 2:\n # 2D grayscale image\n@@ -123,15 +125,15 @@\n image = image[..., np.newaxis]\n \n if spacing is None:\n- spacing = np.ones(3)\n+ spacing = np.ones(3, dtype=dtype)\n elif isinstance(spacing, (list, tuple)):\n- spacing = np.array(spacing, dtype=np.double)\n+ spacing = np.array(spacing, dtype=dtype)\n \n if not isinstance(sigma, Iterable):\n- sigma = np.array([sigma, sigma, sigma], dtype=np.double)\n- sigma /= spacing.astype(np.double)\n+ sigma = np.array([sigma, sigma, sigma], dtype=dtype)\n+ sigma /= spacing.astype(dtype)\n elif isinstance(sigma, (list, tuple)):\n- sigma = np.array(sigma, dtype=np.double)\n+ sigma = np.array(sigma, dtype=dtype)\n if (sigma > 0).any():\n # add zero smoothing for multichannel dimension\n sigma = list(sigma) + [0]\n@@ -146,7 +148,10 @@\n depth, height, width = image.shape[:3]\n \n # initialize cluster centroids for desired number of segments\n- grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]\n+ grid_z, grid_y, grid_x = np.meshgrid(np.arange(depth, dtype=dtype),\n+ np.arange(height, dtype=dtype),\n+ np.arange(width, dtype=dtype),\n+ indexing='ij')\n slices = regular_grid(image.shape[:3], n_segments)\n step_z, step_y, step_x = [int(s.step if s.step is not None else 1)\n for s in slices]\n@@ -154,7 +159,8 @@\n segments_y = grid_y[slices]\n segments_x = grid_x[slices]\n \n- segments_color = np.zeros(segments_z.shape + (image.shape[3],))\n+ segments_color = np.zeros(segments_z.shape + (image.shape[3],),\n+ dtype=dtype)\n segments = np.concatenate([segments_z[..., np.newaxis],\n segments_y[..., np.newaxis],\n segments_x[..., np.newaxis],\n@@ -164,8 +170,8 @@\n \n # we do the scaling of ratio in the same way as in the SLIC paper\n # so the values have the same meaning\n- step = float(max((step_z, step_y, step_x)))\n- ratio = 1.0 / compactness\n+ step = dtype.type(max((step_z, step_y, step_x)))\n+ ratio = dtype.type(1.0 / compactness)\n \n image = np.ascontiguousarray(image * ratio)\n", "issue": "[performance] Unwanted cast from float32 to double in slic\n## Description\r\n\r\nIf a `float32` image is given to `skimage.segmentation.slic`, it will be cast to `double` implicitly.\r\n\r\nIn fact, in `slic`, the Cython function `_slic_cython` is called with the input image preprocessed with `img_as_float`. 
If the input array data type is `float32`, it is not touched.\r\nSince the `_slic_cython` signature is \r\n```\r\n (double[:, :, :, ::1] image_zyx,\r\n double[:, ::1] segments,\r\n float step,\r\n Py_ssize_t max_iter,\r\n double[::1] spacing,\r\n bint slic_zero)\r\n```\r\nthe input image is automatically and silently cast to `double`.\r\n\r\nUsing the `np_floats` fused type as advised in #3111 solves the problem.\r\n\n", "before_files": [{"content": "from collections.abc import Iterable\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom ..util import img_as_float, regular_grid\nfrom ..segmentation._slic import (_slic_cython,\n _enforce_label_connectivity_cython)\nfrom ..color import rgb2lab\n\n\ndef slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,\n spacing=None, multichannel=True, convert2lab=None,\n enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3,\n slic_zero=False):\n \"\"\"Segments image using k-means clustering in Color-(x,y,z) space.\n\n Parameters\n ----------\n image : 2D, 3D or 4D ndarray\n Input image, which can be 2D or 3D, and grayscale or multichannel\n (see `multichannel` parameter).\n n_segments : int, optional\n The (approximate) number of labels in the segmented output image.\n compactness : float, optional\n Balances color proximity and space proximity. Higher values give\n more weight to space proximity, making superpixel shapes more\n square/cubic. In SLICO mode, this is the initial compactness.\n This parameter depends strongly on image contrast and on the\n shapes of objects in the image. We recommend exploring possible\n values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before\n refining around a chosen value.\n max_iter : int, optional\n Maximum number of iterations of k-means.\n sigma : float or (3,) array-like of floats, optional\n Width of Gaussian smoothing kernel for pre-processing for each\n dimension of the image. The same sigma is applied to each dimension in\n case of a scalar value. Zero means no smoothing.\n Note, that `sigma` is automatically scaled if it is scalar and a\n manual voxel spacing is provided (see Notes section).\n spacing : (3,) array-like of floats, optional\n The voxel spacing along each image dimension. By default, `slic`\n assumes uniform spacing (same voxel resolution along z, y and x).\n This parameter controls the weights of the distances along z, y,\n and x during k-means clustering.\n multichannel : bool, optional\n Whether the last axis of the image is to be interpreted as multiple\n channels or another spatial dimension.\n convert2lab : bool, optional\n Whether the input should be converted to Lab colorspace prior to\n segmentation. The input image *must* be RGB. Highly recommended.\n This option defaults to ``True`` when ``multichannel=True`` *and*\n ``image.shape[-1] == 3``.\n enforce_connectivity: bool, optional\n Whether the generated segments are connected or not\n min_size_factor: float, optional\n Proportion of the minimum segment size to be removed with respect\n to the supposed segment size ```depth*width*height/n_segments```\n max_size_factor: float, optional\n Proportion of the maximum connected segment size. A value of 3 works\n in most of the cases.\n slic_zero: bool, optional\n Run SLIC-zero, the zero-parameter mode of SLIC. 
[2]_\n\n Returns\n -------\n labels : 2D or 3D array\n Integer mask indicating segment labels.\n\n Raises\n ------\n ValueError\n If ``convert2lab`` is set to ``True`` but the last array\n dimension is not of length 3.\n\n Notes\n -----\n * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to\n segmentation.\n\n * If `sigma` is scalar and `spacing` is provided, the kernel width is\n divided along each dimension by the spacing. For example, if ``sigma=1``\n and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This\n ensures sensible smoothing for anisotropic images.\n\n * The image is rescaled to be in [0, 1] prior to processing.\n\n * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To\n interpret them as 3D with the last dimension having length 3, use\n `multichannel=False`.\n\n References\n ----------\n .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,\n Pascal Fua, and Sabine S\u00fcsstrunk, SLIC Superpixels Compared to\n State-of-the-art Superpixel Methods, TPAMI, May 2012.\n .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO\n\n Examples\n --------\n >>> from skimage.segmentation import slic\n >>> from skimage.data import astronaut\n >>> img = astronaut()\n >>> segments = slic(img, n_segments=100, compactness=10)\n\n Increasing the compactness parameter yields more square regions:\n\n >>> segments = slic(img, n_segments=100, compactness=20)\n\n \"\"\"\n\n image = img_as_float(image)\n is_2d = False\n if image.ndim == 2:\n # 2D grayscale image\n image = image[np.newaxis, ..., np.newaxis]\n is_2d = True\n elif image.ndim == 3 and multichannel:\n # Make 2D multichannel image 3D with depth = 1\n image = image[np.newaxis, ...]\n is_2d = True\n elif image.ndim == 3 and not multichannel:\n # Add channel as single last dimension\n image = image[..., np.newaxis]\n\n if spacing is None:\n spacing = np.ones(3)\n elif isinstance(spacing, (list, tuple)):\n spacing = np.array(spacing, dtype=np.double)\n\n if not isinstance(sigma, Iterable):\n sigma = np.array([sigma, sigma, sigma], dtype=np.double)\n sigma /= spacing.astype(np.double)\n elif isinstance(sigma, (list, tuple)):\n sigma = np.array(sigma, dtype=np.double)\n if (sigma > 0).any():\n # add zero smoothing for multichannel dimension\n sigma = list(sigma) + [0]\n image = ndi.gaussian_filter(image, sigma)\n\n if multichannel and (convert2lab or convert2lab is None):\n if image.shape[-1] != 3 and convert2lab:\n raise ValueError(\"Lab colorspace conversion requires a RGB image.\")\n elif image.shape[-1] == 3:\n image = rgb2lab(image)\n\n depth, height, width = image.shape[:3]\n\n # initialize cluster centroids for desired number of segments\n grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]\n slices = regular_grid(image.shape[:3], n_segments)\n step_z, step_y, step_x = [int(s.step if s.step is not None else 1)\n for s in slices]\n segments_z = grid_z[slices]\n segments_y = grid_y[slices]\n segments_x = grid_x[slices]\n\n segments_color = np.zeros(segments_z.shape + (image.shape[3],))\n segments = np.concatenate([segments_z[..., np.newaxis],\n segments_y[..., np.newaxis],\n segments_x[..., np.newaxis],\n segments_color],\n axis=-1).reshape(-1, 3 + image.shape[3])\n segments = np.ascontiguousarray(segments)\n\n # we do the scaling of ratio in the same way as in the SLIC paper\n # so the values have the same meaning\n step = float(max((step_z, step_y, step_x)))\n ratio = 1.0 / compactness\n\n image = np.ascontiguousarray(image * ratio)\n\n labels = 
_slic_cython(image, segments, step, max_iter, spacing, slic_zero)\n\n if enforce_connectivity:\n segment_size = depth * height * width / n_segments\n min_size = int(min_size_factor * segment_size)\n max_size = int(max_size_factor * segment_size)\n labels = _enforce_label_connectivity_cython(labels,\n min_size,\n max_size)\n\n if is_2d:\n labels = labels[0]\n\n return labels\n", "path": "skimage/segmentation/slic_superpixels.py"}], "after_files": [{"content": "from collections.abc import Iterable\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom ..util import img_as_float, regular_grid\nfrom ..segmentation._slic import (_slic_cython,\n _enforce_label_connectivity_cython)\nfrom ..color import rgb2lab\n\n\ndef slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,\n spacing=None, multichannel=True, convert2lab=None,\n enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3,\n slic_zero=False):\n \"\"\"Segments image using k-means clustering in Color-(x,y,z) space.\n\n Parameters\n ----------\n image : 2D, 3D or 4D ndarray\n Input image, which can be 2D or 3D, and grayscale or multichannel\n (see `multichannel` parameter).\n n_segments : int, optional\n The (approximate) number of labels in the segmented output image.\n compactness : float, optional\n Balances color proximity and space proximity. Higher values give\n more weight to space proximity, making superpixel shapes more\n square/cubic. In SLICO mode, this is the initial compactness.\n This parameter depends strongly on image contrast and on the\n shapes of objects in the image. We recommend exploring possible\n values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before\n refining around a chosen value.\n max_iter : int, optional\n Maximum number of iterations of k-means.\n sigma : float or (3,) array-like of floats, optional\n Width of Gaussian smoothing kernel for pre-processing for each\n dimension of the image. The same sigma is applied to each dimension in\n case of a scalar value. Zero means no smoothing.\n Note, that `sigma` is automatically scaled if it is scalar and a\n manual voxel spacing is provided (see Notes section).\n spacing : (3,) array-like of floats, optional\n The voxel spacing along each image dimension. By default, `slic`\n assumes uniform spacing (same voxel resolution along z, y and x).\n This parameter controls the weights of the distances along z, y,\n and x during k-means clustering.\n multichannel : bool, optional\n Whether the last axis of the image is to be interpreted as multiple\n channels or another spatial dimension.\n convert2lab : bool, optional\n Whether the input should be converted to Lab colorspace prior to\n segmentation. The input image *must* be RGB. Highly recommended.\n This option defaults to ``True`` when ``multichannel=True`` *and*\n ``image.shape[-1] == 3``.\n enforce_connectivity: bool, optional\n Whether the generated segments are connected or not\n min_size_factor: float, optional\n Proportion of the minimum segment size to be removed with respect\n to the supposed segment size ```depth*width*height/n_segments```\n max_size_factor: float, optional\n Proportion of the maximum connected segment size. A value of 3 works\n in most of the cases.\n slic_zero: bool, optional\n Run SLIC-zero, the zero-parameter mode of SLIC. 
[2]_\n\n Returns\n -------\n labels : 2D or 3D array\n Integer mask indicating segment labels.\n\n Raises\n ------\n ValueError\n If ``convert2lab`` is set to ``True`` but the last array\n dimension is not of length 3.\n\n Notes\n -----\n * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to\n segmentation.\n\n * If `sigma` is scalar and `spacing` is provided, the kernel width is\n divided along each dimension by the spacing. For example, if ``sigma=1``\n and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This\n ensures sensible smoothing for anisotropic images.\n\n * The image is rescaled to be in [0, 1] prior to processing.\n\n * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To\n interpret them as 3D with the last dimension having length 3, use\n `multichannel=False`.\n\n References\n ----------\n .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,\n Pascal Fua, and Sabine S\u00fcsstrunk, SLIC Superpixels Compared to\n State-of-the-art Superpixel Methods, TPAMI, May 2012.\n .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO\n\n Examples\n --------\n >>> from skimage.segmentation import slic\n >>> from skimage.data import astronaut\n >>> img = astronaut()\n >>> segments = slic(img, n_segments=100, compactness=10)\n\n Increasing the compactness parameter yields more square regions:\n\n >>> segments = slic(img, n_segments=100, compactness=20)\n\n \"\"\"\n\n image = img_as_float(image)\n dtype = image.dtype\n\n is_2d = False\n if image.ndim == 2:\n # 2D grayscale image\n image = image[np.newaxis, ..., np.newaxis]\n is_2d = True\n elif image.ndim == 3 and multichannel:\n # Make 2D multichannel image 3D with depth = 1\n image = image[np.newaxis, ...]\n is_2d = True\n elif image.ndim == 3 and not multichannel:\n # Add channel as single last dimension\n image = image[..., np.newaxis]\n\n if spacing is None:\n spacing = np.ones(3, dtype=dtype)\n elif isinstance(spacing, (list, tuple)):\n spacing = np.array(spacing, dtype=dtype)\n\n if not isinstance(sigma, Iterable):\n sigma = np.array([sigma, sigma, sigma], dtype=dtype)\n sigma /= spacing.astype(dtype)\n elif isinstance(sigma, (list, tuple)):\n sigma = np.array(sigma, dtype=dtype)\n if (sigma > 0).any():\n # add zero smoothing for multichannel dimension\n sigma = list(sigma) + [0]\n image = ndi.gaussian_filter(image, sigma)\n\n if multichannel and (convert2lab or convert2lab is None):\n if image.shape[-1] != 3 and convert2lab:\n raise ValueError(\"Lab colorspace conversion requires a RGB image.\")\n elif image.shape[-1] == 3:\n image = rgb2lab(image)\n\n depth, height, width = image.shape[:3]\n\n # initialize cluster centroids for desired number of segments\n grid_z, grid_y, grid_x = np.meshgrid(np.arange(depth, dtype=dtype),\n np.arange(height, dtype=dtype),\n np.arange(width, dtype=dtype),\n indexing='ij')\n slices = regular_grid(image.shape[:3], n_segments)\n step_z, step_y, step_x = [int(s.step if s.step is not None else 1)\n for s in slices]\n segments_z = grid_z[slices]\n segments_y = grid_y[slices]\n segments_x = grid_x[slices]\n\n segments_color = np.zeros(segments_z.shape + (image.shape[3],),\n dtype=dtype)\n segments = np.concatenate([segments_z[..., np.newaxis],\n segments_y[..., np.newaxis],\n segments_x[..., np.newaxis],\n segments_color],\n axis=-1).reshape(-1, 3 + image.shape[3])\n segments = np.ascontiguousarray(segments)\n\n # we do the scaling of ratio in the same way as in the SLIC paper\n # so the values have the same meaning\n step = 
dtype.type(max((step_z, step_y, step_x)))\n ratio = dtype.type(1.0 / compactness)\n\n image = np.ascontiguousarray(image * ratio)\n\n labels = _slic_cython(image, segments, step, max_iter, spacing, slic_zero)\n\n if enforce_connectivity:\n segment_size = depth * height * width / n_segments\n min_size = int(min_size_factor * segment_size)\n max_size = int(max_size_factor * segment_size)\n labels = _enforce_label_connectivity_cython(labels,\n min_size,\n max_size)\n\n if is_2d:\n labels = labels[0]\n\n return labels\n", "path": "skimage/segmentation/slic_superpixels.py"}]} | 2,766 | 700 |
gh_patches_debug_30302 | rasdani/github-patches | git_diff | wagtail__wagtail-10860 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Please add a progress bar to "wagtail_update_image_renditions" management command
I love this new management command to regenerate all image renditions, `wagtail_update_image_renditions`. But the websites I had to use it on often had many thousands of images, and the command would need some hours to complete. I would love to see some kind of **progress feedback** in the terminal to get an idea of the time this task might need to complete. This could be text based or even a bar, like we know from packages such as "tqdm".
--- END ISSUE ---
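For context, the kind of feedback asked for above can be produced with a single terminal line that is rewritten via a carriage return. The helper below is purely an illustrative sketch (the function name, bar width and loop are invented for this example and are not part of Wagtail or of the patch further down):

```python
import sys
import time

def print_progress(current, total, bar_length=50):
    # Rewrite the same terminal line; emit a newline only for the last item.
    filled = int(bar_length * current / total)
    arrow = ("-" * max(filled - 1, 0) + ">").ljust(bar_length)
    end = "\n" if current == total else "\r"
    sys.stdout.write(f"Progress: [{arrow}] {current}/{total}{end}")
    sys.stdout.flush()

for i in range(1, 101):
    time.sleep(0.01)  # stand-in for regenerating one rendition
    print_progress(i, 100)
```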
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/images/management/commands/wagtail_update_image_renditions.py`
Content:
```
1 import logging
2
3 from django.core.management.base import BaseCommand
4 from django.db import transaction
5
6 from wagtail.images import get_image_model
7
8 logger = logging.getLogger(__name__)
9
10
11 class Command(BaseCommand):
12 """Command to create missing image renditions with the option to remove (purge) any existing ones."""
13
14 help = "This command will generate all image renditions, with an option to purge existing renditions first."
15
16 def add_arguments(self, parser):
17 parser.add_argument(
18 "--purge-only",
19 action="store_true",
20 help="Purge all image renditions without regenerating them",
21 )
22 parser.add_argument(
23 "--chunk-size",
24 type=int,
25 default=50,
26 help="Operate in x size chunks (default: %(default)s)",
27 )
28
29 def handle(self, *args, **options):
30 Rendition = get_image_model().get_rendition_model()
31
32 renditions = Rendition.objects.all()
33
34 purge_only = options["purge_only"]
35
36 if not renditions.exists():
37 self.stdout.write(self.style.WARNING("No image renditions found."))
38 return
39
40 rendition_ids = list(renditions.values_list("id", flat=True))
41 num_renditions = len(rendition_ids)
42
43 if purge_only:
44 self.stdout.write(
45 self.style.HTTP_INFO(f"Purging {num_renditions} rendition(s)")
46 )
47 else:
48 self.stdout.write(
49 self.style.HTTP_INFO(f"Regenerating {num_renditions} rendition(s)")
50 )
51
52 for rendition in (
53 # Pre-calculate the ids of the renditions to change,
54 # otherwise `.iterator` never ends.
55 renditions.filter(id__in=rendition_ids)
56 .select_related("image")
57 .iterator(chunk_size=options["chunk_size"])
58 ):
59 try:
60 with transaction.atomic():
61 rendition_filter = rendition.filter
62 rendition_image = rendition.image
63
64 # Delete the existing rendition
65 rendition.delete()
66
67 if not purge_only:
68 # Create a new one
69 rendition_image.get_rendition(rendition_filter)
70 except: # noqa:E722
71 logger.exception("Error operating on rendition %d", rendition.id)
72 self.stderr.write(
73 self.style.ERROR(f"Failed to operate on rendition {rendition.id}")
74 )
75 num_renditions -= 1
76
77 if num_renditions:
78 self.stdout.write(
79 self.style.SUCCESS(
80 f"Successfully processed {num_renditions} rendition(s)"
81 )
82 )
83 else:
84 self.stdout.write(self.style.WARNING("Could not process any renditions."))
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/images/management/commands/wagtail_update_image_renditions.py b/wagtail/images/management/commands/wagtail_update_image_renditions.py
--- a/wagtail/images/management/commands/wagtail_update_image_renditions.py
+++ b/wagtail/images/management/commands/wagtail_update_image_renditions.py
@@ -8,6 +8,17 @@
logger = logging.getLogger(__name__)
+def progress_bar(current, total, bar_length=50):
+ fraction = current / total
+
+ arrow = int(fraction * bar_length - 1) * "-" + ">"
+ padding = int(bar_length - len(arrow)) * " "
+
+ ending = "\n" if current == total else "\r"
+
+ return (f"Progress: [{arrow}{padding}] {int(fraction*100)}%", ending)
+
+
class Command(BaseCommand):
"""Command to create missing image renditions with the option to remove (purge) any existing ones."""
@@ -49,6 +60,7 @@
self.style.HTTP_INFO(f"Regenerating {num_renditions} rendition(s)")
)
+ progress_bar_current = 1
for rendition in (
# Pre-calculate the ids of the renditions to change,
# otherwise `.iterator` never ends.
@@ -64,6 +76,10 @@
# Delete the existing rendition
rendition.delete()
+ _progress_bar = progress_bar(progress_bar_current, num_renditions)
+ self.stdout.write(_progress_bar[0], ending=_progress_bar[1])
+ progress_bar_current = progress_bar_current + 1
+
if not purge_only:
# Create a new one
rendition_image.get_rendition(rendition_filter)
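For reference, the `progress_bar` helper added by the diff above returns a `(text, ending)` pair so the command can emit it through `self.stdout.write(text, ending=ending)` and keep rewriting one terminal line per rendition. A rough standalone illustration, assuming the patched module is importable (the totals and loop are invented for the example):

```python
from wagtail.images.management.commands.wagtail_update_image_renditions import progress_bar

total = 10
for current in range(1, total + 1):
    text, ending = progress_bar(current, total)
    print(text, end=ending)  # "\r" keeps overwriting the line; "\n" on the final item
```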
| {"golden_diff": "diff --git a/wagtail/images/management/commands/wagtail_update_image_renditions.py b/wagtail/images/management/commands/wagtail_update_image_renditions.py\n--- a/wagtail/images/management/commands/wagtail_update_image_renditions.py\n+++ b/wagtail/images/management/commands/wagtail_update_image_renditions.py\n@@ -8,6 +8,17 @@\n logger = logging.getLogger(__name__)\n \n \n+def progress_bar(current, total, bar_length=50):\n+ fraction = current / total\n+\n+ arrow = int(fraction * bar_length - 1) * \"-\" + \">\"\n+ padding = int(bar_length - len(arrow)) * \" \"\n+\n+ ending = \"\\n\" if current == total else \"\\r\"\n+\n+ return (f\"Progress: [{arrow}{padding}] {int(fraction*100)}%\", ending)\n+\n+\n class Command(BaseCommand):\n \"\"\"Command to create missing image renditions with the option to remove (purge) any existing ones.\"\"\"\n \n@@ -49,6 +60,7 @@\n self.style.HTTP_INFO(f\"Regenerating {num_renditions} rendition(s)\")\n )\n \n+ progress_bar_current = 1\n for rendition in (\n # Pre-calculate the ids of the renditions to change,\n # otherwise `.iterator` never ends.\n@@ -64,6 +76,10 @@\n # Delete the existing rendition\n rendition.delete()\n \n+ _progress_bar = progress_bar(progress_bar_current, num_renditions)\n+ self.stdout.write(_progress_bar[0], ending=_progress_bar[1])\n+ progress_bar_current = progress_bar_current + 1\n+\n if not purge_only:\n # Create a new one\n rendition_image.get_rendition(rendition_filter)\n", "issue": "Please add a progress bar to \"wagtail_update_image_renditions\" management command\nI love this new management command to regenerate all image renditions `wagtail_update_image_renditions`. But often the websites I had to use it on had many thousands of images and this command would need some hours to complete. I would love to see some kind of **progress feedback** in the terminal to get an idea of the time this task might need to complete. 
This could be text based of even a bar, like we know from packages like \"tqdm\".\n", "before_files": [{"content": "import logging\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom wagtail.images import get_image_model\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n \"\"\"Command to create missing image renditions with the option to remove (purge) any existing ones.\"\"\"\n\n help = \"This command will generate all image renditions, with an option to purge existing renditions first.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--purge-only\",\n action=\"store_true\",\n help=\"Purge all image renditions without regenerating them\",\n )\n parser.add_argument(\n \"--chunk-size\",\n type=int,\n default=50,\n help=\"Operate in x size chunks (default: %(default)s)\",\n )\n\n def handle(self, *args, **options):\n Rendition = get_image_model().get_rendition_model()\n\n renditions = Rendition.objects.all()\n\n purge_only = options[\"purge_only\"]\n\n if not renditions.exists():\n self.stdout.write(self.style.WARNING(\"No image renditions found.\"))\n return\n\n rendition_ids = list(renditions.values_list(\"id\", flat=True))\n num_renditions = len(rendition_ids)\n\n if purge_only:\n self.stdout.write(\n self.style.HTTP_INFO(f\"Purging {num_renditions} rendition(s)\")\n )\n else:\n self.stdout.write(\n self.style.HTTP_INFO(f\"Regenerating {num_renditions} rendition(s)\")\n )\n\n for rendition in (\n # Pre-calculate the ids of the renditions to change,\n # otherwise `.iterator` never ends.\n renditions.filter(id__in=rendition_ids)\n .select_related(\"image\")\n .iterator(chunk_size=options[\"chunk_size\"])\n ):\n try:\n with transaction.atomic():\n rendition_filter = rendition.filter\n rendition_image = rendition.image\n\n # Delete the existing rendition\n rendition.delete()\n\n if not purge_only:\n # Create a new one\n rendition_image.get_rendition(rendition_filter)\n except: # noqa:E722\n logger.exception(\"Error operating on rendition %d\", rendition.id)\n self.stderr.write(\n self.style.ERROR(f\"Failed to operate on rendition {rendition.id}\")\n )\n num_renditions -= 1\n\n if num_renditions:\n self.stdout.write(\n self.style.SUCCESS(\n f\"Successfully processed {num_renditions} rendition(s)\"\n )\n )\n else:\n self.stdout.write(self.style.WARNING(\"Could not process any renditions.\"))\n", "path": "wagtail/images/management/commands/wagtail_update_image_renditions.py"}], "after_files": [{"content": "import logging\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom wagtail.images import get_image_model\n\nlogger = logging.getLogger(__name__)\n\n\ndef progress_bar(current, total, bar_length=50):\n fraction = current / total\n\n arrow = int(fraction * bar_length - 1) * \"-\" + \">\"\n padding = int(bar_length - len(arrow)) * \" \"\n\n ending = \"\\n\" if current == total else \"\\r\"\n\n return (f\"Progress: [{arrow}{padding}] {int(fraction*100)}%\", ending)\n\n\nclass Command(BaseCommand):\n \"\"\"Command to create missing image renditions with the option to remove (purge) any existing ones.\"\"\"\n\n help = \"This command will generate all image renditions, with an option to purge existing renditions first.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--purge-only\",\n action=\"store_true\",\n help=\"Purge all image renditions without regenerating them\",\n )\n parser.add_argument(\n \"--chunk-size\",\n type=int,\n default=50,\n 
help=\"Operate in x size chunks (default: %(default)s)\",\n )\n\n def handle(self, *args, **options):\n Rendition = get_image_model().get_rendition_model()\n\n renditions = Rendition.objects.all()\n\n purge_only = options[\"purge_only\"]\n\n if not renditions.exists():\n self.stdout.write(self.style.WARNING(\"No image renditions found.\"))\n return\n\n rendition_ids = list(renditions.values_list(\"id\", flat=True))\n num_renditions = len(rendition_ids)\n\n if purge_only:\n self.stdout.write(\n self.style.HTTP_INFO(f\"Purging {num_renditions} rendition(s)\")\n )\n else:\n self.stdout.write(\n self.style.HTTP_INFO(f\"Regenerating {num_renditions} rendition(s)\")\n )\n\n progress_bar_current = 1\n for rendition in (\n # Pre-calculate the ids of the renditions to change,\n # otherwise `.iterator` never ends.\n renditions.filter(id__in=rendition_ids)\n .select_related(\"image\")\n .iterator(chunk_size=options[\"chunk_size\"])\n ):\n try:\n with transaction.atomic():\n rendition_filter = rendition.filter\n rendition_image = rendition.image\n\n # Delete the existing rendition\n rendition.delete()\n\n _progress_bar = progress_bar(progress_bar_current, num_renditions)\n self.stdout.write(_progress_bar[0], ending=_progress_bar[1])\n progress_bar_current = progress_bar_current + 1\n\n if not purge_only:\n # Create a new one\n rendition_image.get_rendition(rendition_filter)\n except: # noqa:E722\n logger.exception(\"Error operating on rendition %d\", rendition.id)\n self.stderr.write(\n self.style.ERROR(f\"Failed to operate on rendition {rendition.id}\")\n )\n num_renditions -= 1\n\n if num_renditions:\n self.stdout.write(\n self.style.SUCCESS(\n f\"Successfully processed {num_renditions} rendition(s)\"\n )\n )\n else:\n self.stdout.write(self.style.WARNING(\"Could not process any renditions.\"))\n", "path": "wagtail/images/management/commands/wagtail_update_image_renditions.py"}]} | 1,113 | 397 |
gh_patches_debug_19081 | rasdani/github-patches | git_diff | nvaccess__nvda-11609 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Visual Studio IntelliSense overly verbose
To reproduce this issue:
1. I created a console application
2. In the main method I started typing "Console" and it reads OK
3. When I press "." to access "WriteLine" or another method, it reads the whole line. This gets very confusing and annoying when lines get longer. Imagine pressing the up or down arrows: for each item in the IntelliSense list it repeats the whole line of code and only then reads the item itself.
For example, if the line is the following:
string name = string.format("Hello {0}", textField.text);
When I get to the point where I type "textField" and then "." it will repeat the whole line and then read the "text" property, and it repeats if I move up or down using the arrow keys.
It is worth mentioning that this issue is not present when using Freedom Scientific JAWS, but NVDA is a much better choice for blind developers.
I also reported this issue to Microsoft: https://developercommunity.visualstudio.com/content/problem/164719/nvda-screen-reader-is-not-working-properly-with-in.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/NVDAObjects/UIA/VisualStudio.py`
Content:
```
1 # This file is covered by the GNU General Public License.
2 # See the file COPYING for more details.
3 # Copyright (C) 2020 NV Access Limited, Leonard de Ruijter
4
5 """
6 Object overlay classes for Visual Studio components
7 available in Visual Studio and SQL Server Management Studio.
8 """
9
10 from . import UIA
11 import speech
12 import braille
13 import api
14
15
16 class IntelliSenseItem(UIA):
17
18 def _get_name(self):
19 return self.UIAElement.cachedAutomationID
20
21 def event_UIA_elementSelected(self):
22 # Cancel speech to have speech announce the selection as soon as possible.
23 # This is needed because L{reportFocus} does not cancel speech.
24 # Therefore, if speech wouldn't be cancelled,
25 # selection announcements would queue up when changing selection rapidly.
26 speech.cancelSpeech()
27 api.setNavigatorObject(self, isFocus=True)
28 self.reportFocus()
29 # Display results as flash messages.
30 braille.handler.message(braille.getPropertiesBraille(
31 name=self.name, role=self.role, positionInfo=self.positionInfo, description=self.description
32 ))
33
34
35 class IntelliSenseList(UIA):
36 ...
37
38
39 def findExtraOverlayClasses(obj, clsList):
40 if obj.UIAElement.cachedAutomationId == "listBoxCompletions":
41 clsList.insert(0, IntelliSenseList)
42 elif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == "IntellisenseMenuItem":
43 clsList.insert(0, IntelliSenseItem)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/source/NVDAObjects/UIA/VisualStudio.py b/source/NVDAObjects/UIA/VisualStudio.py
--- a/source/NVDAObjects/UIA/VisualStudio.py
+++ b/source/NVDAObjects/UIA/VisualStudio.py
@@ -36,8 +36,31 @@
...
+class IntelliSenseLiveRegion(UIA):
+ """
+ Visual Studio uses both Intellisense menu item objects and a live region
+ to communicate Intellisense selections.
+ NVDA uses the menu item approach and therefore the live region provides doubled information
+ and is disabled.
+ """
+
+ _shouldAllowUIALiveRegionChangeEvent = False
+
+
+_INTELLISENSE_LIST_AUTOMATION_IDS = {
+ "listBoxCompletions",
+ "CompletionList"
+}
+
+
def findExtraOverlayClasses(obj, clsList):
- if obj.UIAElement.cachedAutomationId == "listBoxCompletions":
+ if obj.UIAAutomationId in _INTELLISENSE_LIST_AUTOMATION_IDS:
clsList.insert(0, IntelliSenseList)
elif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == "IntellisenseMenuItem":
clsList.insert(0, IntelliSenseItem)
+ elif (
+ obj.UIAElement.cachedClassName == "LiveTextBlock"
+ and obj.previous
+ and isinstance(obj.previous.previous, IntelliSenseList)
+ ):
+ clsList.insert(0, IntelliSenseLiveRegion)
| {"golden_diff": "diff --git a/source/NVDAObjects/UIA/VisualStudio.py b/source/NVDAObjects/UIA/VisualStudio.py\n--- a/source/NVDAObjects/UIA/VisualStudio.py\n+++ b/source/NVDAObjects/UIA/VisualStudio.py\n@@ -36,8 +36,31 @@\n \t...\n \n \n+class IntelliSenseLiveRegion(UIA):\n+\t\"\"\"\n+\tVisual Studio uses both Intellisense menu item objects and a live region\n+\tto communicate Intellisense selections.\n+\tNVDA uses the menu item approach and therefore the live region provides doubled information\n+\tand is disabled.\n+\t\"\"\"\n+\n+\t_shouldAllowUIALiveRegionChangeEvent = False\n+\n+\n+_INTELLISENSE_LIST_AUTOMATION_IDS = {\n+\t\"listBoxCompletions\",\n+\t\"CompletionList\"\n+}\n+\n+\n def findExtraOverlayClasses(obj, clsList):\n-\tif obj.UIAElement.cachedAutomationId == \"listBoxCompletions\":\n+\tif obj.UIAAutomationId in _INTELLISENSE_LIST_AUTOMATION_IDS:\n \t\tclsList.insert(0, IntelliSenseList)\n \telif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == \"IntellisenseMenuItem\":\n \t\tclsList.insert(0, IntelliSenseItem)\n+\telif (\n+\t\tobj.UIAElement.cachedClassName == \"LiveTextBlock\"\n+\t\tand obj.previous\n+\t\tand isinstance(obj.previous.previous, IntelliSenseList)\n+\t):\n+\t\tclsList.insert(0, IntelliSenseLiveRegion)\n", "issue": "Visual Studio IntelliSense overly verbose\nTo reproduce this issue:\r\n1. I created a console application\r\n2. In the main method I started typing \"Console\" and it reads ok\r\n3. When I press \".\" to access \"WriteLine\" or other method it reads the whole line. Which gets very confusing and anoying when lines get longer. Imagine pressing up or down arrows and for each item in the Intelli sense it repeats the whole line of code and lastly reads the item in the IntelliSense.\r\nFor example, if the line is following:\r\nstring name = string.format(\"Hello {0}\", textField.text);\r\n\r\nWhen I got to the point when I type \"textField\" and than \".\" it will repeat the whole line and than read the \"text\" property, and it repeats if I move up or down using arrow keys.\r\n\r\nIt is worth mentioning that this issue is not present when using Freedom scientific JAWS, but NVDA is much better choice for blind developers.\r\n\r\nI also reported this issue to the Microsoft: https://developercommunity.visualstudio.com/content/problem/164719/nvda-screen-reader-is-not-working-properly-with-in.html\n", "before_files": [{"content": "# This file is covered by the GNU General Public License.\n# See the file COPYING for more details.\n# Copyright (C) 2020 NV Access Limited, Leonard de Ruijter\n\n\"\"\"\nObject overlay classes for Visual Studio components\navailable in Visual Studio and SQL Server Management Studio.\n\"\"\"\n\nfrom . 
import UIA\nimport speech\nimport braille\nimport api\n\n\nclass IntelliSenseItem(UIA):\n\n\tdef _get_name(self):\n\t\treturn self.UIAElement.cachedAutomationID\n\n\tdef event_UIA_elementSelected(self):\n\t\t# Cancel speech to have speech announce the selection as soon as possible.\n\t\t# This is needed because L{reportFocus} does not cancel speech.\n\t\t# Therefore, if speech wouldn't be cancelled,\n\t\t# selection announcements would queue up when changing selection rapidly.\n\t\tspeech.cancelSpeech()\n\t\tapi.setNavigatorObject(self, isFocus=True)\n\t\tself.reportFocus()\n\t\t# Display results as flash messages.\n\t\tbraille.handler.message(braille.getPropertiesBraille(\n\t\t\tname=self.name, role=self.role, positionInfo=self.positionInfo, description=self.description\n\t\t))\n\n\nclass IntelliSenseList(UIA):\n\t...\n\n\ndef findExtraOverlayClasses(obj, clsList):\n\tif obj.UIAElement.cachedAutomationId == \"listBoxCompletions\":\n\t\tclsList.insert(0, IntelliSenseList)\n\telif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == \"IntellisenseMenuItem\":\n\t\tclsList.insert(0, IntelliSenseItem)\n", "path": "source/NVDAObjects/UIA/VisualStudio.py"}], "after_files": [{"content": "# This file is covered by the GNU General Public License.\n# See the file COPYING for more details.\n# Copyright (C) 2020 NV Access Limited, Leonard de Ruijter\n\n\"\"\"\nObject overlay classes for Visual Studio components\navailable in Visual Studio and SQL Server Management Studio.\n\"\"\"\n\nfrom . import UIA\nimport speech\nimport braille\nimport api\n\n\nclass IntelliSenseItem(UIA):\n\n\tdef _get_name(self):\n\t\treturn self.UIAElement.cachedAutomationID\n\n\tdef event_UIA_elementSelected(self):\n\t\t# Cancel speech to have speech announce the selection as soon as possible.\n\t\t# This is needed because L{reportFocus} does not cancel speech.\n\t\t# Therefore, if speech wouldn't be cancelled,\n\t\t# selection announcements would queue up when changing selection rapidly.\n\t\tspeech.cancelSpeech()\n\t\tapi.setNavigatorObject(self, isFocus=True)\n\t\tself.reportFocus()\n\t\t# Display results as flash messages.\n\t\tbraille.handler.message(braille.getPropertiesBraille(\n\t\t\tname=self.name, role=self.role, positionInfo=self.positionInfo, description=self.description\n\t\t))\n\n\nclass IntelliSenseList(UIA):\n\t...\n\n\nclass IntelliSenseLiveRegion(UIA):\n\t\"\"\"\n\tVisual Studio uses both Intellisense menu item objects and a live region\n\tto communicate Intellisense selections.\n\tNVDA uses the menu item approach and therefore the live region provides doubled information\n\tand is disabled.\n\t\"\"\"\n\n\t_shouldAllowUIALiveRegionChangeEvent = False\n\n\n_INTELLISENSE_LIST_AUTOMATION_IDS = {\n\t\"listBoxCompletions\",\n\t\"CompletionList\"\n}\n\n\ndef findExtraOverlayClasses(obj, clsList):\n\tif obj.UIAAutomationId in _INTELLISENSE_LIST_AUTOMATION_IDS:\n\t\tclsList.insert(0, IntelliSenseList)\n\telif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == \"IntellisenseMenuItem\":\n\t\tclsList.insert(0, IntelliSenseItem)\n\telif (\n\t\tobj.UIAElement.cachedClassName == \"LiveTextBlock\"\n\t\tand obj.previous\n\t\tand isinstance(obj.previous.previous, IntelliSenseList)\n\t):\n\t\tclsList.insert(0, IntelliSenseLiveRegion)\n", "path": "source/NVDAObjects/UIA/VisualStudio.py"}]} | 912 | 336 |
gh_patches_debug_47517 | rasdani/github-patches | git_diff | conda-forge__conda-smithy-1727 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3 regression: Undefined Jinja2 variables get rendered as empty string in linting
### Solution to issue cannot be found in the documentation.
- [X] I checked the documentation.
### Issue
For linting, undefined Jinja2 variables get rendered by `conda_smithy.utils.NullUndefined`. That class contains a `__unicode__` method that returns the name of the variable. This is useful to put a clear placeholder where variables will be filled in by variants from `conda_build_config.yaml` during the actual build. However, `NullUndefined` doesn't override the `__str__` method of Jinja's own `Undefined`, which returns an empty string.
In effect, linting in a Python 2 environment renders, e.g. `- {{ libjpeg }}` as `- libjpeg`, but in a Python 3 environment, we get `- ` which becomes `None` in the `requirements_section` dictionary.
### Installed packages
```shell
-
```
### Environment info
```shell
-
```
--- END ISSUE ---
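A minimal reproduction of the mechanism described in the issue, using plain Jinja2. The subclass below mirrors `conda_smithy.utils.NullUndefined` only to show why `__str__` (rather than `__unicode__`) is the method Python 3 calls while rendering; it is illustrative, not the project's actual code:

```python
import jinja2

class NullUndefined(jinja2.Undefined):
    def __str__(self):
        # Python 3 renders undefined values via str(); returning the variable
        # name keeps "{{ libjpeg }}" visible instead of collapsing to "".
        return self._undefined_name

env = jinja2.Environment(undefined=NullUndefined)
print(env.from_string("- {{ libjpeg }}").render())  # prints "- libjpeg"
```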
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_smithy/utils.py`
Content:
```
1 import shutil
2 import tempfile
3 import io
4 import jinja2
5 import datetime
6 import time
7 import os
8 import sys
9 from pathlib import Path
10 from collections import defaultdict
11 from contextlib import contextmanager
12
13 import ruamel.yaml
14
15
16 def get_feedstock_name_from_meta(meta):
17 """Resolve the feedtstock name from the parsed meta.yaml."""
18 if "feedstock-name" in meta.meta["extra"]:
19 return meta.meta["extra"]["feedstock-name"]
20 elif "parent_recipe" in meta.meta["extra"]:
21 return meta.meta["extra"]["parent_recipe"]["name"]
22 else:
23 return meta.name()
24
25
26 def get_feedstock_about_from_meta(meta) -> dict:
27 """Fetch the feedtstock about from the parsed meta.yaml."""
28 # it turns out that conda_build would not preserve the feedstock about:
29 # - if a subpackage does not have about, it uses the feedstock's
30 # - if a subpackage has about, it's used as is
31 # therefore we need to parse the yaml again just to get the about section...
32 if "parent_recipe" in meta.meta["extra"]:
33 recipe_meta = os.path.join(
34 meta.meta["extra"]["parent_recipe"]["path"], "meta.yaml"
35 )
36 with io.open(recipe_meta, "rt") as fh:
37 content = render_meta_yaml("".join(fh))
38 meta = get_yaml().load(content)
39 return dict(meta["about"])
40 else:
41 # no parent recipe for any reason, use self's about
42 return dict(meta.meta["about"])
43
44
45 def get_yaml():
46 # define global yaml API
47 # roundrip-loader and allowing duplicate keys
48 # for handling # [filter] / # [not filter]
49 # Don't use a global variable for this as a global
50 # variable will make conda-smithy thread unsafe.
51 yaml = ruamel.yaml.YAML(typ="rt")
52 yaml.allow_duplicate_keys = True
53 return yaml
54
55
56 @contextmanager
57 def tmp_directory():
58 tmp_dir = tempfile.mkdtemp("_recipe")
59 yield tmp_dir
60 shutil.rmtree(tmp_dir)
61
62
63 class NullUndefined(jinja2.Undefined):
64 def __unicode__(self):
65 return self._undefined_name
66
67 def __getattr__(self, name):
68 return "{}.{}".format(self, name)
69
70 def __getitem__(self, name):
71 return '{}["{}"]'.format(self, name)
72
73
74 class MockOS(dict):
75 def __init__(self):
76 self.environ = defaultdict(lambda: "")
77 self.sep = "/"
78
79
80 def stub_compatible_pin(*args, **kwargs):
81 return f"compatible_pin {args[0]}"
82
83
84 def stub_subpackage_pin(*args, **kwargs):
85 return f"subpackage_pin {args[0]}"
86
87
88 def render_meta_yaml(text):
89 env = jinja2.Environment(undefined=NullUndefined)
90
91 # stub out cb3 jinja2 functions - they are not important for linting
92 # if we don't stub them out, the ruamel.yaml load fails to interpret them
93 # we can't just use conda-build's api.render functionality, because it would apply selectors
94 env.globals.update(
95 dict(
96 compiler=lambda x: x + "_compiler_stub",
97 pin_subpackage=stub_subpackage_pin,
98 pin_compatible=stub_compatible_pin,
99 cdt=lambda *args, **kwargs: "cdt_stub",
100 load_file_regex=lambda *args, **kwargs: defaultdict(lambda: ""),
101 datetime=datetime,
102 time=time,
103 target_platform="linux-64",
104 mpi="mpi",
105 )
106 )
107 mockos = MockOS()
108 py_ver = "3.7"
109 context = {"os": mockos, "environ": mockos.environ, "PY_VER": py_ver}
110 content = env.from_string(text).render(context)
111 return content
112
113
114 @contextmanager
115 def update_conda_forge_config(forge_yaml):
116 """Utility method used to update conda forge configuration files
117
118 Uage:
119 >>> with update_conda_forge_config(somepath) as cfg:
120 ... cfg['foo'] = 'bar'
121 """
122 if os.path.exists(forge_yaml):
123 with open(forge_yaml, "r") as fh:
124 code = get_yaml().load(fh)
125 else:
126 code = {}
127
128 # Code could come in as an empty list.
129 if not code:
130 code = {}
131
132 yield code
133
134 get_yaml().dump(code, Path(forge_yaml))
135
136
137 def merge_dict(src, dest):
138 """Recursive merge dictionary"""
139 for key, value in src.items():
140 if isinstance(value, dict):
141 # get node or create one
142 node = dest.setdefault(key, {})
143 merge_dict(value, node)
144 else:
145 dest[key] = value
146
147 return dest
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_smithy/utils.py b/conda_smithy/utils.py
--- a/conda_smithy/utils.py
+++ b/conda_smithy/utils.py
@@ -61,7 +61,7 @@
class NullUndefined(jinja2.Undefined):
- def __unicode__(self):
+ def __str__(self):
return self._undefined_name
def __getattr__(self, name):
| {"golden_diff": "diff --git a/conda_smithy/utils.py b/conda_smithy/utils.py\n--- a/conda_smithy/utils.py\n+++ b/conda_smithy/utils.py\n@@ -61,7 +61,7 @@\n \n \n class NullUndefined(jinja2.Undefined):\n- def __unicode__(self):\n+ def __str__(self):\n return self._undefined_name\n \n def __getattr__(self, name):\n", "issue": "Python 3 regression: Undefined Jinja2 variables get rendered as empty string in linting\n### Solution to issue cannot be found in the documentation.\n\n- [X] I checked the documentation.\n\n### Issue\n\nFor linting, undefined Jinja2 variables get rendered by `conda_smithy.utils.NullUndefined`. That class contains a `__unicode__` method that returns the name of the variable. This is useful to put a clear placeholder where variables will be filled by variants from `conda_build_config.yaml` during the actual build. However, `NullUndefined` doesn't overwrite the `__str__` method of Jinja's own `Undefined`, which returns an empty string.\r\n\r\nIn effect, linting in a Python 2 environment renders, e.g. `- {{ libjpeg }}` as `- libjpeg`, but in a Python 3 environment, we get `- ` which becomes `None` in the `requirements_section` dictionary.\n\n### Installed packages\n\n```shell\n-\n```\n\n\n### Environment info\n\n```shell\n-\n```\n\n", "before_files": [{"content": "import shutil\nimport tempfile\nimport io\nimport jinja2\nimport datetime\nimport time\nimport os\nimport sys\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport ruamel.yaml\n\n\ndef get_feedstock_name_from_meta(meta):\n \"\"\"Resolve the feedtstock name from the parsed meta.yaml.\"\"\"\n if \"feedstock-name\" in meta.meta[\"extra\"]:\n return meta.meta[\"extra\"][\"feedstock-name\"]\n elif \"parent_recipe\" in meta.meta[\"extra\"]:\n return meta.meta[\"extra\"][\"parent_recipe\"][\"name\"]\n else:\n return meta.name()\n\n\ndef get_feedstock_about_from_meta(meta) -> dict:\n \"\"\"Fetch the feedtstock about from the parsed meta.yaml.\"\"\"\n # it turns out that conda_build would not preserve the feedstock about:\n # - if a subpackage does not have about, it uses the feedstock's\n # - if a subpackage has about, it's used as is\n # therefore we need to parse the yaml again just to get the about section...\n if \"parent_recipe\" in meta.meta[\"extra\"]:\n recipe_meta = os.path.join(\n meta.meta[\"extra\"][\"parent_recipe\"][\"path\"], \"meta.yaml\"\n )\n with io.open(recipe_meta, \"rt\") as fh:\n content = render_meta_yaml(\"\".join(fh))\n meta = get_yaml().load(content)\n return dict(meta[\"about\"])\n else:\n # no parent recipe for any reason, use self's about\n return dict(meta.meta[\"about\"])\n\n\ndef get_yaml():\n # define global yaml API\n # roundrip-loader and allowing duplicate keys\n # for handling # [filter] / # [not filter]\n # Don't use a global variable for this as a global\n # variable will make conda-smithy thread unsafe.\n yaml = ruamel.yaml.YAML(typ=\"rt\")\n yaml.allow_duplicate_keys = True\n return yaml\n\n\n@contextmanager\ndef tmp_directory():\n tmp_dir = tempfile.mkdtemp(\"_recipe\")\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return \"{}.{}\".format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: \"\")\n self.sep = \"/\"\n\n\ndef stub_compatible_pin(*args, **kwargs):\n return 
f\"compatible_pin {args[0]}\"\n\n\ndef stub_subpackage_pin(*args, **kwargs):\n return f\"subpackage_pin {args[0]}\"\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(\n dict(\n compiler=lambda x: x + \"_compiler_stub\",\n pin_subpackage=stub_subpackage_pin,\n pin_compatible=stub_compatible_pin,\n cdt=lambda *args, **kwargs: \"cdt_stub\",\n load_file_regex=lambda *args, **kwargs: defaultdict(lambda: \"\"),\n datetime=datetime,\n time=time,\n target_platform=\"linux-64\",\n mpi=\"mpi\",\n )\n )\n mockos = MockOS()\n py_ver = \"3.7\"\n context = {\"os\": mockos, \"environ\": mockos.environ, \"PY_VER\": py_ver}\n content = env.from_string(text).render(context)\n return content\n\n\n@contextmanager\ndef update_conda_forge_config(forge_yaml):\n \"\"\"Utility method used to update conda forge configuration files\n\n Uage:\n >>> with update_conda_forge_config(somepath) as cfg:\n ... cfg['foo'] = 'bar'\n \"\"\"\n if os.path.exists(forge_yaml):\n with open(forge_yaml, \"r\") as fh:\n code = get_yaml().load(fh)\n else:\n code = {}\n\n # Code could come in as an empty list.\n if not code:\n code = {}\n\n yield code\n\n get_yaml().dump(code, Path(forge_yaml))\n\n\ndef merge_dict(src, dest):\n \"\"\"Recursive merge dictionary\"\"\"\n for key, value in src.items():\n if isinstance(value, dict):\n # get node or create one\n node = dest.setdefault(key, {})\n merge_dict(value, node)\n else:\n dest[key] = value\n\n return dest\n", "path": "conda_smithy/utils.py"}], "after_files": [{"content": "import shutil\nimport tempfile\nimport io\nimport jinja2\nimport datetime\nimport time\nimport os\nimport sys\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport ruamel.yaml\n\n\ndef get_feedstock_name_from_meta(meta):\n \"\"\"Resolve the feedtstock name from the parsed meta.yaml.\"\"\"\n if \"feedstock-name\" in meta.meta[\"extra\"]:\n return meta.meta[\"extra\"][\"feedstock-name\"]\n elif \"parent_recipe\" in meta.meta[\"extra\"]:\n return meta.meta[\"extra\"][\"parent_recipe\"][\"name\"]\n else:\n return meta.name()\n\n\ndef get_feedstock_about_from_meta(meta) -> dict:\n \"\"\"Fetch the feedtstock about from the parsed meta.yaml.\"\"\"\n # it turns out that conda_build would not preserve the feedstock about:\n # - if a subpackage does not have about, it uses the feedstock's\n # - if a subpackage has about, it's used as is\n # therefore we need to parse the yaml again just to get the about section...\n if \"parent_recipe\" in meta.meta[\"extra\"]:\n recipe_meta = os.path.join(\n meta.meta[\"extra\"][\"parent_recipe\"][\"path\"], \"meta.yaml\"\n )\n with io.open(recipe_meta, \"rt\") as fh:\n content = render_meta_yaml(\"\".join(fh))\n meta = get_yaml().load(content)\n return dict(meta[\"about\"])\n else:\n # no parent recipe for any reason, use self's about\n return dict(meta.meta[\"about\"])\n\n\ndef get_yaml():\n # define global yaml API\n # roundrip-loader and allowing duplicate keys\n # for handling # [filter] / # [not filter]\n # Don't use a global variable for this as a global\n # variable will make conda-smithy thread unsafe.\n yaml = ruamel.yaml.YAML(typ=\"rt\")\n yaml.allow_duplicate_keys = True\n return yaml\n\n\n@contextmanager\ndef tmp_directory():\n 
tmp_dir = tempfile.mkdtemp(\"_recipe\")\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __str__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return \"{}.{}\".format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: \"\")\n self.sep = \"/\"\n\n\ndef stub_compatible_pin(*args, **kwargs):\n return f\"compatible_pin {args[0]}\"\n\n\ndef stub_subpackage_pin(*args, **kwargs):\n return f\"subpackage_pin {args[0]}\"\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(\n dict(\n compiler=lambda x: x + \"_compiler_stub\",\n pin_subpackage=stub_subpackage_pin,\n pin_compatible=stub_compatible_pin,\n cdt=lambda *args, **kwargs: \"cdt_stub\",\n load_file_regex=lambda *args, **kwargs: defaultdict(lambda: \"\"),\n datetime=datetime,\n time=time,\n target_platform=\"linux-64\",\n mpi=\"mpi\",\n )\n )\n mockos = MockOS()\n py_ver = \"3.7\"\n context = {\"os\": mockos, \"environ\": mockos.environ, \"PY_VER\": py_ver}\n content = env.from_string(text).render(context)\n return content\n\n\n@contextmanager\ndef update_conda_forge_config(forge_yaml):\n \"\"\"Utility method used to update conda forge configuration files\n\n Uage:\n >>> with update_conda_forge_config(somepath) as cfg:\n ... cfg['foo'] = 'bar'\n \"\"\"\n if os.path.exists(forge_yaml):\n with open(forge_yaml, \"r\") as fh:\n code = get_yaml().load(fh)\n else:\n code = {}\n\n # Code could come in as an empty list.\n if not code:\n code = {}\n\n yield code\n\n get_yaml().dump(code, Path(forge_yaml))\n\n\ndef merge_dict(src, dest):\n \"\"\"Recursive merge dictionary\"\"\"\n for key, value in src.items():\n if isinstance(value, dict):\n # get node or create one\n node = dest.setdefault(key, {})\n merge_dict(value, node)\n else:\n dest[key] = value\n\n return dest\n", "path": "conda_smithy/utils.py"}]} | 1,844 | 95 |
gh_patches_debug_566 | rasdani/github-patches | git_diff | pex-tool__pex-797
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.0.1
On the docket:
+ [x] pex --index-url=... fails in 2.0.0 #794
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.0.0'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.0.0'
+__version__ = '2.0.1'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.0.0'\n+__version__ = '2.0.1'\n", "issue": "Release 2.0.1\nOn the docket:\r\n\r\n+ [x] pex --index-url=... fails in 2.0.0 #794\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.0.0'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.0.1'\n", "path": "pex/version.py"}]} | 343 | 94 |
gh_patches_debug_56452 | rasdani/github-patches | git_diff | netket__netket-111
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python bindings for Jastrow machines randomly failing
I realized in #91 that once in a while the python tests for the `Jastrow` machines fail. This issue seems related to some memory problem, but I still don't understand if it is on the c++ side or python
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import re
3 import sys
4 import platform
5 import subprocess
6
7 from setuptools import setup, Extension
8 from setuptools.command.build_ext import build_ext
9 from distutils.version import LooseVersion
10
11
12 class CMakeExtension(Extension):
13 def __init__(self, name, sourcedir=''):
14 Extension.__init__(self, name, sources=[])
15 self.sourcedir = os.path.abspath(sourcedir)
16
17
18 class CMakeBuild(build_ext):
19 def run(self):
20 try:
21 out = subprocess.check_output(['cmake', '--version'])
22 except OSError:
23 raise RuntimeError("CMake must be installed to build the following extensions: " +
24 ", ".join(e.name for e in self.extensions))
25
26 if platform.system() == "Windows":
27 cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
28 if cmake_version < '3.1.0':
29 raise RuntimeError("CMake >= 3.1.0 is required on Windows")
30
31 for ext in self.extensions:
32 self.build_extension(ext)
33
34 def build_extension(self, ext):
35 extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
36 cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
37 '-DPYTHON_EXECUTABLE=' + sys.executable]
38
39 cfg = 'Debug' if self.debug else 'Release'
40 build_args = ['--config', cfg]
41
42 if platform.system() == "Windows":
43 cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
44 if sys.maxsize > 2**32:
45 cmake_args += ['-A', 'x64']
46 build_args += ['--', '/m']
47 else:
48 cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
49 build_args += ['--', '-j2']
50
51 env = os.environ.copy()
52 env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
53 self.distribution.get_version())
54 if not os.path.exists(self.build_temp):
55 os.makedirs(self.build_temp)
56 subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
57 subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
58
59 setup(
60 name='netket',
61 version='0.1',
62 author='Giuseppe Carleo et al.',
63 description='NetKet',
64 url='http://github.com/netket/netket',
65 author_email='[email protected]',
66 license='Apache',
67 ext_modules=[CMakeExtension('netket')],
68 cmdclass=dict(build_ext=CMakeBuild),
69 zip_safe=False,
70 )
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,7 @@
setup(
name='netket',
- version='0.1',
+ version='2.0',
author='Giuseppe Carleo et al.',
description='NetKet',
url='http://github.com/netket/netket',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,7 +58,7 @@\n \n setup(\n name='netket',\n- version='0.1',\n+ version='2.0',\n author='Giuseppe Carleo et al.',\n description='NetKet',\n url='http://github.com/netket/netket',\n", "issue": "Python bindings for Jastrow machines randomly failing\nI realized in #91 that once in a while the python tests for the `Jastrow` machines fail. This issue seems related to some memory problem, but I still don't understand if it is on the c++ side or python \n", "before_files": [{"content": "import os\nimport re\nimport sys\nimport platform\nimport subprocess\n\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom distutils.version import LooseVersion\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=''):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n out = subprocess.check_output(['cmake', '--version'])\n except OSError:\n raise RuntimeError(\"CMake must be installed to build the following extensions: \" +\n \", \".join(e.name for e in self.extensions))\n\n if platform.system() == \"Windows\":\n cmake_version = LooseVersion(re.search(r'version\\s*([\\d.]+)', out.decode()).group(1))\n if cmake_version < '3.1.0':\n raise RuntimeError(\"CMake >= 3.1.0 is required on Windows\")\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext):\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,\n '-DPYTHON_EXECUTABLE=' + sys.executable]\n\n cfg = 'Debug' if self.debug else 'Release'\n build_args = ['--config', cfg]\n\n if platform.system() == \"Windows\":\n cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]\n if sys.maxsize > 2**32:\n cmake_args += ['-A', 'x64']\n build_args += ['--', '/m']\n else:\n cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]\n build_args += ['--', '-j2']\n\n env = os.environ.copy()\n env['CXXFLAGS'] = '{} -DVERSION_INFO=\\\\\"{}\\\\\"'.format(env.get('CXXFLAGS', ''),\n self.distribution.get_version())\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)\n subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)\n\nsetup(\n name='netket',\n version='0.1',\n author='Giuseppe Carleo et al.',\n description='NetKet',\n url='http://github.com/netket/netket',\n author_email='[email protected]',\n license='Apache',\n ext_modules=[CMakeExtension('netket')],\n cmdclass=dict(build_ext=CMakeBuild),\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport re\nimport sys\nimport platform\nimport subprocess\n\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom distutils.version import LooseVersion\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=''):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n out = subprocess.check_output(['cmake', '--version'])\n except OSError:\n raise RuntimeError(\"CMake must be installed to build the following extensions: \" +\n \", \".join(e.name for e in self.extensions))\n\n if platform.system() == \"Windows\":\n 
cmake_version = LooseVersion(re.search(r'version\\s*([\\d.]+)', out.decode()).group(1))\n if cmake_version < '3.1.0':\n raise RuntimeError(\"CMake >= 3.1.0 is required on Windows\")\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext):\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,\n '-DPYTHON_EXECUTABLE=' + sys.executable]\n\n cfg = 'Debug' if self.debug else 'Release'\n build_args = ['--config', cfg]\n\n if platform.system() == \"Windows\":\n cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]\n if sys.maxsize > 2**32:\n cmake_args += ['-A', 'x64']\n build_args += ['--', '/m']\n else:\n cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]\n build_args += ['--', '-j2']\n\n env = os.environ.copy()\n env['CXXFLAGS'] = '{} -DVERSION_INFO=\\\\\"{}\\\\\"'.format(env.get('CXXFLAGS', ''),\n self.distribution.get_version())\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)\n subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)\n\nsetup(\n name='netket',\n version='2.0',\n author='Giuseppe Carleo et al.',\n description='NetKet',\n url='http://github.com/netket/netket',\n author_email='[email protected]',\n license='Apache',\n ext_modules=[CMakeExtension('netket')],\n cmdclass=dict(build_ext=CMakeBuild),\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,045 | 86 |